diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6cbee85c..1af8e733 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -32,7 +32,14 @@ jobs: - ubuntu-latest - macos-latest - windows-latest - + inhouse: + - stable + - master + exclude: + - os: macos-latest + inhouse: master + - os: windows-latest + inhouse: master runs-on: ${{ matrix.os }} defaults: @@ -46,16 +53,6 @@ jobs: run: | echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml - if: ${{ matrix.os }} == 'windows-latest' - - - name: Add solver to environment - run: | - echo -e "- glpk\n- ipopt" >> envs/environment.yaml - if: ${{ matrix.os }} != 'windows-latest' - - name: Setup micromamba uses: mamba-org/setup-micromamba@v1 with: @@ -66,6 +63,11 @@ jobs: cache-environment: true cache-downloads: true + - name: Install inhouse packages + run: | + pip install git+https://github.com/PyPSA/atlite.git@master git+https://github.com/PyPSA/powerplantmatching.git@master git+https://github.com/PyPSA/linopy.git@master + if: matrix.inhouse == 'master' + - name: Set cache dates run: | echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV @@ -79,14 +81,10 @@ jobs: key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} - name: Test snakemake workflow - run: | - snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime - snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime - snakemake -call solve_elec_networks --configfile config/test/config.scenarios.electricity.yaml + run: ./test.sh - name: Upload artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4.3.0 with: name: resources-results path: | @@ -94,3 +92,4 @@ jobs: results if-no-files-found: warn retention-days: 1 + if: matrix.os == 'ubuntu-latest' && matrix.inhouse == 'stable' diff --git a/.gitignore b/.gitignore index e79d129d..467ecd95 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ __pycache__ *dconf gurobi.log .vscode +*.orig /bak /resources @@ -33,23 +34,24 @@ dconf /data/links_p_nom.csv /data/*totals.csv /data/biomass* -/data/emobility/ -/data/eea* -/data/jrc* +/data/bundle-sector/emobility/ +/data/bundle-sector/eea* +/data/bundle-sector/jrc* /data/heating/ -/data/eurostat* +/data/bundle-sector/eurostat* /data/odyssee/ /data/transport_data.csv -/data/switzerland* +/data/bundle-sector/switzerland* /data/.nfs* -/data/Industrial_Database.csv +/data/bundle-sector/Industrial_Database.csv /data/retro/tabula-calculator-calcsetbuilding.csv -/data/nuts* +/data/bundle-sector/nuts* data/gas_network/scigrid-gas/ data/costs_*.csv dask-worker-space/ publications.jrc.ec.europa.eu/ +d1gam3xoknrgr2.cloudfront.net/ *.org diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 006673b9..5c41e781 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ exclude: "^LICENSES" repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: 
check-merge-conflict - id: end-of-file-fixer @@ -17,7 +17,7 @@ repos: # Sort package imports alphabetically - repo: https://github.com/PyCQA/isort - rev: 5.12.0 + rev: 5.13.2 hooks: - id: isort args: ["--profile", "black", "--filter-files"] @@ -30,10 +30,10 @@ repos: # Find common spelling mistakes in comments and docstrings - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell - args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes + args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes types_or: [python, rst, markdown] files: ^(scripts|doc)/ @@ -45,13 +45,13 @@ repos: args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"] - repo: https://github.com/keewis/blackdoc - rev: v0.3.8 + rev: v0.3.9 hooks: - id: blackdoc # Formatting with "black" coding style -- repo: https://github.com/psf/black - rev: 23.7.0 +- repo: https://github.com/psf/black-pre-commit-mirror + rev: 24.1.1 hooks: # Format Python files - id: black @@ -67,14 +67,14 @@ repos: # Do YAML formatting (before the linter checks it for misses) - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks - rev: v2.10.0 + rev: v2.12.0 hooks: - id: pretty-format-yaml args: [--autofix, --indent, "2", --preserve-quotes] # Format Snakemake rule / workflow files - repo: https://github.com/snakemake/snakefmt - rev: v0.8.4 + rev: v0.10.0 hooks: - id: snakefmt @@ -87,6 +87,6 @@ repos: # Check for FSFE REUSE compliance (licensing) - repo: https://github.com/fsfe/reuse-tool - rev: v2.1.0 + rev: v3.0.1 hooks: - id: reuse diff --git a/.readthedocs.yml b/.readthedocs.yml index 900dba1e..30684052 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -14,4 +14,3 @@ build: python: install: - requirements: doc/requirements.txt - system_packages: false diff --git a/CITATION.cff b/CITATION.cff index c80b73ef..f8b28b5f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -6,7 +6,7 @@ cff-version: 1.1.0 message: "If you use this package, please cite it in the following way." title: "PyPSA-Eur: An open sector-coupled optimisation model of the European energy system" repository: https://github.com/pypsa/pypsa-eur -version: 0.8.1 +version: 0.9.0 license: MIT authors: - family-names: Brown diff --git a/README.md b/README.md index 9691abc4..4a58d75c 100644 --- a/README.md +++ b/README.md @@ -61,9 +61,9 @@ The dataset consists of: - A grid model based on a modified [GridKit](https://github.com/bdw/GridKit) extraction of the [ENTSO-E Transmission System - Map](https://www.entsoe.eu/data/map/). The grid model contains 6763 lines + Map](https://www.entsoe.eu/data/map/). The grid model contains 7072 lines (alternating current lines at and above 220kV voltage level and all high - voltage direct current lines) and 3642 substations. + voltage direct current lines) and 3803 substations. - The open power plant database [powerplantmatching](https://github.com/FRESNA/powerplantmatching). - Electrical demand time series from the @@ -103,6 +103,6 @@ We strongly welcome anyone interested in contributing to this project. If you ha # Licence The code in PyPSA-Eur is released as free software under the -[MIT License](https://opensource.org/licenses/MIT), see `LICENSE.txt`. +[MIT License](https://opensource.org/licenses/MIT), see [`doc/licenses.rst`](doc/licenses.rst). 
However, different licenses and terms of use may apply to the various input data. diff --git a/Snakefile b/Snakefile index c9fdbb34..e0949ed9 100644 --- a/Snakefile +++ b/Snakefile @@ -13,9 +13,10 @@ from scripts._helpers import path_provider min_version("7.7") HTTP = HTTPRemoteProvider() - -if not exists("config/config.yaml"): - copyfile("config/config.default.yaml", "config/config.yaml") +conf_file = os.path.join(workflow.current_basedir, "config/config.yaml") +conf_default_file = os.path.join(workflow.current_basedir, "config/config.default.yaml") +if not exists(conf_file) and exists(conf_default_file): + copyfile(conf_default_file, conf_file) configfile: "config/config.yaml" @@ -42,6 +43,12 @@ resources = path_provider("resources/", RDIR, run["shared_resources"]) CDIR = "" if run["shared_cutouts"] else RDIR LOGS = "logs/" + RDIR BENCHMARKS = "benchmarks/" + RDIR +if not (shared_resources := run.get("shared_resources")): + RESOURCES = "resources/" + RDIR +elif isinstance(shared_resources, str): + RESOURCES = "resources/" + shared_resources + "/" +else: + RESOURCES = "resources/" RESULTS = "results/" + RDIR @@ -77,13 +84,31 @@ if config["foresight"] == "myopic": include: "rules/solve_myopic.smk" +if config["foresight"] == "perfect": + + include: "rules/solve_perfect.smk" + + +rule all: + input: + RESULTS + "graphs/costs.pdf", + default_target: True + + rule purge: - message: - "Purging generated resources, results and docs. Downloads are kept." run: - rmtree("resources/", ignore_errors=True) - rmtree("results/", ignore_errors=True) - rmtree("doc/_build", ignore_errors=True) + import builtins + + do_purge = builtins.input( + "Do you really want to delete all generated resources, \nresults and docs (downloads are kept)? [y/N] " + ) + if do_purge == "y": + rmtree("resources/", ignore_errors=True) + rmtree("results/", ignore_errors=True) + rmtree("doc/_build", ignore_errors=True) + print("Purging generated resources, results and docs. Downloads are kept.") + else: + raise Exception(f"Input {do_purge}. Aborting purge.") rule dag: @@ -118,6 +143,7 @@ rule sync: shell: """ rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster} + rsync -uvarh --no-g {params.cluster}/resources . || echo "No resources directory, skipping rsync" rsync -uvarh --no-g {params.cluster}/results . || echo "No results directory, skipping rsync" rsync -uvarh --no-g {params.cluster}/logs . 
|| echo "No logs directory, skipping rsync" """ diff --git a/config/config.default.yaml b/config/config.default.yaml index 238c3d41..ccd3baf8 100644 --- a/config/config.default.yaml +++ b/config/config.default.yaml @@ -3,7 +3,7 @@ # SPDX-License-Identifier: CC0-1.0 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#top-level-configuration -version: 0.8.1 +version: 0.9.0 tutorial: false logging: @@ -47,7 +47,7 @@ scenario: opts: - '' sector_opts: - - Co2L0-3H-T-H-B-I-A-solar+p3-dist1 + - Co2L0-3H-T-H-B-I-A-dist1 planning_horizons: # - 2020 # - 2030 @@ -62,6 +62,9 @@ snapshots: start: "2013-01-01" end: "2014-01-01" inclusive: 'left' + resolution: false + segmentation: false + #representative: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#enable enable: @@ -71,11 +74,13 @@ enable: retrieve_sector_databundle: true retrieve_cost_data: true build_cutout: false + retrieve_irena: false retrieve_cutout: true build_natura_raster: false retrieve_natura_raster: true custom_busmap: false + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget co2_budget: 2020: 0.701 @@ -88,8 +93,10 @@ co2_budget: # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity electricity: - voltages: [220., 300., 380.] + voltages: [220., 300., 380., 500., 750.] + gaslimit_enable: false gaslimit: false + co2limit_enable: false co2limit: 7.75e+7 co2base: 1.487e+9 agg_p_nom_limits: data/agg_p_nom_minmax.csv @@ -110,8 +117,9 @@ electricity: Store: [battery, H2] Link: [] # H2 pipeline - powerplants_filter: (DateOut >= 2022 or DateOut != DateOut) + powerplants_filter: (DateOut >= 2023 or DateOut != DateOut) and not (Country == 'Germany' and Fueltype == 'Nuclear') custom_powerplants: false + everywhere_powerplants: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro] @@ -126,6 +134,10 @@ electricity: Onshore: [onwind] PV: [solar] + autarky: + enable: false + by_country: false + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#atlite atlite: default_cutout: europe-2013-era5 @@ -137,14 +149,14 @@ atlite: # module: era5 europe-2013-era5: module: era5 # in priority order - x: [-12., 35.] + x: [-12., 42.] y: [33., 72] dx: 0.3 dy: 0.3 time: ['2013', '2013'] europe-2013-sarah: module: [sarah, era5] # in priority order - x: [-12., 45.] + x: [-12., 42.] 
y: [33., 65] dx: 0.2 dy: 0.2 @@ -160,45 +172,51 @@ renewable: resource: method: wind turbine: Vestas_V112_3MW + add_cutout_windspeed: true capacity_per_sqkm: 3 # correction_factor: 0.93 corine: grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32] distance: 1000 distance_grid_codes: [1, 2, 3, 4, 5, 6] + luisa: false + # grid_codes: [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242] + # distance: 1000 + # distance_grid_codes: [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242] natura: true excluder_resolution: 100 - potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: cutout: europe-2013-era5 resource: method: wind - turbine: NREL_ReferenceTurbine_5MW_offshore + turbine: NREL_ReferenceTurbine_2020ATB_5.5MW + add_cutout_windspeed: true capacity_per_sqkm: 2 correction_factor: 0.8855 corine: [44, 255] + luisa: false # [0, 5230] natura: true ship_threshold: 400 max_depth: 50 max_shore_distance: 30000 excluder_resolution: 200 - potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: cutout: europe-2013-era5 resource: method: wind - turbine: NREL_ReferenceTurbine_5MW_offshore + turbine: NREL_ReferenceTurbine_2020ATB_5.5MW + add_cutout_windspeed: true capacity_per_sqkm: 2 correction_factor: 0.8855 corine: [44, 255] + luisa: false # [0, 5230] natura: true ship_threshold: 400 max_depth: 50 min_shore_distance: 30000 excluder_resolution: 200 - potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: cutout: europe-2013-sarah @@ -208,12 +226,12 @@ renewable: orientation: slope: 35. azimuth: 180. - capacity_per_sqkm: 1.7 + capacity_per_sqkm: 5.1 # correction_factor: 0.854337 corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32] + luisa: false # [1111, 1121, 1122, 1123, 1130, 1210, 1221, 1222, 1230, 1241, 1242, 1310, 1320, 1330, 1410, 1421, 1422, 2110, 2120, 2130, 2210, 2220, 2230, 2310, 2410, 2420, 3210, 3320, 3330] natura: true excluder_resolution: 100 - potential: simple # or conservative clip_p_max_pu: 1.e-2 hydro: cutout: europe-2013-era5 @@ -237,10 +255,13 @@ lines: 220.: "Al/St 240/40 2-bundle 220.0" 300.: "Al/St 240/40 3-bundle 300.0" 380.: "Al/St 240/40 4-bundle 380.0" + 500.: "Al/St 240/40 4-bundle 380.0" + 750.: "Al/St 560/50 4-bundle 750.0" s_max_pu: 0.7 s_nom_max: .inf - max_extension: .inf + max_extension: 20000 #MW length_factor: 1.25 + reconnect_crimea: true under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity dynamic_line_rating: activate: false @@ -253,7 +274,7 @@ lines: links: p_max_pu: 1.0 p_nom_max: .inf - max_extension: .inf + max_extension: 30000 #MW include_tyndp: true under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity @@ -263,7 +284,7 @@ transformers: s_nom: 2000. 
type: '' -# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load +# docs-load in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#load load: power_statistics: true interpolate_limit: 3 @@ -288,6 +309,7 @@ pypsa_eur: - offwind-dc - solar - ror + - nuclear StorageUnit: - PHS - hydro @@ -338,6 +360,7 @@ existing_capacities: grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030] grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend 2020 threshold_capacity: 10 + default_heating_lifetime: 20 conventional_carriers: - lignite - coal @@ -350,11 +373,14 @@ sector: potential: 0.6 progress: 2020: 0.0 + 2025: 0.15 2030: 0.3 + 2035: 0.45 2040: 0.6 + 2045: 0.8 2050: 1.0 district_heating_loss: 0.15 - cluster_heat_buses: false + cluster_heat_buses: true bev_dsm_restriction_value: 0.75 bev_dsm_restriction_time: 7 transport_heating_deadband_upper: 20. @@ -374,18 +400,27 @@ sector: v2g: true land_transport_fuel_cell_share: 2020: 0 - 2030: 0.05 - 2040: 0.1 - 2050: 0.15 + 2025: 0 + 2030: 0 + 2035: 0 + 2040: 0 + 2045: 0 + 2050: 0 land_transport_electric_share: 2020: 0 - 2030: 0.25 - 2040: 0.6 - 2050: 0.85 + 2025: 0.15 + 2030: 0.3 + 2035: 0.45 + 2040: 0.7 + 2045: 0.85 + 2050: 1 land_transport_ice_share: 2020: 1 + 2025: 0.85 2030: 0.7 + 2035: 0.55 2040: 0.3 + 2045: 0.15 2050: 0 transport_fuel_cell_efficiency: 0.5 transport_internal_combustion_efficiency: 0.3 @@ -399,18 +434,27 @@ sector: shipping_hydrogen_liquefaction: false shipping_hydrogen_share: 2020: 0 + 2025: 0 2030: 0 + 2035: 0 2040: 0 + 2045: 0 2050: 0 shipping_methanol_share: 2020: 0 + 2025: 0.15 2030: 0.3 + 2035: 0.5 2040: 0.7 + 2045: 0.85 2050: 1 shipping_oil_share: 2020: 1 + 2025: 0.85 2030: 0.7 + 2035: 0.5 2040: 0.3 + 2045: 0.15 2050: 0 shipping_methanol_efficiency: 0.46 shipping_oil_efficiency: 0.40 @@ -439,22 +483,27 @@ sector: decentral: 3 central: 180 boilers: true + resistive_heaters: true oil_boilers: false biomass_boiler: true + overdimension_individual_heating: 1.1 #to cover demand peaks bigger than data chp: true micro_chp: false solar_thermal: true solar_cf_correction: 0.788457 # = >>> 1/1.2683 marginal_cost_storage: 0. 
#1e-4 methanation: true - helmeth: false coal_cc: false dac: true co2_vent: false + central_heat_vent: false allam_cycle: false hydrogen_fuel_cell: true hydrogen_turbine: false SMR: true + SMR_cc: true + regional_methanol_demand: false + regional_oil_demand: false regional_co2_sequestration_potential: enable: false attribute: 'conservative estimate Mt' @@ -464,8 +513,10 @@ sector: years_of_storage: 25 co2_sequestration_potential: 200 co2_sequestration_cost: 10 + co2_sequestration_lifetime: 50 co2_spatial: false co2network: false + co2_network_cost_factor: 1 cc_fraction: 0.9 hydrogen_underground_storage: true hydrogen_underground_storage_locations: @@ -473,14 +524,28 @@ sector: - nearshore # within 50 km of sea # - offshore ammonia: false - min_part_load_fischer_tropsch: 0.9 - min_part_load_methanolisation: 0.5 + min_part_load_fischer_tropsch: 0.7 + min_part_load_methanolisation: 0.3 + min_part_load_methanation: 0.3 use_fischer_tropsch_waste_heat: true + use_haber_bosch_waste_heat: true + use_methanolisation_waste_heat: true + use_methanation_waste_heat: true use_fuel_cell_waste_heat: true - use_electrolysis_waste_heat: false + use_electrolysis_waste_heat: true electricity_distribution_grid: true electricity_distribution_grid_cost_factor: 1.0 electricity_grid_connection: true + transmission_efficiency: + DC: + efficiency_static: 0.98 + efficiency_per_1000km: 0.977 + H2 pipeline: + efficiency_per_1000km: 1 # 0.979 + compression_per_1000km: 0.019 + gas pipeline: + efficiency_per_1000km: 1 #0.977 + compression_per_1000km: 0.01 H2_network: true gas_network: false H2_retrofit: false @@ -490,10 +555,25 @@ sector: gas_distribution_grid_cost_factor: 1.0 biomass_spatial: false biomass_transport: false + biogas_upgrading_cc: false conventional_generation: OCGT: gas biomass_to_liquid: false biosng: false + limit_max_growth: + enable: false + # allowing 30% larger than max historic growth + factor: 1.3 + max_growth: # unit GW + onwind: 16 # onshore max grow so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind + solar: 28 # solar max grow so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv + offwind-ac: 35 # offshore max grow so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/ + offwind-dc: 35 + max_relative_growth: + onwind: 3 + solar: 3 + offwind-ac: 3 + offwind-dc: 3 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry industry: @@ -526,14 +606,39 @@ industry: MWh_NH3_per_tNH3: 5.166 MWh_CH4_per_tNH3_SMR: 10.8 MWh_elec_per_tNH3_SMR: 0.7 - MWh_H2_per_tNH3_electrolysis: 6.5 - MWh_elec_per_tNH3_electrolysis: 1.17 + MWh_H2_per_tNH3_electrolysis: 5.93 + MWh_elec_per_tNH3_electrolysis: 0.2473 MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv NH3_process_emissions: 24.5 petrochemical_process_emissions: 25.5 - HVC_primary_fraction: 1. - HVC_mechanical_recycling_fraction: 0. - HVC_chemical_recycling_fraction: 0. 
+ #HVC primary/recycling based on values used in Neumann et al https://doi.org/10.1016/j.joule.2023.06.016, linearly interpolated between 2020 and 2050 + #2020 recycling rates based on Agora https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf + #fractions refer to the total primary HVC production in 2020 + #assumes 6.7 Mtplastics produced from recycling in 2020 + HVC_primary_fraction: + 2020: 1.0 + 2025: 0.9 + 2030: 0.8 + 2035: 0.7 + 2040: 0.6 + 2045: 0.5 + 2050: 0.4 + HVC_mechanical_recycling_fraction: + 2020: 0.12 + 2025: 0.15 + 2030: 0.18 + 2035: 0.21 + 2040: 0.24 + 2045: 0.27 + 2050: 0.30 + HVC_chemical_recycling_fraction: + 2020: 0.0 + 2025: 0.0 + 2030: 0.04 + 2035: 0.08 + 2040: 0.12 + 2045: 0.16 + 2050: 0.20 HVC_production_today: 52. MWh_elec_per_tHVC_mechanical_recycling: 0.547 MWh_elec_per_tHVC_chemical_recycling: 6.9 @@ -546,11 +651,13 @@ industry: hotmaps_locate_missing: false reference_year: 2015 + # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs costs: year: 2030 - version: v0.6.0 + version: v0.7.0 rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person) + social_discountrate: 0.02 fill_values: FOM: 0 VOM: 0 @@ -574,10 +681,13 @@ costs: battery: 0. battery inverter: 0. emission_prices: + enable: false co2: 0. + co2_monthly_prices: false # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#clustering clustering: + focus_weights: false simplify_network: to_substations: false algorithm: kmeans # choose from: [hac, kmeans] @@ -606,14 +716,22 @@ solving: skip_iterations: true rolling_horizon: false seed: 123 + custom_extra_functionality: "../data/custom_extra_functionality.py" + # io_api: "direct" # Increases performance but only supported for the highs and gurobi solvers # options that go into the optimize function track_iterations: false min_iterations: 4 max_iterations: 6 - transmission_losses: 0 + transmission_losses: 2 linearized_unit_commitment: true horizon: 365 + constraints: + CCL: false + EQ: false + BAU: false + SAFE: false + solver: name: gurobi options: gurobi-default @@ -668,6 +786,10 @@ solving: solutiontype: 2 # non basic solution, ie no crossover barrier.convergetol: 1.e-5 feasopt.tolerance: 1.e-6 + copt-default: + Threads: 8 + LpMethod: 2 + Crossover: 0 cbc-default: {} # Used in CI glpk-default: {} # Used in CI @@ -681,6 +803,13 @@ plotting: color_geomap: ocean: white land: white + projection: + name: "EqualEarth" + # See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for alternatives, for example: + # name: "LambertConformal" + # central_longitude: 10. + # central_latitude: 50. + # standard_parallels: [35, 65] eu_node_location: x: -5.5 y: 46. 
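A minimal sketch of how the new solving option custom_extra_functionality (pointing at data/custom_extra_functionality.py, added further below) could be filled in. It assumes PyPSA's linopy backend, where n.model holds the optimisation model while solving; the wind-capacity cap, its 300 GW (300,000 MW) limit and the constraint name are made-up illustrations, not part of this changeset:

    def custom_extra_functionality(n, snapshots, snakemake):
        # Hypothetical example: cap total extendable wind capacity at 300 GW.
        wind = n.generators.index[
            n.generators.p_nom_extendable
            & n.generators.carrier.isin(["onwind", "offwind-ac", "offwind-dc"])
        ]
        p_nom = n.model["Generator-p_nom"].loc[wind]
        n.model.add_constraints(p_nom.sum() <= 300_000, name="GlobalWindCap")  # MW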
@@ -703,6 +832,7 @@ plotting: H2: "Hydrogen Storage" lines: "Transmission Lines" ror: "Run of River" + load: "Load Shedding" ac: "AC" dc: "DC" @@ -726,7 +856,6 @@ plotting: hydroelectricity: '#298c81' PHS: '#51dbcc' hydro+PHS: "#08ad97" - wave: '#a7d4cf' # solar solar: "#f9d002" solar PV: "#f9d002" @@ -753,6 +882,7 @@ plotting: fossil gas: '#e05b09' natural gas: '#e05b09' biogas to gas: '#e36311' + biogas to gas CC: '#e51245' CCGT: '#a85522' CCGT marginal: '#a85522' allam: '#B98F76' @@ -764,6 +894,7 @@ plotting: gas pipeline new: '#a87c62' # oil oil: '#c9c9c9' + imported oil: '#a3a3a3' oil boiler: '#adadad' residential rural oil boiler: '#a9a9a9' services rural oil boiler: '#a5a5a5' @@ -782,6 +913,7 @@ plotting: Coal: '#545454' coal: '#545454' Coal marginal: '#545454' + coal for industry: '#343434' solid: '#545454' Lignite: '#826837' lignite: '#826837' @@ -852,12 +984,14 @@ plotting: # heat demand Heat load: '#cc1f1f' heat: '#cc1f1f' + heat vent: '#aa3344' heat demand: '#cc1f1f' rural heat: '#ff5c5c' residential rural heat: '#ff7c7c' services rural heat: '#ff9c9c' central heat: '#cc1f1f' urban central heat: '#d15959' + urban central heat vent: '#a74747' decentral heat: '#750606' residential urban decentral heat: '#a33c3c' services urban decentral heat: '#cc1f1f' @@ -870,9 +1004,11 @@ plotting: air heat pump: '#36eb41' residential urban decentral air heat pump: '#48f74f' services urban decentral air heat pump: '#5af95d' + services rural air heat pump: '#5af95d' urban central air heat pump: '#6cfb6b' ground heat pump: '#2fb537' residential rural ground heat pump: '#48f74f' + residential rural air heat pump: '#48f74f' services rural ground heat pump: '#5af95d' Ambient: '#98eb9d' CHP: '#8a5751' @@ -895,6 +1031,7 @@ plotting: H2 for shipping: "#ebaee0" H2: '#bf13a0' hydrogen: '#bf13a0' + retrofitted H2 boiler: '#e5a0d9' SMR: '#870c71' SMR CC: '#4f1745' H2 liquefaction: '#d647bd' @@ -919,7 +1056,6 @@ plotting: Sabatier: '#9850ad' methanation: '#c44ce6' methane: '#c44ce6' - helmeth: '#e899ff' # synfuels Fischer-Tropsch: '#25c49a' liquid: '#25c49a' @@ -934,6 +1070,7 @@ plotting: CO2 sequestration: '#f29dae' DAC: '#ff5270' co2 stored: '#f2385a' + co2 sequestered: '#f2682f' co2: '#f29dae' co2 vent: '#ffd4dc' CO2 pipeline: '#f5627f' @@ -965,3 +1102,4 @@ plotting: DC: "#8a1caf" DC-DC: "#8a1caf" DC link: "#8a1caf" + load: "#dd2e23" diff --git a/config/config.entsoe-all.yaml b/config/config.entsoe-all.yaml new file mode 100644 index 00000000..dd19d2c7 --- /dev/null +++ b/config/config.entsoe-all.yaml @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +run: + name: "entsoe-all" + disable_progressbar: true + shared_resources: false + shared_cutouts: true + +scenario: + simpl: + - '' + ll: + - vopt + clusters: + - 39 + - 128 + - 256 + opts: + - '' + sector_opts: + - '' + planning_horizons: + - '' + +# TODO add Turkey (TR) +countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MD', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK', 'UA'] + +electricity: + custom_powerplants: true + co2limit: 9.59e+7 + co2base: 1.918e+9 + +lines: + reconnect_crimea: true + +enable: + retrieve: true + retrieve_databundle: true + retrieve_sector_databundle: false + retrieve_cost_data: true + retrieve_cutout: true diff --git a/config/config.perfect.yaml b/config/config.perfect.yaml new file mode 100644 index 00000000..ff531303 --- /dev/null +++ 
b/config/config.perfect.yaml @@ -0,0 +1,46 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 +run: + name: "perfect" + +# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight +foresight: perfect + +# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario +# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html +scenario: + simpl: + - '' + ll: + - v1.0 + clusters: + - 37 + opts: + - '' + sector_opts: + - 1p5-4380H-T-H-B-I-A-dist1 + - 1p7-4380H-T-H-B-I-A-dist1 + - 2p0-4380H-T-H-B-I-A-dist1 + planning_horizons: + - 2020 + - 2025 + - 2030 + - 2035 + - 2040 + - 2045 + - 2050 + + +# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget +co2_budget: + # update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233) + 1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share + 1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt + 1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt + 2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt + + +sector: + min_part_load_fischer_tropsch: 0 + min_part_load_methanolisation: 0 diff --git a/config/test/config.electricity.yaml b/config/test/config.electricity.yaml index b750bf62..22c8e8d3 100644 --- a/config/test/config.electricity.yaml +++ b/config/test/config.electricity.yaml @@ -8,14 +8,14 @@ tutorial: true run: name: "test-elec" # use this to keep track of runs with different settings disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true scenario: clusters: - 5 opts: - - Co2L-24H + - Co2L-24h countries: ['BE'] diff --git a/config/test/config.myopic.yaml b/config/test/config.myopic.yaml index 0bb85ec6..2e7b3e6e 100644 --- a/config/test/config.myopic.yaml +++ b/config/test/config.myopic.yaml @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-myopic" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true foresight: myopic @@ -18,7 +18,7 @@ scenario: clusters: - 5 sector_opts: - - 24H-T-H-B-I-A-solar+p3-dist1 + - 24h-T-H-B-I-A-dist1 planning_horizons: - 2030 - 2040 @@ -30,6 +30,9 @@ snapshots: start: "2013-03-01" end: "2013-03-08" +sector: + central_heat_vent: true + electricity: co2limit: 100.e+6 diff --git a/config/test/config.overnight.yaml b/config/test/config.overnight.yaml index a2a0f5a4..8b98fea9 100644 --- a/config/test/config.overnight.yaml +++ b/config/test/config.overnight.yaml @@ -7,7 +7,7 @@ tutorial: true run: name: "test-sector-overnight" disable_progressbar: true - shared_resources: true + shared_resources: "test" shared_cutouts: true @@ -17,7 +17,7 @@ scenario: clusters: - 5 sector_opts: - - CO2L0-24H-T-H-B-I-A-solar+p3-dist1 + - CO2L0-24h-T-H-B-I-A-dist1 planning_horizons: - 2030 diff --git a/config/test/config.perfect.yaml b/config/test/config.perfect.yaml new file mode 100644 index 00000000..2e716066 --- /dev/null +++ b/config/test/config.perfect.yaml @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +tutorial: true + +run: + name: "test-sector-perfect" + disable_progressbar: true + shared_resources: "test" + shared_cutouts: true + +foresight: perfect + +scenario: + ll: + - v1.0 + clusters: + - 5 + sector_opts: + - 8760h-T-H-B-I-A-dist1 + planning_horizons: + - 2030 + - 2040 + - 
2050 + +countries: ['BE'] + +snapshots: + start: "2013-03-01" + end: "2013-03-08" + +electricity: + co2limit: 100.e+6 + + extendable_carriers: + Generator: [OCGT] + StorageUnit: [battery] + Store: [H2] + Link: [H2 pipeline] + + renewable_carriers: [solar, onwind, offwind-ac, offwind-dc] + +sector: + min_part_load_fischer_tropsch: 0 + min_part_load_methanolisation: 0 + +atlite: + default_cutout: be-03-2013-era5 + cutouts: + be-03-2013-era5: + module: era5 + x: [4., 15.] + y: [46., 56.] + time: ["2013-03-01", "2013-03-08"] + +renewable: + onwind: + cutout: be-03-2013-era5 + offwind-ac: + cutout: be-03-2013-era5 + max_depth: false + offwind-dc: + cutout: be-03-2013-era5 + max_depth: false + solar: + cutout: be-03-2013-era5 + +industry: + St_primary_fraction: + 2020: 0.8 + 2030: 0.6 + 2040: 0.5 + 2050: 0.4 + +solving: + solver: + name: glpk + options: glpk-default + mem: 4000 + +plotting: + map: + boundaries: + eu_node_location: + x: -5.5 + y: 46. + costs_max: 1000 + costs_threshold: 0.0000001 + energy_max: + energy_min: + energy_threshold: 0.000001 diff --git a/data/GDP_PPP_30arcsec_v3_mapped_default.csv b/data/GDP_PPP_30arcsec_v3_mapped_default.csv new file mode 100644 index 00000000..f0e640b3 --- /dev/null +++ b/data/GDP_PPP_30arcsec_v3_mapped_default.csv @@ -0,0 +1,151 @@ +name,GDP_PPP,country +3140,632728.0438507323,MD +3139,806541.9318093687,MD +3142,1392454.6690911907,MD +3152,897871.2903553953,MD +3246,645554.8588933202,MD +7049,1150156.4449477682,MD +1924,162285.16792916053,UA +1970,751970.6071848695,UA +2974,368873.75840156944,UA +2977,294847.85539198935,UA +2979,197988.13680768458,UA +2980,301371.2491126519,UA +3031,56925.21878805953,UA +3032,139395.18279351242,UA +3033,145377.8061037629,UA +3035,52282.83655208812,UA +3036,497950.25890516065,UA +3037,1183293.1987702171,UA +3038,255005.98207636533,UA +3039,224711.50098325178,UA +3040,342959.943226467,UA +3044,69119.31486955672,UA +3045,246273.65986119965,UA +3047,146742.08407299497,UA +3049,107265.7028733467,UA +3050,1126147.985259493,UA +3051,69833.56303043803,UA +3052,67230.88206577855,UA +3053,27019.224685201345,UA +3054,260571.47337292184,UA +3055,88760.94152915622,UA +3056,101368.26196568517,UA +3058,55752.92329667119,UA +3059,89024.37880630122,UA +3062,358411.291265149,UA +3064,75081.64142862396,UA +3065,158101.42949135564,UA +3066,83763.89576442329,UA +3068,173474.51218344545,UA +3069,60327.01572375589,UA +3070,18073.687271955278,UA +3071,249069.43314695224,UA +3072,220707.35700825177,UA +3073,61342.30137462664,UA +3074,254235.98867635374,UA +3077,769558.9832370486,UA +3078,132674.2315809836,UA +3079,1388517.1478032232,UA +3080,1861003.8718246964,UA +3082,140123.73854745473,UA +3083,834887.5595419679,UA +3084,1910795.5590558557,UA +3086,93828.36549170096,UA +3088,347197.65113392205,UA +3089,3754718.141734592,UA +3090,521912.69768585655,UA +3093,232818.05269714879,UA +3095,435376.20361377904,UA +3099,345596.5288937008,UA +3100,175689.10947424968,UA +3105,538438.9311459162,UA +3107,88096.86032871014,UA +3108,79847.68447063807,UA +3109,348504.73449373,UA +3144,71657.0165675802,UA +3146,80342.05037424155,UA +3158,74465.12922576343,UA +3164,3102112.2672631275,UA +3165,65215.04081671433,UA +3166,413924.2225725632,UA +3167,135060.0056434935,UA +3168,54980.442979330146,UA +3170,29584.879122227037,UA +3171,142780.68163047134,UA +3172,40436.63814695243,UA +3173,1253342.1790126422,UA +3174,173842.03139155387,UA +3176,65699.76352408895,UA +3177,143591.75419817626,UA +3178,56434.04525832523,UA +3179,389996.1670051216,UA 
+3180,138452.84503524794,UA +3181,67402.59500436619,UA +3184,51204.293695376415,UA +3185,46867.82356528432,UA +3186,103892.35612417295,UA +3187,193668.91476930346,UA +3189,54584.176457692694,UA +3190,219077.64942830536,UA +3197,88516.52699983507,UA +3198,298166.8272673622,UA +3199,61334.952541812374,UA +3229,175692.61136747137,UA +3230,106722.62773321665,UA +3236,61542.06264321315,UA +3241,83752.90489164277,UA +4301,48419.52825967164,UA +4305,147759.74280349456,UA +4306,53156.905740992224,UA +4315,218025.78516351627,UA +4317,155240.40554731718,UA +4318,1342144.2459407183,UA +4319,91669.1449633853,UA +4321,85852.49282415409,UA +4347,67938.7698430624,UA +4357,20064.979012172935,UA +4360,47840.51245168512,UA +4361,55580.924388032574,UA +4362,165753.82588729708,UA +4363,46390.2448142152,UA +4365,96265.47592938849,UA +4366,272003.25510057947,UA +4367,80878.50229245829,UA +4370,330072.35444044066,UA +4371,7707066.181975477,UA +4373,2019766.7891575783,UA +4374,985354.331818515,UA +4377,230805.08833664874,UA +4382,125670.67125287943,UA +4383,46914.065511740075,UA +4384,48020.804310510954,UA +4385,55612.34707641123,UA +4387,74558.3475791577,UA +4388,245243.33449409154,UA +4389,95696.56767732685,UA +4391,251085.7523045193,UA +4401,66375.82996856027,UA +4403,111954.41038437477,UA +4405,46911.68560148837,UA +4408,150782.51691456966,UA +4409,112776.7399582134,UA +4410,153076.56860965435,UA +4412,192629.31238456024,UA +4413,181295.3120834606,UA +4414,995694.9413199169,UA +4416,157640.7868989174,UA +4418,77580.20674809469,UA +4420,122320.99275223716,UA +4424,184891.10924920067,UA +4425,84486.75974340564,UA +4431,50485.84380961137,UA +4435,231040.45446464577,UA +4436,81222.18707585508,UA +4438,114819.76472988473,UA +4439,76839.1052178896,UA +4440,135337.0313562152,UA +4441,49159.485269198034,UA +7031,42001.73757065917,UA +7059,159790.48382874,UA +7063,39599.10564971086,UA diff --git a/data/custom_extra_functionality.py b/data/custom_extra_functionality.py new file mode 100644 index 00000000..e7a9df0f --- /dev/null +++ b/data/custom_extra_functionality.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023- The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + + +def custom_extra_functionality(n, snapshots, snakemake): + """ + Add custom extra functionality constraints. 
+ """ + pass diff --git a/data/custom_powerplants.csv b/data/custom_powerplants.csv index 4e039b7a..4fd47498 100644 --- a/data/custom_powerplants.csv +++ b/data/custom_powerplants.csv @@ -1 +1,37 @@ -Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,YearCommissioned,Retrofit,lat,lon,projectID,YearDecommissioning +,Name,Fueltype,Technology,Set,Country,Capacity,Efficiency,Duration,Volume_Mm3,DamHeight_m,StorageCapacity_MWh,DateIn,DateRetrofit,DateMothball,DateOut,lat,lon,EIC,projectID +1266,Khmelnitskiy,Nuclear,,PP,UA,1901.8916595755832,,0.0,0.0,0.0,0.0,1988.0,2005.0,,,50.3023,26.6466,[nan],"{'GEO': ['GEO3842'], 'GPD': ['WRI1005111'], 'CARMA': ['CARMA22000']}" +1268,Kaniv,Hydro,Reservoir,PP,UA,452.1656050955414,,0.0,0.0,0.0,0.0,1972.0,2003.0,,,49.76653,31.47165,[nan],"{'GEO': ['GEO43017'], 'GPD': ['WRI1005122'], 'CARMA': ['CARMA21140']}" +1269,Kahovska kakhovka,Hydro,Reservoir,PP,UA,352.45222929936307,,0.0,0.0,0.0,0.0,1955.0,1956.0,,,46.77858,33.36965,[nan],"{'GEO': ['GEO43018'], 'GPD': ['WRI1005118'], 'CARMA': ['CARMA20855']}" +1347,Kharkiv,Natural Gas,Steam Turbine,CHP,UA,494.94274967602314,,0.0,0.0,0.0,0.0,1979.0,1980.0,,,49.9719,36107,[nan],"{'GEO': ['GEO43027'], 'GPD': ['WRI1005126'], 'CARMA': ['CARMA21972']}" +1348,Kremenchuk,Hydro,Reservoir,PP,UA,617.0382165605096,,0.0,0.0,0.0,0.0,1959.0,1960.0,,,49.07759,33.2505,[nan],"{'GEO': ['GEO43019'], 'GPD': ['WRI1005121'], 'CARMA': ['CARMA23072']}" +1377,Krivorozhskaya,Hard Coal,Steam Turbine,PP,UA,2600.0164509342876,,0.0,0.0,0.0,0.0,1965.0,1992.0,,,47.5432,33.6583,[nan],"{'GEO': ['GEO42989'], 'GPD': ['WRI1005100'], 'CARMA': ['CARMA23176']}" +1407,Zmiyevskaya zmiivskaya,Hard Coal,Steam Turbine,PP,UA,2028.3816283884514,,0.0,0.0,0.0,0.0,1960.0,2005.0,,,49.5852,36.5231,[nan],"{'GEO': ['GEO42999'], 'GPD': ['WRI1005103'], 'CARMA': ['CARMA51042']}" +1408,Pridneprovskaya,Hard Coal,Steam Turbine,CHP,UA,1627.3152609570984,,0.0,0.0,0.0,0.0,1959.0,1966.0,,,48.4051,35.1131,[nan],"{'GEO': ['GEO42990'], 'GPD': ['WRI1005102'], 'CARMA': ['CARMA35874']}" +1409,Kurakhovskaya,Hard Coal,Steam Turbine,PP,UA,1371.0015824607397,,0.0,0.0,0.0,0.0,1972.0,2003.0,,,47.9944,37.24022,[nan],"{'GEO': ['GEO42994'], 'GPD': ['WRI1005104'], 'CARMA': ['CARMA23339']}" +1410,Dobrotvorsky,Hard Coal,Steam Turbine,PP,UA,553.1949895604868,,0.0,0.0,0.0,0.0,1960.0,1964.0,,,50.2133,24375,[nan],"{'GEO': ['GEO42992'], 'GPD': ['WRI1005096'], 'CARMA': ['CARMA10971']}" +1422,Zuyevskaya,Hard Coal,Steam Turbine,PP,UA,1147.87960333801,,0.0,0.0,0.0,0.0,1982.0,2007.0,,,48.0331,38.28615,[nan],"{'GEO': ['GEO42995'], 'GPD': ['WRI1005106'], 'CARMA': ['CARMA51083']}" +1423,Zaporozhye,Nuclear,,PP,UA,5705.67497872675,,0.0,0.0,0.0,0.0,1985.0,1996.0,,,47.5119,34.5863,[nan],"{'GEO': ['GEO6207'], 'GPD': ['WRI1005114'], 'CARMA': ['CARMA50875']}" +1424,Trypilska,Hard Coal,Steam Turbine,PP,UA,1659.5849686814602,,0.0,0.0,0.0,0.0,1969.0,1972.0,,,50.1344,30.7468,[nan],"{'GEO': ['GEO43000'], 'GPD': ['WRI1005099'], 'CARMA': ['CARMA46410']}" +1425,Tashlyk,Hydro,Pumped Storage,Store,UA,285.55968954109585,,0.0,0.0,0.0,0.0,2006.0,2007.0,,,47.7968,31.1811,[nan],"{'GEO': ['GEO43025'], 'GPD': ['WRI1005117'], 'CARMA': ['CARMA44696']}" +1426,Starobeshivska,Hard Coal,Steam Turbine,PP,UA,1636.5351774497733,,0.0,0.0,0.0,0.0,1961.0,1967.0,,,47.7997,38.00612,[nan],"{'GEO': ['GEO43003'], 'GPD': ['WRI1005105'], 'CARMA': ['CARMA43083']}" +1427,South,Nuclear,,PP,UA,2852.837489363375,,0.0,0.0,0.0,0.0,1983.0,1989.0,,,47812,31.22,[nan],"{'GEO': ['GEO5475'], 'GPD': ['WRI1005113'], 'CARMA': 
['CARMA42555']}" +1428,Rovno rivne,Nuclear,,PP,UA,2695.931427448389,,0.0,0.0,0.0,0.0,1981.0,2006.0,,,51.3245,25.89744,[nan],"{'GEO': ['GEO5174'], 'GPD': ['WRI1005112'], 'CARMA': ['CARMA38114']}" +1429,Ladyzhinska,Hard Coal,Steam Turbine,PP,UA,1659.5849686814602,,0.0,0.0,0.0,0.0,1970.0,1971.0,,,48706,29.2202,[nan],"{'GEO': ['GEO42993'], 'GPD': ['WRI1005098'], 'CARMA': ['CARMA24024']}" +1430,Kiev,Hydro,Pumped Storage,PP,UA,635.8694635681177,,0.0,0.0,0.0,0.0,1964.0,1972.0,,,50.5998,30501,"[nan, nan]","{'GEO': ['GEO43024', 'GEO43023'], 'GPD': ['WRI1005123', 'WRI1005124'], 'CARMA': ['CARMA23516', 'CARMA23517']}" +2450,Cet chisinau,Natural Gas,,PP,MD,306.0,,0.0,0.0,0.0,0.0,,,,,47.027550000000005,28.8801,"[nan, nan]","{'GPD': ['WRI1002985', 'WRI1002984'], 'CARMA': ['CARMA8450', 'CARMA8451']}" +2460,Hydropower che costesti,Hydro,,PP,MD,16.0,,0.0,0.0,0.0,0.0,1978.0,,,,47.8381,27.2246,[nan],"{'GPD': ['WRI1002987'], 'CARMA': ['CARMA9496']}" +2465,Moldavskaya gres,Hard Coal,,PP,MD,2520.0,,0.0,0.0,0.0,0.0,,,,,46.6292,29.9407,[nan],"{'GPD': ['WRI1002989'], 'CARMA': ['CARMA28979']}" +2466,Hydropower dubasari,Hydro,,PP,MD,48.0,,0.0,0.0,0.0,0.0,,,,,47.2778,29123,[nan],"{'GPD': ['WRI1002988'], 'CARMA': ['CARMA11384']}" +2676,Cet nord balti,Natural Gas,,PP,MD,24.0,,0.0,0.0,0.0,0.0,,,,,47.7492,27.8938,[nan],"{'GPD': ['WRI1002986'], 'CARMA': ['CARMA3071']}" +2699,Dniprodzerzhynsk,Hydro,Reservoir,PP,UA,360.3503184713376,,0.0,0.0,0.0,0.0,1963.0,1964.0,,,48.5485,34.541015,[nan],"{'GEO': ['GEO43020'], 'GPD': ['WRI1005119']}" +2707,Burshtynska tes,Hard Coal,Steam Turbine,PP,UA,2212.779958241947,,0.0,0.0,0.0,0.0,1965.0,1984.0,,,49.21038,24.66654,[nan],"{'GEO': ['GEO42991'], 'GPD': ['WRI1005097']}" +2708,Danipro dnieper,Hydro,Reservoir,PP,UA,1484.8407643312103,,0.0,0.0,0.0,0.0,1932.0,1947.0,,,47.86944,35.08611,[nan],"{'GEO': ['GEO43016'], 'GPD': ['WRI1005120']}" +2709,Dniester,Hydro,Pumped Storage,Store,UA,612.7241020616891,,0.0,0.0,0.0,0.0,2009.0,2011.0,,,48.51361,27.47333,[nan],"{'GEO': ['GEO43022'], 'GPD': ['WRI1005116', 'WRI1005115']}" +2710,Kiev,Natural Gas,Steam Turbine,CHP,UA,458.2803237740955,,0.0,0.0,0.0,0.0,1982.0,1984.0,,,50532,30.6625,[nan],"{'GEO': ['GEO42998'], 'GPD': ['WRI1005125']}" +2712,Luganskaya,Hard Coal,Steam Turbine,PP,UA,1060.2903966575996,,0.0,0.0,0.0,0.0,1962.0,1969.0,,,48.74781,39.2624,[nan],"{'GEO': ['GEO42996'], 'GPD': ['WRI1005110']}" +2713,Slavyanskaya,Hard Coal,Steam Turbine,PP,UA,737.5933194139823,,0.0,0.0,0.0,0.0,1971.0,1971.0,,,48872,37.76567,[nan],"{'GEO': ['GEO43002'], 'GPD': ['WRI1005109']}" +2714,Vuhlehirska uglegorskaya,Hard Coal,Steam Turbine,PP,UA,3319.1699373629203,,0.0,0.0,0.0,0.0,1972.0,1977.0,,,48.4633,38.20328,[nan],"{'GEO': ['GEO43001'], 'GPD': ['WRI1005107']}" +2715,Zaporiska,Hard Coal,Steam Turbine,PP,UA,3319.1699373629203,,0.0,0.0,0.0,0.0,1972.0,1977.0,,,47.5089,34.6253,[nan],"{'GEO': ['GEO42988'], 'GPD': ['WRI1005101']}" +3678,Mironovskaya,Hard Coal,,PP,UA,815.0,,0.0,0.0,0.0,0.0,,,,,48.3407,38.4049,[nan],"{'GPD': ['WRI1005108'], 'CARMA': ['CARMA28679']}" +3679,Kramatorskaya,Hard Coal,,PP,UA,120.0,,0.0,0.0,0.0,0.0,1974.0,,,,48.7477,37.5723,[nan],"{'GPD': ['WRI1075856'], 'CARMA': ['CARMA54560']}" +3680,Chernihiv,Hard Coal,,PP,UA,200.0,,0.0,0.0,0.0,0.0,1968.0,,,,51455,31.2602,[nan],"{'GPD': ['WRI1075853'], 'CARMA': ['CARMA8190']}" diff --git a/data/eia_hydro_annual_generation.csv b/data/eia_hydro_annual_generation.csv index 9b781ee3..859decf7 100644 --- a/data/eia_hydro_annual_generation.csv +++ b/data/eia_hydro_annual_generation.csv @@ -1,50 +1,53 @@ 
-https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&s=315532800000&e=1577836800000&ev=false& -Report generated on: 03-28-2022 11:20:48 -"API","","1980","1981","1982","1983","1984","1985","1986","1987","1988","1989","1990","1991","1992","1993","1994","1995","1996","1997","1998","1999","2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016","2017","2018","2019","2020" -"","hydroelectricity net generation (billion kWh)","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","","" -"INTL.33-12-EURO-BKWH.A"," Europe","458.018","464.155","459.881","473.685","481.241","476.739","459.535","491.085","534.517","465.365","474.466","475.47","509.041","526.448","531.815","543.743","529.114164","543.845616","562.441501","569.308453","591.206662","587.371195","541.542535","506.19703","544.536443","545.176179","537.335934","540.934407","567.557921","564.244482","619.96477","543.05273","600.46622","631.86431","619.59229","615.53013","629.98906","562.59258","619.31106","610.62616","670.925" -"INTL.33-12-ALB-BKWH.A"," Albania","2.919","3.018","3.093","3.167","3.241","3.315","3.365","3.979","3.713","3.846","2.82","3.483","3.187","3.281","3.733","4.162","5.669","4.978","4.872","5.231","4.548","3.519","3.477","5.117","5.411","5.319","4.951","2.76","3.759","5.201","7.49133","4.09068","4.67775","6.88941","4.67676","5.83605","7.70418","4.47975","8.46648","5.15394","5.281" -"INTL.33-12-AUT-BKWH.A"," Austria","28.501","30.008","29.893","29.577","28.384","30.288","30.496","25.401","35.151","34.641","31.179","31.112","34.483","36.336","35.349","36.696","33.874","35.744","36.792","40.292","41.418","40.05","39.825","32.883","36.394","36.31","35.48","36.732","37.969","40.487","36.466","32.511","41.862","40.138","39.001","35.255","37.954","36.462","35.73","40.43655","45.344" -"INTL.33-12-BEL-BKWH.A"," Belgium","0.274","0.377","0.325","0.331","0.348","0.282","0.339","0.425","0.354","0.3","0.263","0.226","0.338","0.252","0.342","0.335","0.237","0.30195","0.38511","0.338","0.455","0.437","0.356","0.245","0.314","0.285","0.355","0.385","0.406","0.325","0.298","0.193","0.353","0.376","0.289","0.314","0.367","0.268","0.311","0.108","1.29" -"INTL.33-12-BIH-BKWH.A"," Bosnia and Herzegovina","--","--","--","--","--","--","--","--","--","--","--","--","3.374","2.343","3.424","3.607","5.104","4.608","4.511","5.477","5.043","5.129","5.215","4.456","5.919","5.938","5.798","3.961","4.818","6.177","7.946","4.343","4.173","7.164","5.876","5.495","5.585","3.7521","6.35382","6.02019","6.1" -"INTL.33-12-BGR-BKWH.A"," Bulgaria","3.674","3.58","3.018","3.318","3.226","2.214","2.302","2.512","2.569","2.662","1.859","2.417","2.042","1.923","1.453","2.291","2.89","2.726","3.066","2.725","2.646","1.72","2.172","2.999","3.136","4.294","4.196","2.845","2.796","3.435","4.98168","2.84328","3.14622","3.99564","4.55598","5.59845","3.8412","2.79972","5.09553","3.34917","3.37" -"INTL.33-12-HRV-BKWH.A"," Croatia","--","--","--","--","--","--","--","--","--","--","--","--","4.298","4.302","4.881","5.212","7.156","5.234","5.403","6.524","5.794","6.482","5.311","4.827","6.888","6.27","5.94","4.194","5.164","6.663","9.035","4.983","4.789","8.536","8.917","6.327","6.784","5.255","7.62399","5.87268","3.4" -"INTL.33-12-CYP-BKWH.A"," 
Cyprus","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" -"INTL.33-12-CZE-BKWH.A"," Czech Republic","--","--","--","--","--","--","--","--","--","--","--","--","--","1.355","1.445","1.982","1.949","1.68201","1.382","1.664","1.7404","2.033","2.467","1.369","1.999","2.356","2.525","2.068","2.004","2.405","2.775","1.95","2.107","2.704","1.909","1.779","1.983","1.852","1.615","1.98792","3.4" -"INTL.33-12-DNK-BKWH.A"," Denmark","0.03","0.031","0.028","0.036","0.028","0.027","0.029","0.029","0.032","0.027","0.027","0.026","0.028","0.027","0.033","0.03","0.019","0.019","0.02673","0.031","0.03","0.028","0.032","0.021","0.027","0.023","0.023","0.028","0.026","0.019","0.021","0.017","0.017","0.013","0.015","0.018","0.019","0.018","0.015","0.01584","0.02" -"INTL.33-12-EST-BKWH.A"," Estonia","--","--","--","--","--","--","--","--","--","--","--","--","0.001","0.001","0.003","0.002","0.002","0.003","0.004","0.004","0.005","0.007","0.006","0.013","0.022","0.022","0.014","0.021","0.028","0.032","0.027","0.03","0.042","0.026","0.027","0.027","0.035","0.026","0.015","0.01881","0.04" -"INTL.33-12-FRO-BKWH.A"," Faroe Islands","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.049","0.062","0.071","0.074","0.074","0.083","0.073","0.075","0.075","0.069564","0.075066","0.076501","0.069453","0.075262","0.075195","0.095535","0.08483","0.093443","0.097986","0.099934","0.103407","0.094921","0.091482","0.06676","0.092","0.099","0.091","0.121","0.132","0.105","0.11","0.107","0.102","0.11" -"INTL.33-12-FIN-BKWH.A"," Finland","10.115","13.518","12.958","13.445","13.115","12.211","12.266","13.658","13.229","12.9","10.75","13.065","14.956","13.341","11.669","12.796","11.742","12.11958","14.9","12.652","14.513","13.073","10.668","9.495","14.919","13.646","11.379","14.035","16.941","12.559","12.743","12.278","16.667","12.672","13.24","16.584","15.634","14.61","13.137","12.31461","15.56" -"INTL.33-12-CSK-BKWH.A"," Former Czechoslovakia","4.8","4.2","3.7","3.9","3.2","4.3","4","4.853","4.355","4.229","3.919","3.119","3.602","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" -"INTL.33-12-SCG-BKWH.A"," Former Serbia and Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","11.23","10.395","11.016","12.071","14.266","12.636","12.763","13.243","11.88","12.326","11.633","9.752","11.01","11.912","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" -"INTL.33-12-YUG-BKWH.A"," Former Yugoslavia","27.868","25.044","23.295","21.623","25.645","24.363","27.474","25.98","25.612","23.256","19.601","18.929","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" -"INTL.33-12-FRA-BKWH.A"," France","68.253","70.358","68.6","67.515","64.01","60.248","60.953","68.623","73.952","45.744","52.796","56.277","68.313","64.3","78.057","72.196","64.43","63.151","61.479","71.832","66.466","73.888","59.992","58.567","59.276","50.965","55.741","57.029","63.017","56.428","61.945","45.184","59.099","71.042","62.993","54.876","60.094","49.389","64.485","56.98242","64.84" -"INTL.33-12-DEU-BKWH.A"," 
Germany","--","--","--","--","--","--","--","--","--","--","--","14.742","17.223","17.699","19.731","21.562","21.737","17.18343","17.044","19.451","21.515","22.506","22.893","19.071","20.866","19.442","19.808","20.957","20.239","18.841","20.678","17.323","21.331","22.66","19.31","18.664","20.214","19.985","17.815","19.86039","24.75" -"INTL.33-12-DDR-BKWH.A"," Germany, East","1.658","1.718","1.748","1.683","1.748","1.758","1.767","1.726","1.719","1.551","1.389","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" -"INTL.33-12-DEUW-BKWH.A"," Germany, West","17.125","17.889","17.694","16.713","16.434","15.354","16.526","18.36","18.128","16.482","15.769","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--" -"INTL.33-12-GIB-BKWH.A"," Gibraltar","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" -"INTL.33-12-GRC-BKWH.A"," Greece","3.396","3.398","3.551","2.331","2.852","2.792","3.222","2.768","2.354","1.888","1.751","3.068","2.181","2.26","2.573","3.494","4.305","3.84318","3.68","4.546","3.656","2.076","2.772","4.718","4.625","4.967","5.806","2.565","3.279","5.32","7.431","3.998","4.387","6.337","4.464","5.782","5.543","3.962","5.035","3.9798","3.43" -"INTL.33-12-HUN-BKWH.A"," Hungary","0.111","0.166","0.158","0.153","0.179","0.153","0.152","0.167","0.167","0.156","0.176","0.192","0.156","0.164","0.159","0.161","0.205","0.21384","0.15345","0.179","0.176","0.184","0.192","0.169","0.203","0.2","0.184","0.208","0.211","0.226","0.184","0.216","0.206","0.208","0.294","0.227","0.253","0.214","0.216","0.21681","0.24" -"INTL.33-12-ISL-BKWH.A"," Iceland","3.053","3.085","3.407","3.588","3.738","3.667","3.846","3.918","4.169","4.217","4.162","4.162","4.267","4.421","4.47","4.635","4.724","5.15493","5.565","5.987","6.292","6.512","6.907","7.017","7.063","6.949","7.22","8.31","12.303","12.156","12.51","12.382","12.214","12.747","12.554","13.541","13.092","13.892","13.679","13.32441","12.46" -"INTL.33-12-IRL-BKWH.A"," Ireland","0.833","0.855","0.792","0.776","0.68","0.824","0.91","0.673","0.862","0.684","0.69","0.738","0.809","0.757","0.911","0.706","0.715","0.67122","0.907","0.838","0.838","0.59","0.903","0.592","0.624","0.625","0.717","0.66","0.959","0.893","0.593","0.699","0.795","0.593","0.701","0.798","0.674","0.685","0.687","0.87813","1.21" -"INTL.33-12-ITA-BKWH.A"," Italy","44.997","42.782","41.216","40.96","41.923","40.616","40.626","39.05","40.205","33.647","31.31","41.817","41.778","41.011","44.212","37.404","41.617","41.18697","40.808","44.911","43.763","46.343","39.125","33.303","41.915","35.706","36.624","32.488","41.207","48.647","50.506","45.36477","41.45625","52.24626","57.95955","45.08163","42.00768","35.83701","48.29913","45.31824","47.72" -"INTL.33-12-XKS-BKWH.A"," Kosovo","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","0.075","0.119","0.154","0.104","0.095","0.142","0.149","0.139","0.243","0.177","0.27027","0.2079","0.26" -"INTL.33-12-LVA-BKWH.A"," 
Latvia","--","--","--","--","--","--","--","--","--","--","--","--","2.498","2.846","3.272","2.908","1.841","2.922","2.99","2.729","2.791","2.805","2.438","2.243","3.078","3.293","2.671","2.706","3.078","3.422","3.488","2.857","3.677","2.838","1.953","1.841","2.523","4.356","2.417","2.08692","2.59" -"INTL.33-12-LTU-BKWH.A"," Lithuania","--","--","--","--","--","--","--","--","--","--","--","--","0.308","0.389","0.447","0.369","0.323","0.291","0.413","0.409","0.336","0.322","0.35","0.323","0.417","0.446193","0.393","0.417","0.398","0.42","0.535","0.475","0.419","0.516","0.395","0.346","0.45","0.597","0.427","0.34254","1.06" -"INTL.33-12-LUX-BKWH.A"," Luxembourg","0.086","0.095","0.084","0.083","0.088","0.071","0.084","0.101","0.097","0.072","0.07","0.083","0.069","0.066","0.117","0.087","0.059","0.082","0.114","0.084","0.119","0.117","0.098","0.078","0.103","0.093","0.11","0.116","0.131","0.105","0.104","0.061","0.095","0.114","0.104","0.095","0.111","0.082","0.089","0.10593","1.09" -"INTL.33-12-MLT-BKWH.A"," Malta","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0","0" -"INTL.33-12-MNE-BKWH.A"," Montenegro","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","1.733","1.271","1.524","2.05","2.723","1.192","1.462","2.479","1.734","1.476","1.825","1.014","2.09187","1.78","1.8" -"INTL.33-12-NLD-BKWH.A"," Netherlands","0","0","0","0","0","0.003","0.003","0.001","0.002","0.037","0.119","0.079","0.119","0.091","0.1","0.087","0.079","0.09108","0.111","0.089","0.141","0.116","0.109","0.071","0.094","0.087","0.105","0.106","0.101","0.097","0.105","0.057","0.104","0.114","0.112","0.093","0.1","0.061","0.072","0.07326","0.05" -"INTL.33-12-MKD-BKWH.A"," North Macedonia","--","--","--","--","--","--","--","--","--","--","--","--","0.817","0.517","0.696","0.793","0.842","0.891","1.072","1.375","1.158","0.62","0.749","1.36","1.467","1.477","1.634","1","0.832","1.257","2.407","1.419","1.031","1.568","1.195","1.846","1.878","1.099","1.773","1.15236","1.24" -"INTL.33-12-NOR-BKWH.A"," Norway","82.717","91.876","91.507","104.704","104.895","101.464","95.321","102.341","107.919","117.369","119.933","109.032","115.505","118.024","110.398","120.315","102.823","108.677","114.546","120.237","140.4","119.258","128.078","104.425","107.693","134.331","118.175","132.319","137.654","124.03","116.257","119.78","141.189","127.551","134.844","136.662","142.244","141.651","138.202","123.66288","141.69" -"INTL.33-12-POL-BKWH.A"," Poland","2.326","2.116","1.528","1.658","1.394","1.833","1.534","1.644","1.775","1.593","1.403","1.411","1.492","1.473","1.716","1.868","1.912","1.941","2.286","2.133","2.085","2.302","2.256","1.654","2.06","2.179","2.022","2.328","2.13","2.351","2.9","2.313","2.02","2.421","2.165","1.814","2.117","2.552","1.949","1.93842","2.93" -"INTL.33-12-PRT-BKWH.A"," Portugal","7.873","4.934","6.82","7.897","9.609","10.512","8.364","9.005","12.037","5.72","9.065","8.952","4.599","8.453","10.551","8.26","14.613","12.97395","12.853","7.213","11.21","13.894","7.722","15.566","9.77","4.684","10.892","9.991","6.73","8.201","15.954","11.423","5.589","13.652","15.471","8.615","15.608","5.79","12.316","8.6526","13.96" -"INTL.33-12-ROU-BKWH.A"," 
Romania","12.506","12.605","11.731","9.934","11.208","11.772","10.688","11.084","13.479","12.497","10.87","14.107","11.583","12.64","12.916","16.526","15.597","17.334","18.69","18.107","14.63","14.774","15.886","13.126","16.348","20.005","18.172","15.806","17.023","15.379","19.684","14.581","11.945","14.807","18.618","16.467","17.848","14.349","17.48736","15.65289","15.53" -"INTL.33-12-SRB-BKWH.A"," Serbia","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","--","10.855","9.937","9.468","10.436","11.772","8.58","9.193","10.101","10.893","9.979","10.684","9.061","10.53261","10.07028","9.66" -"INTL.33-12-SVK-BKWH.A"," Slovakia","--","--","--","--","--","--","--","--","--","--","--","--","--","3.432","4.311","4.831","4.185","4.023","4.224","4.429","4.569","4.878","5.215","3.4452","4.059","4.592","4.355","4.406","4","4.324","5.184","3.211","3.687","4.329","3.762","3.701","4.302","4.321","3.506","4.27383","4.67" -"INTL.33-12-SVN-BKWH.A"," Slovenia","--","--","--","--","--","--","--","--","--","--","--","--","3.379","2.974","3.348","3.187","3.616","3.046","3.4","3.684","3.771","3.741","3.265","2.916","4.033","3.426","3.555","3.233","3.978","4.666","4.452","3.506","3.841","4.562","6.011","3.75","4.443","3.814","4.643","4.43421","5.24" -"INTL.33-12-ESP-BKWH.A"," Spain","29.16","21.64","25.99","26.696","31.088","30.895","26.105","27.016","34.76","19.046","25.16","27.01","18.731","24.133","27.898","22.881","39.404","34.43","33.665","22.634","29.274","40.617","22.691","40.643","31.359","18.209","25.699","27.036","23.13","26.147","41.576","30.07","20.192","36.45","38.815","27.656","35.77","18.007","33.743","24.23025","33.34" -"INTL.33-12-SWE-BKWH.A"," Sweden","58.133","59.006","54.369","62.801","67.106","70.095","60.134","70.95","69.016","70.911","71.778","62.603","73.588","73.905","58.508","67.421","51.2226","68.365","74.25","70.974","77.798","78.269","65.696","53.005","59.522","72.075","61.106","65.497","68.378","65.193","66.279","66.047","78.333","60.81","63.227","74.734","61.645","64.651","61.79","64.46583","71.6" -"INTL.33-12-CHE-BKWH.A"," Switzerland","32.481","35.13","35.974","35.069","29.871","31.731","32.576","34.328","35.437","29.477","29.497","31.756","32.373","35.416","38.678","34.817","28.458","33.70257","33.136","39.604","36.466","40.895","34.862","34.471","33.411","30.914","30.649","34.898","35.676","35.366","35.704","32.069","38.218","38.08","37.659","37.879","34.281","33.754","34.637","37.6596","40.62" -"INTL.33-12-TUR-BKWH.A"," Turkey","11.159","12.308","13.81","11.13","13.19","11.822","11.637","18.314","28.447","17.61","22.917","22.456","26.302","33.611","30.28","35.186","40.07","39.41784","41.80671","34.33","30.57","23.77","33.346","34.977","45.623","39.165","43.802","35.492","32.937","35.598","51.423","51.155","56.669","58.225","39.75","65.856","66.686","57.824","59.49","87.99714","77.39" -"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.067","5.283","5.035","4.015","4.74","3.195","4.795","4.873","4.547","5.026","5.094","5.178","3.566","5.655","5.286","4.667","5.832","6.246","5.342","5.836","5.189","5.89941","7.64" 
+https://www.eia.gov/international/data/world/electricity/electricity-generation?pd=2&p=000000000000000000000000000000g&u=1&f=A&v=mapbubble&a=-&i=none&vo=value&t=R&g=000000000000002&l=73-1028i008017kg6368g80a4k000e0ag00gg0004g8g0ho00g000400008&l=72-00000000000000000000000000080000000000000000000g&s=315532800000&e=1609459200000&ev=false&,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +Report generated on: 01-06-2023 21:17:46,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +API,,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021 +,hydroelectricity net generation (billion kWh),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +INTL.33-12-EURO-BKWH.A, Europe,"458,018","464,155","459,881","473,685","481,241","476,739","459,535","491,085","534,517","465,365","474,466","475,47","509,041","526,448","531,815","543,743","529,114164","543,845616","562,491501","566,861453","588,644662","584,806195","539,051405","503,7067","542,112443","542,974669","535,006084","538,449707","565,143111","561,761402","617,547148","540,926277","598,055253","629,44709","617,111295","613,079848","627,720566217","560,362524","616,5081462","606,5997419","644,1106599","628,1390143" +INTL.33-12-ALB-BKWH.A, Albania,"2,919","3,018","3,093","3,167","3,241","3,315","3,365","3,979","3,713","3,846","2,82","3,483","3,187","3,281","3,733","4,162","5,669","4,978","4,872","5,231","4,548","3,519","3,477","5,117","5,411","5,319","4,951","2,76","3,759","5,201","7,49133","4,09068","4,67775","6,88941","4,67676","5,83605","7,70418","4,47975","8,46648","5,15394","5,281","8,891943" +INTL.33-12-AUT-BKWH.A, Austria,"28,501","30,008","29,893","29,577","28,384","30,288","30,496","25,401","35,151","34,641","31,179","31,112","34,483","36,336","35,349","36,696","33,874","35,744","36,792","40,292","41,418","40,05","39,825","32,883","36,394","36,31","35,48","36,732","37,969","40,487","36,466","32,511","41,862","40,138","39,001","35,255","37,954","36,462","35,73","40,43655","41,9356096","38,75133" +INTL.33-12-BEL-BKWH.A, Belgium,"0,274","0,377","0,325","0,331","0,348","0,282","0,339","0,425","0,354","0,3","0,263","0,226","0,338","0,252","0,342","0,335","0,237","0,30195","0,38511","0,338","0,455","0,437","0,356","0,245","0,314","0,285","0,355","0,385","0,406","0,325","0,298","0,193","0,353","0,376","0,289","0,314","0,367","0,268","0,3135","0,302","0,2669","0,3933" +INTL.33-12-BIH-BKWH.A, Bosnia and Herzegovina,--,--,--,--,--,--,--,--,--,--,--,--,"3,374","2,343","3,424","3,607","5,104","4,608","4,511","5,477","5,043","5,129","5,215","4,456","5,919","5,938","5,798","3,961","4,818","6,177","7,946","4,343","4,173","7,164","5,876","5,495","5,585","3,7521","6,35382","6,02019","4,58","6,722" +INTL.33-12-BGR-BKWH.A, Bulgaria,"3,674","3,58","3,018","3,318","3,226","2,214","2,302","2,512","2,569","2,662","1,859","2,417","2,042","1,923","1,453","2,291","2,89","2,726","3,066","2,725","2,646","1,72","2,172","2,999","3,136","4,294","4,196","2,845","2,796","3,435","4,98168","2,84328","3,14622","3,99564","4,55598","5,59845","3,8412","2,79972","5,09553","2,929499","2,820398","4,819205" +INTL.33-12-HRV-BKWH.A, Croatia,--,--,--,--,--,--,--,--,--,--,--,--,"4,298","4,302","4,881","5,212","7,156","5,234","5,403","6,524","5,794","6,482","5,311","4,827","6,888","6,27","5,94","4,194","5,164","6,663","9,035","4,983","4,789","8,536","8,917","6,327","6,784","5,255","7,62399","5,87268","5,6624","7,1277" +INTL.33-12-CYP-BKWH.A, 
Cyprus,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +INTL.33-12-CZE-BKWH.A, Czechia,--,--,--,--,--,--,--,--,--,--,--,--,--,"1,355","1,445","1,982","1,949","1,68201","1,382","1,664","1,7404","2,033","2,467","1,369","1,999","2,356","2,525","2,068","2,004","2,405","2,775","1,95","2,107","2,704","1,909","1,779","1,983","1,852","1,615","1,98792","2,143884","2,40852" +INTL.33-12-DNK-BKWH.A, Denmark,"0,03","0,031","0,028","0,036","0,028","0,027","0,029","0,029","0,032","0,027","0,027","0,026","0,028","0,027","0,033","0,03","0,019","0,019","0,02673","0,031","0,03","0,028","0,032","0,021","0,027","0,023","0,023","0,028","0,026","0,019","0,021","0,017","0,017","0,013","0,015","0,01803","0,01927","0,017871","0,0148621","0,0172171","0,017064","0,016295" +INTL.33-12-EST-BKWH.A, Estonia,--,--,--,--,--,--,--,--,--,--,--,--,"0,001","0,001","0,003","0,002","0,002","0,003","0,004","0,004","0,005","0,007","0,006","0,013","0,022","0,022","0,014","0,021","0,028","0,032","0,027","0,029999","0,042","0,026","0,027","0,027","0,035","0,025999","0,0150003","0,0189999","0,03","0,0248" +INTL.33-12-FRO-BKWH.A, Faroe Islands,"0,049","0,049","0,049","0,049","0,049","0,049","0,049","0,049","0,062","0,071","0,074","0,074","0,083","0,073","0,075","0,075","0,069564","0,075066","0,076501","0,069453","0,075262","0,075195","0,095535","0,08483","0,093443","0,097986","0,099934","0,103407","0,094921","0,091482","0,06676","0,092","0,099","0,091","0,121","0,132","0,105","0,11","0,107","0,102","0,11","0,11" +INTL.33-12-FIN-BKWH.A, Finland,"10,115","13,518","12,958","13,445","13,115","12,211","12,266","13,658","13,229","12,9","10,75","13,065","14,956","13,341","11,669","12,796","11,742","12,11958","14,9","12,652","14,513","13,073","10,668","9,495","14,919","13,646","11,379","14,035","16,941","12,559","12,743","12,278001","16,666998","12,672","13,240001","16,583999","15,634127","14,609473","13,1369998","12,2454823","15,883","15,766" +INTL.33-12-CSK-BKWH.A, Former Czechoslovakia,"4,8","4,2","3,7","3,9","3,2","4,3",4,"4,853","4,355","4,229","3,919","3,119","3,602",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- +INTL.33-12-SCG-BKWH.A, Former Serbia and Montenegro,--,--,--,--,--,--,--,--,--,--,--,--,"11,23","10,395","11,016","12,071","14,266","12,636","12,763","13,243","11,88","12,326","11,633","9,752","11,01","11,912",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- +INTL.33-12-YUG-BKWH.A, Former Yugoslavia,"27,868","25,044","23,295","21,623","25,645","24,363","27,474","25,98","25,612","23,256","19,601","18,929",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- +INTL.33-12-FRA-BKWH.A, France,"68,253","70,358","68,6","67,515","64,01","60,248","60,953","68,623","73,952","45,744","52,796","56,277","68,313","64,3","78,057","72,196","64,43","63,151","61,479","71,832","66,466","73,888","59,992","58,567","59,276","50,965","55,741","57,029","63,017","56,428","61,945","45,184","59,099","71,042","62,993","54,876","60,094","49,389","64,485","56,913891","62,06191","58,856657" +INTL.33-12-DEU-BKWH.A, Germany,--,--,--,--,--,--,--,--,--,--,--,"14,742","17,223","17,699","19,731","21,562","21,737","17,18343","17,044","19,451","21,515","22,506","22,893","19,071","20,866","19,442","19,808","20,957","20,239","18,841","20,678","17,323","21,331","22,66","19,31","18,664","20,214","19,985","17,694","19,731","18,322","19,252" +INTL.33-12-DDR-BKWH.A," Germany, 
East","1,658","1,718","1,748","1,683","1,748","1,758","1,767","1,726","1,719","1,551","1,389",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- +INTL.33-12-DEUW-BKWH.A," Germany, West","17,125","17,889","17,694","16,713","16,434","15,354","16,526","18,36","18,128","16,482","15,769",--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,-- +INTL.33-12-GIB-BKWH.A, Gibraltar,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +INTL.33-12-GRC-BKWH.A, Greece,"3,396","3,398","3,551","2,331","2,852","2,792","3,222","2,768","2,354","1,888","1,751","3,068","2,181","2,26","2,573","3,494","4,305","3,84318","3,68","4,546","3,656","2,076","2,772","4,718","4,625","4,967","5,806","2,565","3,279","5,32","7,431","3,998","4,387","6,337","4,464","5,782","5,543","3,962","5,035","3,9798","3,343687","5,909225" +INTL.33-12-HUN-BKWH.A, Hungary,"0,111","0,166","0,158","0,153","0,179","0,153","0,152","0,167","0,167","0,156","0,176","0,192","0,156","0,164","0,159","0,161","0,205","0,21384","0,15345","0,179","0,176","0,184","0,192","0,169","0,203","0,2","0,184","0,208","0,211","0,226","0,184","0,215999","0,205999","0,207999","0,294001","0,226719","0,253308","0,213999","0,216","0,2129999","0,238","0,202379" +INTL.33-12-ISL-BKWH.A, Iceland,"3,053","3,085","3,407","3,588","3,738","3,667","3,846","3,918","4,169","4,217","4,162","4,162","4,267","4,421","4,47","4,635","4,724","5,15493","5,565","5,987","6,292","6,512","6,907","7,017","7,063","6,949","7,22","8,31","12,303","12,156","12,509999","12,381999","12,213999","12,747001","12,554","13,541","13,091609","13,891929","13,679377","13,32911","12,9196201","13,5746171" +INTL.33-12-IRL-BKWH.A, Ireland,"0,833","0,855","0,792","0,776","0,68","0,824","0,91","0,673","0,862","0,684","0,69","0,738","0,809","0,757","0,911","0,706","0,715","0,67122","0,907","0,838","0,838","0,59","0,903","0,592","0,624","0,625","0,717","0,66","0,959","0,893","0,593","0,699","0,795","0,593","0,701","0,798","0,674","0,685","0,687","0,87813","0,932656","0,750122" +INTL.33-12-ITA-BKWH.A, Italy,"44,997","42,782","41,216","40,96","41,923","40,616","40,626","39,05","40,205","33,647","31,31","41,817","41,778","41,011","44,212","37,404","41,617","41,18697","40,808","44,911","43,763","46,343","39,125","33,303","41,915","35,706","36,624","32,488","41,207","48,647","50,506","45,36477","41,45625","52,24626","57,95955","45,08163","42,00768","35,83701","48,29913","45,31824","47,551784","44,739" +INTL.33-12-XKS-BKWH.A, Kosovo,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"0,075","0,119","0,154","0,104","0,095","0,142","0,149","0,139","0,243","0,177","0,27027","0,2079","0,262826","0,300635" +INTL.33-12-LVA-BKWH.A, Latvia,--,--,--,--,--,--,--,--,--,--,--,--,"2,498","2,846","3,272","2,908","1,841","2,922","2,99","2,729","2,791","2,805","2,438","2,243","3,078","3,293","2,671","2,706","3,078","3,422","3,487998","2,8568","3,677","2,838","1,953","1,841","2,522819","4,355513","2,4170639","2,0958919","2,5840101","2,6889293" +INTL.33-12-LTU-BKWH.A, Lithuania,--,--,--,--,--,--,--,--,--,--,--,--,"0,308","0,389","0,447","0,369","0,323","0,291","0,413","0,409","0,336","0,322","0,35","0,323","0,417","0,446193","0,393","0,417","0,398","0,42","0,535","0,475","0,419","0,516","0,395","0,346","0,45","0,597","0,427","0,34254","0,3006","0,3837" +INTL.33-12-LUX-BKWH.A, 
Luxembourg,"0,086","0,095","0,084","0,083","0,088","0,071","0,084","0,101","0,097","0,072","0,07","0,083","0,069","0,066","0,117","0,087","0,059","0,082","0,114","0,084","0,119","0,117","0,098","0,078","0,103","0,093","0,11","0,116","0,131","0,105","0,104","0,061","0,095","0,114","0,104","0,095","0,111","0,082","0,089","0,10593","0,091602","0,1068" +INTL.33-12-MLT-BKWH.A, Malta,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 +INTL.33-12-MNE-BKWH.A, Montenegro,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"1,733","1,271","1,524","2,05","2,723","1,192","1,462","2,479","1,734","1,476","1,825","1,014","1,693443","1,262781","0,867637","1,212652" +INTL.33-12-NLD-BKWH.A, Netherlands,0,0,0,0,0,"0,003","0,003","0,001","0,002","0,037","0,119","0,079","0,119","0,091","0,1","0,087","0,079","0,09108","0,111","0,089","0,141","0,116","0,109","0,071","0,094","0,087","0,105","0,106","0,101","0,097","0,105","0,057","0,104389","0,11431","0,112202","0,0927","0,100078","0,060759","0,0723481","0,074182","0,0462851","0,0838927" +INTL.33-12-MKD-BKWH.A, North Macedonia,--,--,--,--,--,--,--,--,--,--,--,--,"0,817","0,517","0,696","0,793","0,842","0,891","1,072","1,375","1,158","0,62","0,749","1,36","1,467","1,477","1,634",1,"0,832","1,257","2,407","1,419","1,031","1,568","1,195","1,846","1,878","1,099","1,773","1,15236","1,277144","1,451623" +INTL.33-12-NOR-BKWH.A, Norway,"82,717","91,876","91,507","104,704","104,895","101,464","95,321","102,341","107,919","117,369","119,933","109,032","115,505","118,024","110,398","120,315","102,823","108,677","114,546","120,237","140,4","119,258","128,078","104,425","107,693","134,331","118,175","132,319","137,654","124,03","116,257","119,78","141,189","127,551","134,844","136,662","142,244","141,651","138,202","123,66288","141,69",144 +INTL.33-12-POL-BKWH.A, Poland,"2,326","2,116","1,528","1,658","1,394","1,833","1,534","1,644","1,775","1,593","1,403","1,411","1,492","1,473","1,716","1,868","1,912","1,941","2,286","2,133","2,085","2,302","2,256","1,654","2,06","2,179","2,022","2,328","2,13","2,351","2,9","2,313","2,02","2,421","2,165","1,814","2,117","2,552","1,949","1,93842","2,118337","2,339192" +INTL.33-12-PRT-BKWH.A, Portugal,"7,873","4,934","6,82","7,897","9,609","10,512","8,364","9,005","12,037","5,72","9,065","8,952","4,599","8,453","10,551","8,26","14,613","12,97395","12,853","7,213","11,21","13,894","7,722","15,566","9,77","4,684","10,892","9,991","6,73","8,201","15,954","11,423","5,589","13,652","15,471","8,615","15,608","5,79","12,316","8,6526","12,082581","11,846464" +INTL.33-12-ROU-BKWH.A, Romania,"12,506","12,605","11,731","9,934","11,208","11,772","10,688","11,084","13,479","12,497","10,87","14,107","11,583","12,64","12,916","16,526","15,597","17,334","18,69","18,107","14,63","14,774","15,886","13,126","16,348","20,005","18,172","15,806","17,023","15,379","19,684","14,581","11,945","14,807","18,618","16,467","17,848","14,349","17,48736","15,580622","15,381243","17,376933" +INTL.33-12-SRB-BKWH.A, Serbia,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,--,"10,855","9,937","9,468","10,436","11,772","8,58","9,193","10,101","10,893","9,979","10,684","9,061","10,53261","9,457175","9,034496","11,284232" +INTL.33-12-SVK-BKWH.A, 
Slovakia,--,--,--,--,--,--,--,--,--,--,--,--,--,"3,432","4,311","4,831","4,185","4,023","4,224","4,429","4,569","4,878","5,215","3,4452","4,059","4,592","4,355","4,406",4,"4,324","5,184","3,211","3,687","4,329","3,762","3,701","4,302","4,321","3,506","4,27383","4,517","4,17" +INTL.33-12-SVN-BKWH.A, Slovenia,--,--,--,--,--,--,--,--,--,--,--,--,"3,379","2,974","3,348","3,187","3,616","3,046","3,4","3,684","3,771","3,741","3,265","2,916","4,033","3,426","3,555","3,233","3,978","4,666","4,452","3,506","3,841","4,562","6,011","3,75","4,443","3,814","4,643","4,43421","4,93406","4,711944" +INTL.33-12-ESP-BKWH.A, Spain,"29,16","21,64","25,99","26,696","31,088","30,895","26,105","27,016","34,76","19,046","25,16","27,01","18,731","24,133","27,898","22,881","39,404","34,43","33,665","22,634","29,274","40,617","22,691","40,643","31,359","18,209","25,699","27,036","23,13","26,147","41,576","30,07","20,192","36,45","38,815","27,656","35,77","18,007","33,743","24,23025","30,507","29,626" +INTL.33-12-SWE-BKWH.A, Sweden,"58,133","59,006","54,369","62,801","67,106","70,095","60,134","70,95","69,016","70,911","71,778","62,603","73,588","73,905","58,508","67,421","51,2226","68,365","74,25","70,974","77,798","78,269","65,696","53,005","59,522","72,075","61,106","65,497","68,378","65,193","66,279","66,047","78,333","60,81","63,227","74,734","61,645","64,651","61,79","64,46583","71,6","71,086" +INTL.33-12-CHE-BKWH.A, Switzerland,"32,481","35,13","35,974","35,069","29,871","31,731","32,576","34,328","35,437","29,477","29,497","31,756","32,373","35,416","38,678","34,817","28,458","33,70257","33,136","37,104","33,854","38,29","32,323","31,948","30,938","28,664","28,273","32,362","33,214","32,833","33,261","29,906","35,783","35,628","35,122","35,378","31,984","31,47968","32,095881","35,156989","37,867647","36,964485" +INTL.33-12-TUR-BKWH.A, Turkey,"11,159","12,308","13,81","11,13","13,19","11,822","11,637","18,314","28,447","17,61","22,917","22,456","26,302","33,611","30,28","35,186","40,07","39,41784","41,80671","34,33","30,57","23,77","33,346","34,977","45,623","39,165","43,802","35,492","32,937","35,598","51,423001","51,154999","56,668998","58,225","39,750001","65,856","66,685883","57,823851","59,490211","88,2094218","78,094369","55,1755392" +INTL.33-12-GBR-BKWH.A, United Kingdom,"3,921","4,369","4,543","4,548","3,992","4,08","4,767","4,13","4,915","4,732","5,119","4,534","5,329","4,237","5,043","4,79","3,359","4,127","5,117","5,336","5,085","4,055","4,78787","3,22767","4,844","4,92149","4,59315","5,0773","5,14119","5,22792","3,59138","5,69175","5,30965","4,70147","5,8878","6,29727","5,370412217","5,88187","5,44327","5,84628","6,75391","5,0149" +, Eurasia,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, +INTL.33-12-MDA-BKWH.A, Moldova,--,--,--,--,--,--,--,--,--,--,--,--,"0,255","0,371","0,275","0,321","0,362","0,378","0,387","0,363","0,392","0,359","0,348","0,358","0,35","0,359","0,365","0,354","0,385","0,354","0,403","0,348","0,266","0,311","0,317","0,265","0,228","0,282","0,27324","0,29799","0,276","0,316" +INTL.33-12-UKR-BKWH.A, Ukraine,--,--,--,--,--,--,--,--,--,--,--,--,"7,725","10,929","11,997","9,853","8,546","9,757","15,756","14,177","11,161","11,912","9,531","9,146","11,635","12,239","12,757","10,042","11,397","11,817","13,02","10,837","10,374","13,663","8,393","5,343","7,594","8,856","10,32372","6,5083","7,5638","10,3326" diff --git a/data/existing_infrastructure/offwind_capacity_IRENA.csv b/data/existing_infrastructure/offwind_capacity_IRENA.csv index 5400e4fb..d2a3f0f1 100644 --- 
a/data/existing_infrastructure/offwind_capacity_IRENA.csv +++ b/data/existing_infrastructure/offwind_capacity_IRENA.csv @@ -1,34 +1,34 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 -Albania,,,,,,,,,,,,,,,,,,, -Austria,,,,,,,,,,,,,,,,,,, -Belgium,,,,,,,,,,31.5,196.5,196.5,381,707.7,707.7,712,712.2,877.2,1185.9 -Bosnia Herzg,,,,,,,,,,,,,,,,,,, -Bulgaria,,,,,,,,,,,,,,,,,,, -Croatia,,,,,,,,,,,,,,,,,,, -Czechia,,,,,,,,,,,,,,,,,,, -Denmark,50,50,214,423.4,423.4,423.4,423.4,423.4,423.4,660.9,867.9,871.5,921.9,1271.1,1271.1,1271.1,1271.1,1263.8,1700.8 -Estonia,,,,,,,,,,,,,,,,,,, -Finland,,,,,,,,,24,24,26.3,26.3,26.3,26.3,26.3,32,32,72.7,72.7 -France,,,,,,,,,,,,,,,,,,2,2 -Germany,,,,,,,,,,35,80,188,268,508,994,3283,4132,5406,6396 -Greece,,,,,,,,,,,,,,,,,,, -Hungary,,,,,,,,,,,,,,,,,,, -Ireland,,,,,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2 -Italy,,,,,,,,,,,,,,,,,,, -Latvia,,,,,,,,,,,,,,,,,,, -Lithuania,,,,,,,,,,,,,,,,,,, -Luxembourg,,,,,,,,,,,,,,,,,,, -Montenegro,,,,,,,,,,,,,,,,,,, -Netherlands,,,,,,,108,108,228,228,228,228,228,228,228,357,957,957,957 -North Macedonia,,,,,,,,,,,,,,,,,,, -Norway,,,,,,,,,,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3 -Poland,,,,,,,,,,,,,,,,,,, -Portugal,,,,,,,,,,,,1.9,2,2,2,2,,, -Romania,,,,,,,,,,,,,,,,,,, -Serbia,,,,,,,,,,,,,,,,,,, -Slovakia,,,,,,,,,,,,,,,,,,, -Slovenia,,,,,,,,,,,,,,,,,,, -Spain,,,,,,,,,,,,,,5,5,5,5,5,5 -Sweden,13,22,22,22,22,22,22,131,133,163,163,163,163,212,213,213,203,203,203 -Switzerland,,,,,,,,,,,,,,,,,,, -UK,3.8,3.8,3.8,63.8,123.8,213.8,303.8,393.8,596.2,951.2,1341.5,1838.3,2995.5,3696,4501.3,5093.4,5293.4,6987.9,8216.5 +Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 +Albania,,,,,,,,,,,,,,,,,,,,,,, +Austria,,,,,,,,,,,,,,,,,,,,,,, +Belgium,,,,,,,,,,31.5,196.5,196.5,381.0,707.7,707.7,712.0,712.2,877.2,1185.9,1555.5,2261.8,2261.8,2261.8 +Bosnia Herzg,,,,,,,,,,,,,,,,,,,,,,, +Bulgaria,,,,,,,,,,,,,,,,,,,,,,, +Croatia,,,,,,,,,,,,,,,,,,,,,,, +Czechia,,,,,,,,,,,,,,,,,,,,,,, +Denmark,49.95,49.95,213.95,423.35,423.35,423.35,423.35,423.35,423.35,660.85,867.85,871.45,921.85,1271.05,1271.05,1271.05,1271.05,1263.8,1700.8,1700.8,1700.8,2305.6,2305.6 +Estonia,,,,,,,,,,,,,,,,,,,,,,, +Finland,,,,,,,,,24.0,24.0,26.3,26.3,26.3,26.3,26.3,32.0,32.0,72.7,72.7,73.0,73.0,73.0,73.0 +France,,,,,,,,,,,,,,,,,,2.0,2.0,2.0,2.0,2.0,482.0 +Germany,,,,,,,,,,35.0,80.0,188.0,268.0,508.0,994.0,3283.0,4132.0,5406.0,6393.0,7555.0,7787.0,7787.0,8129.0 +Greece,,,,,,,,,,,,,,,,,,,,,,, +Hungary,,,,,,,,,,,,,,,,,,,,,,, +Ireland,,,,,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2 +Italy,,,,,,,,,,,,,,,,,,,,,,,30.0 +Latvia,,,,,,,,,,,,,,,,,,,,,,, +Lithuania,,,,,,,,,,,,,,,,,,,,,,, +Luxembourg,,,,,,,,,,,,,,,,,,,,,,, +Montenegro,,,,,,,,,,,,,,,,,,,,,,, +Netherlands,,,,,,,108.0,108.0,228.0,228.0,228.0,228.0,228.0,228.0,228.0,357.0,957.0,957.0,957.0,957.0,2459.5,2459.5,2571.0 +North Macedonia,,,,,,,,,,,,,,,,,,,,,,, +Norway,,,,,,,,,,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,6.3,66.3 +Poland,,,,,,,,,,,,,,,,,,,,,,, +Portugal,,,,,,,,,,,,1.86,2.0,2.0,2.0,2.0,,,,,25.0,25.0,25.0 +Romania,,,,,,,,,,,,,,,,,,,,,,, +Serbia,,,,,,,,,,,,,,,,,,,,,,, +Slovakia,,,,,,,,,,,,,,,,,,,,,,, +Slovenia,,,,,,,,,,,,,,,,,,,,,,, +Spain,,,,,,,,,,,,,,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0 
+Sweden,13.0,22.0,22.0,22.0,22.0,22.0,22.0,131.0,133.0,163.0,163.0,163.0,163.0,212.0,213.0,213.0,203.0,203.0,203.0,203.0,203.0,193.0,193.0 +Switzerland,,,,,,,,,,,,,,,,,,,,,,, +UK,4.0,4.0,4.0,64.0,124.0,214.0,304.0,394.0,596.2,951.0,1341.0,1838.0,2995.0,3696.0,4501.0,5093.0,5293.0,6988.0,8181.0,9888.0,10383.0,11255.0,13928.0 diff --git a/data/existing_infrastructure/onwind_capacity_IRENA.csv b/data/existing_infrastructure/onwind_capacity_IRENA.csv index ca7bb5ec..cd5ac19c 100644 --- a/data/existing_infrastructure/onwind_capacity_IRENA.csv +++ b/data/existing_infrastructure/onwind_capacity_IRENA.csv @@ -1,34 +1,34 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 -Albania,,,,,,,,,,,,,,,,,,, -Austria,50,67,109,322,581,825.2,968.3,991.2,992,1001,1015.8,1106,1337.2,1674.5,2110.3,2488.7,2730,2886.7,3132.7 -Belgium,14,26,31,67,96,167,212,276,324,576.5,715.5,872.5,989,1072.3,1236.3,1464,1657.8,1919.3,2074.8 -Bosnia Herzg,,,,,,,,,,,,0.3,0.3,0.3,0.3,0.3,0.3,0.3,50.9 -Bulgaria,,,,,1,8,27,30,114,333,488,541,677,683,699,699,699,698.4,698.9 -Croatia,,,,,6,6,17,17,17,70,79,130,180,254,339,418,483,576.1,586.3 -Czechia,2,,6.4,10.6,16.5,22,43.5,113.8,150,193,213,213,258,262,278,281,282,308.2,316.2 -Denmark,2340.1,2447.2,2680.6,2696.6,2700.4,2704.5,2712.3,2700.9,2739.5,2821.2,2934,3080.5,3240.1,3547.9,3615.4,3805.9,3974.5,4225.8,4419.8 -Estonia,,,1,3,7,31,31,50,77,104,108,180,266,248,275,300,310,311.8,310 -Finland,38,39,43,52,82,82,86,110,119,123,170.7,172.7,230.7,420.7,600.7,973,1533,1971.3,1968.3 -France,38,66,138,218,358,690,1412,2223,3403,4582,5912,6758,7607.5,8156,9201.4,10298.2,11566.6,13497.4,14898.1 -Germany,6095,8754,12001,14381,16419,18248,20474,22116,22794,25697,26823,28524,30711,32969,37620,41297,45303,50174,52447 -Greece,226,270,287,371,470,491,749,846,1022,1171,1298,1640,1753,1809,1978,2091,2370,2624,2877.5 -Hungary,,1,1,3,3,17,33,61,134,203,293,331,325,329,329,329,329,329,329 -Ireland,116.5,122.9,134.8,210.3,311.2,468.1,651.3,715.3,917.1,1226.1,1365.2,1559.4,1679.2,1983,2258.1,2426,2760.8,3292.8,3650.9 -Italy,363,664,780,874,1127,1635,1902,2702,3525,4879,5794,6918,8102,8542,8683,9137,9384,9736.6,10230.2 -Latvia,2,2,22,26,26,26,26,26,28,29,30,36,59,65.9,68.9,68.2,69.9,77.1,78.2 -Lithuania,,,,,1,1,31,47,54,98,133,202,275,279,288,436,509,518,533 -Luxembourg,14,13.9,13.9,20.5,34.9,34.9,34.9,34.9,42.9,42.9,43.7,44.5,58.3,58.3,58.3,63.8,119.7,119.7,122.9 -Montenegro,,,,,,,,,,,,,,,,,,72,118 -Netherlands,447,486,672,905,1075,1224,1453,1641,1921,1994,2009,2088,2205,2485,2637,3034,3300,3245,3436 -North Macedonia,,,,,,,,,,,,,,,37,37,37,37,37 -Norway,13,13,97,97,152,265,284,348,395,420.7,422.7,509.7,702.7,815.7,856.7,864.7,880.7,1204.7,1708 -Poland,4,19,32,35,40,121,172,306,526,709,1108,1800,2564,3429,3836,4886,5747,5759.4,5766.1 -Portugal,83,125,190,268,553,1064,1681,2201,2857,3326,3796,4254.4,4409.6,4607.9,4854.6,4934.8,5124.1,5124.1,5172.4 -Romania,,,,,,1,1,3,5,15,389,988,1822,2773,3244,3130,3025,3029.8,3032.3 -Serbia,,,,,,,,,,,,,0.5,0.5,0.5,10.4,17,25,25 -Slovakia,,,,3,3,5,5,5,5,3,3,3,3,5,3,3,3,4,3 -Slovenia,,,,,,,,,,,,,,4,4,5,5,5,5.2 -Spain,2206,3397,4891,5945,8317,9918,11722,14820,16555,19176,20693,21529,22789,22953,22920,22938,22985,23119.5,23400.1 -Sweden,196,273,335,395,453,500,563,692,956,1312,1854,2601,3443,3982,4875,5606,6232,6408,7097 -Switzerland,3,5,5,5,9,12,12,12,14,18,42,46,49,60,60,60,75,75,75 -UK,408.2,489.2,530.2,678.2,809.2,1351.2,1651.2,2083.2,2849.8,3470.8,4079.8,4758,6035,7586.3,8572.7,9212.2,10832.3,12596.9,13553.9 
+Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 +Albania,,,,,,,,,,,,,,,,,,,,,,, +Austria,50.0,67.0,109.0,322.0,581.0,825.22,968.27,991.16,991.97,1000.99,1015.83,1105.97,1337.15,1674.54,2110.28,2488.73,2730.0,2886.7,3132.71,3224.12,3225.98,3407.81,3735.81 +Belgium,14.0,26.0,31.0,67.0,96.0,167.0,212.0,276.0,324.0,576.5,715.5,872.5,985.9,1061.3,1225.0,1469.3,1621.6,1902.2,2119.0,2308.0,2410.9,2686.6,2989.6 +Bosnia Herzg,,,,,,,,,,,,0.3,0.3,0.3,0.3,0.3,0.3,0.3,51.0,87.0,87.0,135.0,135.0 +Bulgaria,,,,,1.0,8.0,27.0,30.0,114.0,333.0,488.0,541.0,677.0,683.0,699.0,699.0,699.0,698.39,698.92,703.12,702.8,704.38,704.38 +Croatia,,,,,6.0,6.0,17.0,17.0,17.0,70.0,79.0,130.0,180.0,254.0,339.0,418.0,483.0,576.1,586.3,646.3,801.3,986.9,1042.9 +Czechia,2.0,,6.4,10.6,16.5,22.0,43.5,113.8,150.0,193.0,213.0,213.0,258.0,262.0,278.0,281.0,282.0,308.21,316.2,339.41,339.42,339.41,339.41 +Denmark,2340.07,2447.2,2680.58,2696.57,2700.36,2704.49,2712.35,2700.86,2739.52,2821.24,2933.98,3080.53,3240.09,3547.87,3615.35,3805.92,3974.09,4225.15,4421.86,4409.74,4566.23,4715.24,4782.24 +Estonia,,,1.0,3.0,7.0,31.0,31.0,50.0,77.0,104.0,108.0,180.0,266.0,248.0,275.0,300.0,310.0,311.8,310.0,316.0,317.0,315.0,315.0 +Finland,38.0,39.0,43.0,52.0,82.0,82.0,86.0,110.0,119.0,123.0,170.7,172.7,230.7,420.7,600.7,973.0,1533.0,1971.3,1968.3,2211.0,2513.0,3184.0,5541.0 +France,38.0,66.0,138.0,218.0,358.0,690.0,1412.0,2223.0,3403.0,4582.0,5912.0,6758.02,7607.5,8155.96,9201.42,10298.18,11566.56,13497.35,14898.14,16424.85,17512.0,18737.98,20637.98 +Germany,6095.0,8754.0,12001.0,14381.0,16419.0,18248.0,20474.0,22116.0,22794.0,25697.0,26823.0,28524.0,30711.0,32969.0,37620.0,41297.0,45303.0,50174.0,52328.0,53187.0,54414.0,56046.0,58165.0 +Greece,226.0,270.0,287.0,371.0,470.0,491.0,749.0,846.0,1022.0,1171.0,1298.0,1640.0,1753.0,1809.0,1978.0,2091.0,2370.0,2624.0,2877.5,3589.0,4119.25,4649.13,4879.13 +Hungary,,1.0,1.0,3.0,3.0,17.0,33.0,61.0,134.0,203.0,293.0,331.0,325.0,329.0,329.0,329.0,329.0,329.0,329.0,323.0,323.0,324.0,324.0 +Ireland,116.5,122.9,134.8,210.3,311.2,468.1,651.3,715.3,917.1,1226.1,1365.2,1559.4,1679.15,1898.1,2258.05,2425.95,2776.45,3293.95,3648.65,4101.25,4281.5,4313.84,4593.84 +Italy,363.0,664.0,780.0,874.0,1127.0,1635.0,1902.0,2702.0,3525.0,4879.0,5794.0,6918.0,8102.0,8542.0,8683.0,9137.0,9384.0,9736.58,10230.25,10679.46,10870.62,11253.73,11749.73 +Latvia,2.0,2.0,22.0,26.0,26.0,26.0,26.0,26.0,28.0,29.0,30.0,36.0,59.0,65.89,68.92,68.17,69.91,77.11,78.17,78.07,78.07,77.13,136.13 +Lithuania,,,,,1.0,1.0,31.0,47.0,54.0,98.0,133.0,202.0,275.0,279.0,288.0,436.0,509.0,518.0,533.0,534.0,540.0,671.0,814.0 +Luxembourg,14.0,13.9,13.9,20.5,34.9,34.9,34.9,34.9,42.92,42.93,43.73,44.53,58.33,58.33,58.34,63.79,119.69,119.69,122.89,135.79,152.74,136.44,165.44 +Montenegro,,,,,,,,,,,,,,,,,,72.0,72.0,118.0,118.0,118.0,118.0 +Netherlands,447.0,486.0,672.0,905.0,1075.0,1224.0,1453.0,1641.0,1921.0,1994.0,2009.0,2088.0,2205.0,2485.0,2637.0,3033.84,3300.12,3245.0,3436.11,3527.16,4188.38,5309.87,6176.0 +North Macedonia,,,,,,,,,,,,,,,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0 +Norway,13.0,13.0,97.0,97.0,152.0,265.0,284.0,348.0,395.0,420.7,422.7,509.7,702.7,815.7,856.7,864.7,880.7,1204.7,1707.7,2911.7,4027.7,5042.7,5067.7 +Poland,4.0,19.0,32.0,35.0,40.0,121.0,172.0,306.0,526.0,709.0,1108.0,1800.0,2564.0,3429.0,3836.0,4886.0,5747.0,5759.36,5766.08,5837.76,6298.25,6967.34,7987.34 
+Portugal,83.0,125.0,190.0,268.0,553.0,1064.0,1681.0,2201.0,2857.0,3326.0,3796.0,4254.35,4409.55,4607.95,4854.56,4934.84,5124.1,5124.1,5172.36,5222.75,5097.26,5402.33,5430.33 +Romania,,,,,,1.0,1.0,3.0,5.0,15.0,389.0,988.0,1822.0,2773.0,3244.0,3130.0,3025.0,3029.8,3032.26,3037.52,3012.53,3014.96,3014.96 +Serbia,,,,,,,,,,,,,0.5,0.5,0.5,10.4,17.0,25.0,227.0,398.0,398.0,398.0,398.0 +Slovakia,,,,3.0,3.0,5.0,5.0,5.0,5.0,3.0,3.0,3.0,3.0,5.0,3.0,3.0,3.0,4.0,3.0,4.0,4.0,4.0,4.0 +Slovenia,,,,,,,,,,,,,2.0,2.0,3.0,3.0,3.0,3.3,3.3,3.3,3.3,3.33,3.33 +Spain,2206.0,3397.0,4891.0,5945.0,8317.0,9918.0,11722.0,14820.0,16555.0,19176.0,20693.0,21529.0,22789.0,22953.0,22920.0,22938.0,22985.0,23119.48,23400.06,25585.08,26814.19,27902.65,29302.84 +Sweden,196.0,273.0,335.0,395.0,453.0,500.0,563.0,692.0,956.0,1312.0,1854.0,2601.0,3443.0,3982.0,4875.0,5606.0,6232.0,6408.0,7097.0,8478.0,9773.0,11923.0,14364.0 +Switzerland,3.0,5.0,5.0,5.0,9.0,12.0,12.0,12.0,14.0,18.0,42.0,46.0,49.0,60.0,60.0,60.0,75.0,75.0,75.0,75.0,87.0,87.0,87.0 +UK,431.0,490.0,531.0,678.0,809.0,1351.0,1651.0,2083.0,2849.8,3468.0,4080.0,4758.0,6035.0,7586.0,8573.0,9212.0,10833.0,12597.0,13425.0,13999.0,14075.0,14492.0,14832.0 diff --git a/data/existing_infrastructure/solar_capacity_IRENA.csv b/data/existing_infrastructure/solar_capacity_IRENA.csv index ac84c2d1..01683f8d 100644 --- a/data/existing_infrastructure/solar_capacity_IRENA.csv +++ b/data/existing_infrastructure/solar_capacity_IRENA.csv @@ -1,34 +1,34 @@ -Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 -Albania,,0.1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.4,0.6,0.7,0.8,0.9,1.1,1,1,1 -Austria,5,7,9,23,27,21,22.4,24.2,30.1,48.9,88.8,174.1,337.5,626,785.2,937.1,1096,1269,1437.6 -Belgium,,,1,1,1,2,2,20,62,386,1007,1979,2647,2902,3015.2,3131.7,3327,3616.2,3986.5 -Bosnia Herzg,,,,0.1,0.2,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.3,7.2,8.2,14.1,16,18.2 -Bulgaria,,,,,,,,0,0.1,2,25,154,1013,1020,1026,1029,1028,1035.6,1032.7 -Croatia,,,,,,,,,,0.3,0.3,0.3,4,19,33,47.8,55.8,60,67.7 -Czechia,0.1,0.1,0.2,0.3,0.4,0.6,0.8,4,39.5,464.6,1727,1913,2022,2063.5,2067.4,2074.9,2067.9,2069.5,2075.1 -Denmark,1,1,2,2,2,3,3,3,3,5,7,17,402,571,607,782.1,851,906.4,998 -Estonia,,,,,,,,,,0.1,0.1,0.2,0.4,1.5,3.3,6.5,10,15,31.9 -Finland,2,3,3,3,4,4,5,5,6,6,7,7,8,9,11,17,39,82,140 -France,7,7,8,9,11,13,15,26,80,277,1044,3003.6,4358.8,5277.3,6034.4,7137.5,7702.1,8610.4,9617 -Germany,114,195,260,435,1105,2056,2899,4170,6120,10564,18004,25914,34075,36708,37898,39222,40677,42291,45179 -Greece,,1,1,1,1,1,5,9,12,46,202,612,1536,2579,2596,2604,2604,2605.5,2651.6 -Hungary,,,,,,,,0.4,1,1,2,4,12,35,89,172,235,344,726 -Ireland,,,,,,,,,,0.6,0.7,0.8,0.9,1,1.6,2.4,5.9,15.7,24.2 -Italy,19,20,22,26,31,34,45,110,483,1264,3592,13131,16785,18185,18594,18901,19283,19682.3,20107.6 -Latvia,,,,,,,,,,,,,0.2,0.2,0.2,0.2,0.7,0.7,2 -Lithuania,,,,,,,,,0.1,0.1,0.1,0.3,7,68,69,69,70,73.8,82 -Luxembourg,,0.2,1.6,14.2,23.6,23.6,23.7,23.9,24.6,26.4,29.5,40.7,74.7,95,109.9,116.3,121.9,128.1,130.6 -Montenegro,,,,,,,0,0.2,0.4,0.4,0.6,0.8,0.9,1.1,2.1,2.7,3.1,3.4,3.4 -Netherlands,13,21,26,46,50,51,53,54,59,69,90,149,369,746,1048,1515,2049,2903,4522 -North Macedonia,,,,,,,,,,,0,2,4,7,15,17,16.7,16.7,20.6 -Norway,6,6,6,7,7,7,8,8,8.3,8.7,9.1,9.5,10,11,13,15,26.7,44.9,68.4 -Poland,,,,,,,,,,,,1.1,1.3,2.4,27.2,107.8,187.2,287.1,562 -Portugal,1,1,1,2,2,2,3,24,59,115,134,172,238,296,415,447,512.8,579.2,667.4 -Romania,,,,,,,,,0.1,0.1,0.1,1,41,761,1293,1326,1372,1374.1,1385.8 
-Serbia,,,,,,0.1,0.2,0.4,0.9,1.2,1.3,1.5,3.1,4.7,6,9,11,10,10 -Slovakia,,,,,,,,,,,19,496,513,533,533,533,533,528,472 -Slovenia,,,0,0,0,0,0.2,0.6,1,4,12,57,142,187,223,238,233,246.8,221.3 -Spain,10,13,17,22,33,52,130,494,3384,3423,3873,4283,4569,4690,4697,4704,4713,4723,4763.5 -Sweden,3,3,3,4,4,4,5,6,8,9,11,12,24,43,60,104,153,402,492 -Switzerland,16,18,20,22,24,28,30,37,49,79,125,223,437,756,1061,1394,1664,1906,2171 -UK,2,3,4,6,8,11,14,18,23,27,95,1000,1753,2937,5528,9601.2,11930.5,12781.8,13118.3 +Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022 +Albania,,0.1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.4,0.56,0.68,0.76,0.87,1.05,1.0,1.0,1.0,14.0,21.0,23.0,28.6 +Austria,5.0,7.0,9.0,23.0,27.0,18.49,19.61,21.42,27.0,45.56,85.27,169.88,333.09,620.78,779.76,931.56,1089.53,1262.01,1447.94,1694.4,2034.74,2773.91,3538.91 +Belgium,,,1.0,1.0,1.0,2.0,2.0,20.0,62.0,386.0,1006.6,1978.6,2646.6,2901.6,3015.0,3131.6,3328.8,3620.6,4000.0,4636.6,5572.8,6012.4,6898.4 +Bosnia Herzg,,,,0.1,0.2,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.35,1.34,7.17,8.17,14.12,16.0,18.15,22.35,34.89,56.51,107.47 +Bulgaria,,,,,,,,0.03,0.1,2.0,25.0,154.0,921.99,1038.54,1028.92,1027.89,1029.89,1030.7,1033.06,1044.39,1100.21,1274.71,1948.36 +Croatia,,,,,,,,,,0.3,0.3,0.3,4.0,19.0,33.0,47.8,55.8,60.0,67.7,84.8,108.5,138.3,182.3 +Czechia,0.1,0.1,0.2,0.3,0.4,0.59,0.84,3.96,39.5,464.6,1727.0,1913.0,2022.0,2063.5,2067.4,2074.9,2067.9,2075.44,2081.05,2110.67,2171.96,2246.09,2627.09 +Denmark,1.0,1.0,2.0,2.0,2.0,3.0,3.0,3.0,3.0,5.0,7.0,17.0,402.0,571.0,607.0,782.11,850.95,906.35,998.0,1080.0,1304.29,1704.04,3122.04 +Estonia,,,,,,,,,,0.1,0.1,0.2,0.38,1.5,3.34,6.5,10.0,15.0,31.9,120.6,207.67,394.77,534.77 +Finland,2.0,3.0,3.0,3.0,4.0,4.0,5.0,5.0,6.0,6.0,7.0,7.0,8.0,9.0,11.0,17.0,39.0,82.0,140.0,222.0,318.0,425.0,590.6 +France,7.0,7.0,8.0,9.0,11.0,13.0,15.0,26.0,80.0,277.0,1044.0,3003.57,4358.75,5277.29,6034.42,7137.52,7702.08,8610.44,9638.88,10738.39,11812.2,14436.97,17036.97 +Germany,114.0,195.0,260.0,435.0,1105.0,2056.0,2899.0,4170.0,6120.0,10564.0,18004.0,25914.0,34075.0,36708.0,37898.0,39222.0,40677.0,42291.0,45156.0,48912.0,53669.0,59371.0,66662.0 +Greece,,1.0,1.0,1.0,1.0,1.0,5.0,9.0,12.0,46.0,202.0,612.0,1536.0,2579.0,2596.0,2604.0,2604.0,2605.53,2651.57,2833.79,3287.72,4277.42,5557.42 +Hungary,,,,,,,,0.4,1.0,1.0,2.0,4.0,12.0,35.0,89.0,172.0,235.0,344.0,728.0,1400.0,2131.0,2968.0,2988.0 +Ireland,,,,,,,,,,,,,,,,,,,,,,, +Italy,19.0,20.0,22.0,26.0,31.0,34.0,45.0,110.0,483.0,1264.0,3592.0,13131.0,16785.0,18185.0,18594.0,18901.0,19283.0,19682.29,20107.59,20865.28,21650.04,22594.26,25076.56 +Latvia,,,,,,,,,,,,,,,,,0.69,0.69,1.96,3.3,5.1,7.16,56.16 +Lithuania,,,,,,,,,0.1,0.1,0.1,0.3,7.0,68.0,69.0,69.0,70.0,70.08,72.0,73.0,80.0,84.0,397.0 +Luxembourg,,0.16,1.59,14.17,23.56,23.58,23.7,23.93,24.56,26.36,29.45,40.67,74.65,95.02,109.93,116.27,121.9,128.1,130.62,159.74,186.64,277.16,319.16 +Montenegro,,,,,,,,,,,,,,,,,,,,,2.57,2.57,22.2 +Netherlands,13.0,21.0,26.0,46.0,50.0,51.0,53.0,54.0,59.0,69.0,90.0,149.0,287.0,650.0,1007.0,1526.26,2135.02,2910.89,4608.0,7226.0,11108.43,14910.69,18848.69 +North Macedonia,,,,,,,,,,,,2.0,4.0,7.0,15.0,17.0,16.7,16.7,16.7,16.71,84.93,84.93,84.93 +Norway,6.0,6.0,6.0,7.0,7.0,7.0,8.0,8.0,8.3,8.7,9.1,9.5,10.0,11.0,13.0,15.0,26.7,44.9,53.11,102.53,141.53,186.53,302.53 +Poland,,,,,,,,,,,,1.11,1.3,2.39,27.15,107.78,187.25,287.09,561.98,1539.26,3954.96,7415.52,11166.52 
+Portugal,1.0,1.0,1.0,2.0,2.0,2.0,3.0,24.0,59.0,115.0,134.0,169.6,235.6,293.6,412.6,441.75,493.05,539.42,617.85,832.74,1010.07,1474.78,2364.78 +Romania,,,,,,,,,0.1,0.1,0.1,1.0,41.0,761.0,1293.0,1326.0,1372.0,1374.13,1385.82,1397.71,1382.54,1393.92,1413.92 +Serbia,,,,,,0.1,0.2,0.4,0.9,1.2,1.3,1.5,3.1,4.7,6.0,9.0,11.0,10.0,11.0,11.0,11.5,11.94,11.94 +Slovakia,,,,,,,,,,,19.0,496.0,513.0,533.0,533.0,533.0,533.0,528.0,472.0,590.0,535.0,537.0,537.0 +Slovenia,1.0,1.0,,,,0.05,0.19,0.59,1.0,4.0,12.0,57.0,142.0,187.0,223.0,238.0,233.0,246.8,246.8,277.88,369.78,461.16,632.16 +Spain,1.0,3.0,6.0,10.0,19.0,37.0,113.0,476.0,3365.0,3403.0,3851.0,4260.0,4545.0,4665.0,4672.0,4677.0,4687.0,4696.0,4730.7,8772.02,10100.42,13678.4,18176.73 +Sweden,3.0,3.0,3.0,4.0,4.0,4.0,5.0,6.0,8.0,9.0,11.0,12.0,24.0,43.0,60.0,104.0,153.0,231.0,411.0,698.0,1090.0,1587.0,2587.0 +Switzerland,16.0,18.0,20.0,22.0,24.0,28.0,30.0,37.0,49.0,79.0,125.0,223.0,437.0,756.0,1061.0,1394.0,1664.0,1906.0,2173.0,2498.0,2973.0,3655.0,4339.92 +UK,2.0,3.0,4.0,6.0,8.0,11.0,14.0,18.0,23.0,27.0,95.0,1000.0,1753.0,2937.0,5528.0,9601.0,11914.0,12760.0,13059.0,13345.0,13579.0,13965.0,14660.0 diff --git a/doc/conf.py b/doc/conf.py index 1ddae466..fe577ac7 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -80,9 +80,9 @@ author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann # built documents. # # The short X.Y version. -version = "0.8" +version = "0.9" # The full version, including alpha/beta/rc tags. -release = "0.8.1" +release = "0.9.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/configtables/clustering.csv b/doc/configtables/clustering.csv index 5f52c222..e831ca84 100644 --- a/doc/configtables/clustering.csv +++ b/doc/configtables/clustering.csv @@ -1,4 +1,5 @@ ,Unit,Values,Description +focus_weights,,,Optionally specify the focus weights for the clustering of countries. For instance: `DE: 0.8` will distribute 80% of all nodes to Germany and 20% to the rest of the countries. simplify_network,,, -- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones" -- algorithm,str,"One of {‘kmeans’, ‘hac’, ‘modularity‘}", diff --git a/doc/configtables/costs.csv b/doc/configtables/costs.csv index 9797d77e..b69c0bf9 100644 --- a/doc/configtables/costs.csv +++ b/doc/configtables/costs.csv @@ -1,9 +1,12 @@ -,Unit,Values,Description -year,--,"YYYY; e.g. '2030'","Year for which to retrieve cost assumptions of ``resources/costs.csv``." -version,--,"vX.X.X; e.g. 'v0.5.0'","Version of ``technology-data`` repository to use." -rooftop_share,--,float,"Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV)." -fill_values,--,float,"Default values if not specified for a technology in ``resources/costs.csv``." -capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." -marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." 
-emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs." --- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." +,Unit,Values,Description +year,--,YYYY; e.g. '2030',Year for which to retrieve cost assumptions of ``resources/costs.csv``. +version,--,vX.X.X; e.g. 'v0.5.0',Version of ``technology-data`` repository to use. +rooftop_share,--,float,Share of rooftop PV when calculating capital cost of solar (joint rooftop and utility-scale PV). +social_discountrate,p.u.,float,Social discount rate to compare costs in different investment periods. 0.02 corresponds to a social discount rate of 2%. +fill_values,--,float,Default values if not specified for a technology in ``resources/costs.csv``. +capital_cost,EUR/MW,Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.,"For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." +marginal_cost,EUR/MWh,Keys should be in the 'technology' column of ``resources/costs.csv``. Values can be any float.,"For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``resources/costs.csv``." +emission_prices,,,Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs. +-- enable,bool,true or false,Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well) +-- co2,EUR/t,float,Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``. +-- co2_monthly_price,bool,true or false,Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices`` diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index 4c04fee6..22a22d57 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -1,6 +1,8 @@ ,Unit,Values,Description voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider +gaslimit_enable,bool,true or false,Add an overall absolute gas limit configured in ``electricity: gaslimit``. gaslimit,MWhth,float or false,Global gas usage limit +co2limit_enable,bool,true or false,Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard. agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. @@ -22,6 +24,8 @@ powerplants_filter,--,"use `pandas.query `_ strings here, e.g. ``Country in ['Germany']``",Filter query for the custom powerplant database. 
 ,,,
+everywhere_powerplants,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to add to every node in the model with zero initial capacity. To be used in combination with ``extendable_carriers`` to allow for building conventional powerplants irrespective of existing locations."
+,,,
 conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``. If an included carrier is also listed in ``extendable_carriers``, the capacity is taken as a lower bound."
 ,,,
 renewable_carriers,--,"Any subset of {solar, onwind, offwind-ac, offwind-dc, hydro}",List of renewable generators to include in the model.
@@ -34,3 +38,6 @@ estimate_renewable_capacities,,,
 -- -- Offshore,--,"Any subset of {offwind-ac, offwind-dc}","List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) offshore technology."
 -- -- Onshore,--,{onwind},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) onshore technology."
 -- -- PV,--,{solar},"List of PyPSA-Eur carriers that is considered as (IRENA, OPSD) PV technology."
+autarky,,,
+-- enable,bool,true or false,Require each node to be autarkic by removing all lines and links.
+-- by_country,bool,true or false,Require each country to be autarkic by removing all cross-border lines and links. ``electricity: autarky`` must be enabled.
diff --git a/doc/configtables/enable.csv b/doc/configtables/enable.csv
index e1349fef..8dd476cb 100644
--- a/doc/configtables/enable.csv
+++ b/doc/configtables/enable.csv
@@ -5,6 +5,7 @@ retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zen
 retrieve_sector_databundle,bool,"{true, false}","Switch to retrieve sector databundle from zenodo via the rule :mod:`retrieve_sector_databundle` or whether to keep a custom databundle located in the corresponding folder."
 retrieve_cost_data,bool,"{true, false}","Switch to retrieve technology cost data from `technology-data repository `_."
 build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`."
+retrieve_irena,bool,"{true, false}",Switch to enable the retrieval of ``existing_capacities`` from IRENASTAT with :mod:`retrieve_irena`.
 retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`."
 build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`."
 retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`."
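For orientation, the new switches documented above map onto the main configuration file. A minimal illustrative sketch with placeholder values only; the key nesting follows the tables above and should be checked against ``config/config.default.yaml``:

    electricity:
      co2limit_enable: true
      co2limit: 7.75e+7            # placeholder cap in t CO2-eq/a
      gaslimit_enable: false
      everywhere_powerplants: [OCGT, CCGT]
      autarky:
        enable: true
        by_country: true           # only meaningful if autarky is enabled

    enable:
      retrieve_irena: true         # fetch existing capacities from IRENASTAT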
diff --git a/doc/configtables/existing_capacities.csv b/doc/configtables/existing_capacities.csv
index 87519193..eacae35b 100644
--- a/doc/configtables/existing_capacities.csv
+++ b/doc/configtables/existing_capacities.csv
@@ -3,4 +3,5 @@
 grouping_years_power ,--,A list of years,Intervals to group existing capacities
 grouping_years_heat ,--,A list of years below 2020,Intervals to group existing capacities for heat
 threshold_capacity ,MW,float,Capacities of generators and links below this threshold are removed during add_existing_capacities
+default_heating_lifetime ,years,int,Default lifetime for heating technologies
 conventional_carriers ,--,"Any subset of {uranium, coal, lignite, oil} ",List of conventional power plants to include in the sectoral network
diff --git a/doc/configtables/licenses-sector.csv b/doc/configtables/licenses-sector.csv
index d65d3b36..7f20b5a6 100644
--- a/doc/configtables/licenses-sector.csv
+++ b/doc/configtables/licenses-sector.csv
@@ -9,9 +9,8 @@ Swiss energy statistics from Swiss Federal Office of Energy,switzerland-sfoe/,un
 BASt emobility statistics,emobility/,unknown,http://www.bast.de/DE/Verkehrstechnik/Fachthemen/v2-verkehrszaehlung/Stundenwerte.html?nn=626916
 BDEW heating profile,heat_load_profile_BDEW.csv,unknown,https://github.com/oemof/demandlib
 heating profiles for Aarhus,heat_load_profile_DK_AdamJensen.csv,unknown,Adam Jensen MA thesis at Aarhus University
-George Lavidas wind/wave costs,WindWaveWEC_GLTB.xlsx,unknown,George Lavidas
 co2 budgets,co2_budget.csv,CC BY 4.0,https://arxiv.org/abs/2004.11009
-existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1
+existing heating potentials,existing_infrastructure/existing_heating_raw.csv,unknown,https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en
 IRENA existing VRE capacities,existing_infrastructure/{solar|onwind|offwind}_capacity_IRENA.csv,unknown,https://www.irena.org/Statistics/Download-Data
 USGS ammonia production,myb1-2017-nitro.xls,unknown,https://www.usgs.gov/centers/nmic/nitrogen-statistics-and-information
 hydrogen salt cavern potentials,h2_salt_caverns_GWh_per_sqkm.geojson,CC BY 4.0,https://doi.org/10.1016/j.ijhydene.2019.12.161 https://doi.org/10.20944/preprints201910.0187.v1
diff --git a/doc/configtables/licenses.csv b/doc/configtables/licenses.csv
index 3e25f5df..37f46cd0 100644
--- a/doc/configtables/licenses.csv
+++ b/doc/configtables/licenses.csv
@@ -5,7 +5,7 @@
 "naturalearth/*",,,,,http://www.naturalearthdata.com/about/terms-of-use/
 "NUTS_2013_60M_SH/*","x","x",,"x",https://ec.europa.eu/eurostat/web/gisco/geodata/reference-data/administrative-units-statistical-units
 "cantons.csv","x",,"x",,https://en.wikipedia.org/wiki/Data_codes_for_Switzerland
-"EIA_hydro_generation_2000_2014.csv","x",,,,https://www.eia.gov/about/copyrights_reuse.php
+"eia_hydro_annual_generation.csv","x",,,,https://www.eia.gov/about/copyrights_reuse.php
 "GEBCO_2014_2D.nc","x",,,,https://www.gebco.net/data_and_products/gridded_bathymetry_data/documents/gebco_2014_historic.pdf
 "hydro_capacities.csv","x",,,,
 "je-e-21.03.02.xls","x","x",,,https://www.bfs.admin.ch/bfs/en/home/fso/swiss-federal-statistical-office/terms-of-use.html
diff --git a/doc/configtables/lines.csv b/doc/configtables/lines.csv
index ec9ec007..3707d4a6 100644
--- a/doc/configtables/lines.csv
+++ b/doc/configtables/lines.csv
@@ -5,6 +5,7 @@ s_nom_max,MW,"float","Global upper limit for the maximum capacity of each extend
 max_extension,MW,"float","Upper limit for the extended capacity of each extendable line."
 length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by lines through air-line distance."
 under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction."
+reconnect_crimea,--,"true or false","Whether to reconnect Crimea to the Ukrainian grid"
 dynamic_line_rating,,,
 -- activate,bool,"true or false","Whether to take dynamic line rating into account"
 -- cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data is stored."
diff --git a/doc/configtables/offwind-ac.csv b/doc/configtables/offwind-ac.csv
index 6b756799..b2533f04 100644
--- a/doc/configtables/offwind-ac.csv
+++ b/doc/configtables/offwind-ac.csv
@@ -2,15 +2,15 @@
 cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data is stored."
 resource,,,
 -- method,--,"Must be 'wind'","A superordinate technology type."
--- turbine,--,"One of turbine types included in `atlite `_","Specifies the turbine type and its characteristic power curve."
+-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve."
 capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement."
 correction_factor,--,float,"Correction factor for capacity factor time series."
 excluder_resolution,m,float,"Resolution on which to perform geographical eligibility analysis."
 corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for AC-connected offshore wind turbine placement."
+luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for AC-connected offshore wind turbine placement."
 natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``."
 ship_threshold,--,float,"Ship density threshold from which areas are excluded."
 max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the AC-connected offshore wind potential."
 min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
 max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be built. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential."
-potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
 clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series values below this threshold are set to zero."
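The amended ``-- turbine`` entry accepts either a plain string or a dictionary keyed by the year from which a turbine model is available. An illustrative sketch follows; the nesting under ``renewable:`` and the turbine names are assumptions to be checked against the default configuration and atlite's turbine library:

    renewable:
      offwind-ac:
        resource:
          method: wind
          # either a fixed turbine type ...
          turbine: NREL_ReferenceTurbine_5MW_offshore
          # ... or one model per year of availability (assumed syntax):
          # turbine:
          #   2010: NREL_ReferenceTurbine_5MW_offshore
          #   2020: NREL_ReferenceTurbine_2020ATB_15MW_offshore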
diff --git a/doc/configtables/offwind-dc.csv b/doc/configtables/offwind-dc.csv
index 1f72228a..7c537543 100644
--- a/doc/configtables/offwind-dc.csv
+++ b/doc/configtables/offwind-dc.csv
@@ -2,15 +2,15 @@
 cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data is stored."
 resource,,,
 -- method,--,"Must be 'wind'","A superordinate technology type."
--- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve."
+-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve."
 capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement."
 correction_factor,--,float,"Correction factor for capacity factor time series."
 excluder_resolution,m,float,"Resolution on which to perform geographical eligibility analysis."
 corine,--,"Any *realistic* subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for DC-connected offshore wind turbine placement."
+luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for DC-connected offshore wind turbine placement."
 natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``."
 ship_threshold,--,float,"Ship density threshold from which areas are excluded."
 max_depth,m,float,"Maximum sea water depth at which wind turbines can be built. Maritime areas with deeper waters are excluded in the process of calculating the DC-connected offshore wind potential."
 min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be built."
 max_shore_distance,m,float,"Maximum distance to the shore above which wind turbines cannot be built."
-potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
 clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series values below this threshold are set to zero."
diff --git a/doc/configtables/onwind.csv b/doc/configtables/onwind.csv
index ba9482e5..3b09214b 100644
--- a/doc/configtables/onwind.csv
+++ b/doc/configtables/onwind.csv
@@ -2,14 +2,17 @@
 cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data is stored."
 resource,,,
 -- method,--,"Must be 'wind'","A superordinate technology type."
--- turbine,--,"One of turbine types included in `atlite `__","Specifies the turbine type and its characteristic power curve."
+-- turbine,--,"One of turbine types included in `atlite `_. Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the turbine type and its characteristic power curve."
 capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of wind turbine placement."
 corine,,,
 -- grid_codes,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for wind turbine placement."
 -- distance,m,float,"Distance to keep from areas specified in ``distance_grid_codes``"
 -- distance_grid_codes,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes to which wind turbines must maintain a distance specified in the setting ``distance``."
+luisa,,,
+-- grid_codes,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for wind turbine placement."
+-- distance,m,float,"Distance to keep from areas specified in ``distance_grid_codes``"
+-- distance_grid_codes,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes to which wind turbines must maintain a distance specified in the setting ``distance``."
 natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``."
-potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`"
 clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series values below this threshold are set to zero."
 correction_factor,--,float,"Correction factor for capacity factor time series."
 excluder_resolution,m,float,"Resolution on which to perform geographical eligibility analysis."
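The new ``luisa`` blocks mirror the existing ``corine`` blocks. A hypothetical onshore wind exclusion setup is sketched below; all grid codes are placeholders to be taken from the respective CORINE and LUISA code lists:

    renewable:
      onwind:
        corine:
          grid_codes: [12, 13, 18]     # placeholder CORINE codes
          distance: 1000               # metres kept from distance_grid_codes
          distance_grid_codes: [1, 2]
        luisa:
          grid_codes: [1111, 1121]     # placeholder LUISA Base Map codes
          distance: 1000
          distance_grid_codes: [1111]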
These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use -``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use -``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use -``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested -``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested -``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use -``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use +Trigger, Description, Definition, Status +``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `__), In active use +``nSEG``; e.g. ``4380SEG``,"Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use +``Co2L``,Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `__, In active use +``Ep``,Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `__, In active use +``Ept``,Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``, In active use, +``CCL``,Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use +``EQ``,Require each country or node to on average produce a minimal share of its total consumption itself. 
Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption., ``solve_network``, In active use +``ATK``,Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links., ``prepare_network``, In active use +``BAU``,Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `__, Untested +``SAFE``,Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `__, Untested +``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use +``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use diff --git a/doc/configtables/plotting.csv b/doc/configtables/plotting.csv index ed5d9c9f..82fc203c 100644 --- a/doc/configtables/plotting.csv +++ b/doc/configtables/plotting.csv @@ -1,6 +1,9 @@ ,Unit,Values,Description map,,, -- boundaries,°,"[x1,x2,y1,y2]",Boundaries of the map plots in degrees latitude (y) and longitude (x) +projection,,,, +-- name,--,"Valid Cartopy projection name","See https://scitools.org.uk/cartopy/docs/latest/reference/projections.html for list of available projections." +-- args,--,--,"Other entries under 'projection' are passed as keyword arguments to the projection constructor, e.g. ``central_longitude: 10.``." costs_max,bn Euro,float,Upper y-axis limit in cost bar plots. costs_threshold,bn Euro,float,Threshold below which technologies will not be shown in cost bar plots. energy_max,TWh,float,Upper y-axis limit in energy bar plots. diff --git a/doc/configtables/sector.csv b/doc/configtables/sector.csv index d610c862..d8cc3288 100644 --- a/doc/configtables/sector.csv +++ b/doc/configtables/sector.csv @@ -62,16 +62,17 @@ tes,--,"{true, false}",Add option for storing thermal energy in large water pits tes_tau,,,The time constant used to calculate the decay of thermal energy in thermal energy storage (TES): 1- :math:`e^{-1/24τ}`. -- decentral,days,float,The time constant in decentralized thermal energy storage (TES) -- central,days,float,The time constant in centralized thermal energy storage (TES) -boilers,--,"{true, false}",Add option for transforming electricity into heat using resistive heater +boilers,--,"{true, false}",Add option for transforming gas into heat using gas boilers +resistive_heaters,--,"{true, false}",Add option for transforming electricity into heat using resistive heaters (independently from gas boilers) oil_boilers,--,"{true, false}",Add option for transforming oil into heat using boilers biomass_boiler,--,"{true, false}",Add option for transforming biomass into heat using boilers +overdimension_individual_heating,--,"float",Add option for overdimensioning individual heating systems by a certain factor. This allows them to cover heat demand peaks e.g. 
10% higher than those in the data with a setting of 1.1. chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) micro_chp,--,"{true, false}",Add option for using Combined Heat and Power (CHP) for decentral areas. solar_thermal,--,"{true, false}",Add option for using solar thermal to generate heat. solar_cf_correction,--,float,The correction factor for the value provided by the solar thermal profile calculations marginal_cost_storage,currency/MWh ,float,The marginal cost of discharging batteries in distributed grids methanation,--,"{true, false}",Add option for transforming hydrogen and CO2 into methane using methanation. -helmeth,--,"{true, false}",Add option for transforming power into gas using HELMETH (Integrated High-Temperature ELectrolysis and METHanation for Effective Power to Gas Conversion) coal_cc,--,"{true, false}",Add option for coal CHPs with carbon capture dac,--,"{true, false}",Add option for Direct Air Capture (DAC) co2_vent,--,"{true, false}",Add option for vent out CO2 from storages to the atmosphere. @@ -79,6 +80,9 @@ allam_cycle,--,"{true, false}",Add option to include `Allam cycle gas power plan hydrogen_fuel_cell,--,"{true, false}",Add option to include hydrogen fuel cell for re-electrification. Assuming OCGT technology costs hydrogen_turbine,--,"{true, false}",Add option to include hydrogen turbine for re-electrification. Assuming OCGT technology costs SMR,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) +SMR CC,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) and Carbon Capture (CC) +regional_methanol_demand,--,"{true, false}",Spatially resolve methanol demand. Set to true if regional CO2 constraints needed. +regional_oil_demand,--,"{true, false}",Spatially resolve oil demand. Set to true if regional CO2 constraints needed. regional_co2 _sequestration_potential,,, -- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP `_. -- attribute,--,string,Name of the attribute for the sequestration potential @@ -88,9 +92,11 @@ regional_co2 _sequestration_potential,,, -- years_of_storage,years,float,The years until potential exhausted at optimised annual rate co2_sequestration_potential,MtCO2/a,float,The potential of sequestering CO2 in Europe per year co2_sequestration_cost,currency/tCO2,float,The cost of sequestering a ton of CO2 +co2_sequestration_lifetime,years,int,The lifetime of a CO2 sequestration site co2_spatial,--,"{true, false}","Add option to spatially resolve carrier representing stored carbon dioxide. This allows for more detailed modelling of CCUTS, e.g. regarding the capturing of industrial process emissions, usage as feedstock for electrofuels, transport of carbon dioxide, and geological sequestration sites." ,,, co2network,--,"{true, false}",Add option for planning a new carbon dioxide transmission network +co2_network_cost_factor,p.u.,float,The cost factor for the capital cost of the carbon dioxide transmission network ,,, cc_fraction,--,float,The default fraction of CO2 captured with post-combustion capture hydrogen_underground _storage,--,"{true, false}",Add options for storing hydrogen underground. Storage potential depends regionally. 
@@ -107,6 +113,11 @@ electricity_distribution _grid,--,"{true, false}",Add a simplified representatio electricity_distribution _grid_cost_factor,,,Multiplies the investment cost of the electricity distribution grid ,,, electricity_grid _connection,--,"{true, false}",Add the cost of electricity grid connection for onshore wind and solar +transmission_efficiency,,,Section to specify transmission losses or compression energy demands of bidirectional links. Splits them into two capacity-linked unidirectional links. +-- {carrier},--,str,The carrier of the link. +-- -- efficiency_static,p.u.,float,Length-independent transmission efficiency. +-- -- efficiency_per_1000km,p.u. per 1000 km,float,Length-dependent transmission efficiency ($\eta^{\text{length}}$) +-- -- compression_per_1000km,p.u. per 1000 km,float,Length-dependent electricity demand for compression ($\eta \cdot \text{length}$) implemented as multi-link to local electricity bus. H2_network,--,"{true, false}",Add option for new hydrogen pipelines gas_network,--,"{true, false}","Add existing natural gas infrastructure, incl. LNG terminals, production and entry-points. The existing gas network is added with a lossless transport model. A length-weighted `k-edge augmentation algorithm `_ can be run to add new candidate gas pipelines such that all regions of the model can be connected to the gas network. When activated, all the gas demands are regionally disaggregated as well." H2_retrofit,--,"{true, false}",Add option for retrofiting existing pipelines to transport hydrogen. @@ -117,6 +128,14 @@ gas_distribution_grid _cost_factor,,,Multiplier for the investment cost of the g ,,, biomass_spatial,--,"{true, false}",Add option for resolving biomass demand regionally biomass_transport,--,"{true, false}",Add option for transporting solid biomass between nodes +biogas_upgrading_cc,--,"{true, false}",Add option to capture CO2 from biomass upgrading conventional_generation,,,Add a more detailed description of conventional carriers. Any power generation requires the consumption of fuel from nodes representing that fuel. biomass_to_liquid,--,"{true, false}",Add option for transforming solid biomass into liquid fuel with the same properties as oil biosng,--,"{true, false}",Add option for transforming solid biomass into synthesis gas with the same properties as natural gas +limit_max_growth,,, +-- enable,--,"{true, false}",Add option to limit the maximum growth of a carrier +-- factor,p.u.,float,The maximum growth factor of a carrier (e.g. 1.3 allows 30% larger than max historic growth) +-- max_growth,,, +-- -- {carrier},GW,float,The historic maximum growth of a carrier +-- max_relative_growth, +-- -- {carrier},p.u.,float,The historic maximum relative growth of a carrier diff --git a/doc/configtables/snapshots.csv b/doc/configtables/snapshots.csv index d60c78dc..0226a9aa 100644 --- a/doc/configtables/snapshots.csv +++ b/doc/configtables/snapshots.csv @@ -1,4 +1,6 @@ -,Unit,Values,Description -start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range" -end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range" -inclusive,--,"One of {'neither', 'both', ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``." +,Unit,Values,Description +start,--,str or datetime-like; e.g. YYYY-MM-DD,Left bound of date range +end,--,str or datetime-like; e.g. 
YYYY-MM-DD,Right bound of date range +inclusive,--,"One of {'neither', 'both', ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``both`` or neither side ``None``." +resolution ,--,"{false,``nH``; i.e. ``2H``-``6H``}","Resample the time-resolution by averaging over every ``n`` snapshots in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks." +segmentation,--,"{false,``n``; e.g. ``4380``}","Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load in :mod:`prepare_network`. **Warning:** This option should currently only be used with electricity-only networks, not for sector-coupled networks." diff --git a/doc/configtables/solar.csv b/doc/configtables/solar.csv index 803445d5..18587694 100644 --- a/doc/configtables/solar.csv +++ b/doc/configtables/solar.csv @@ -2,14 +2,14 @@ cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module can be ERA5 or SARAH-2.","Specifies the directory where the relevant weather data ist stored that is specified at ``atlite/cutouts`` configuration. Both ``sarah`` and ``era5`` work." resource,,, -- method,--,"Must be 'pv'","A superordinate technology type." --- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `__","Specifies the solar panel technology and its characteristic attributes." +-- panel,--,"One of {'Csi', 'CdTe', 'KANENA'} as defined in `atlite `_ . Can be a string or a dictionary with years as keys which denote the year another turbine model becomes available.","Specifies the solar panel technology and its characteristic attributes." -- orientation,,, -- -- slope,°,"Realistically any angle in [0., 90.]","Specifies the tilt angle (or slope) of the solar panel. A slope of zero corresponds to the face of the panel aiming directly overhead. A positive tilt angle steers the panel towards the equator." -- -- azimuth,°,"Any angle in [0., 360.]","Specifies the `azimuth `_ orientation of the solar panel. South corresponds to 180.°." capacity_per_sqkm,:math:`MW/km^2`,float,"Allowable density of solar panel placement." correction_factor,--,float,"A correction factor for the capacity factor (availability) time series." corine,--,"Any subset of the `CORINE Land Cover code list `_","Specifies areas according to CORINE Land Cover codes which are generally eligible for solar panel placement." +luisa,--,"Any subset of the `LUISA Base Map codes in Annex 1 `_","Specifies areas according to the LUISA Base Map codes which are generally eligible for solar panel placement." natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." -potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." excluder_resolution,m,float,"Resolution on which to perform geographical elibility analysis." 
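As a sketch of the year-keyed ``turbine``/``panel`` dictionaries documented in the tables above: the model names below are illustrative entries from the atlite library and the switch year is a hypothetical assumption, not a recommendation.

.. code:: yaml

   renewable:
     onwind:
       resource:
         method: wind
         turbine:
           2013: Vestas_V112_3MW                       # model used from the first modelled year
           2030: NREL_ReferenceTurbine_2020ATB_5.5MW   # assumed to become available from 2030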
diff --git a/doc/configtables/solving.csv b/doc/configtables/solving.csv index 45d50d84..7189399b 100644 --- a/doc/configtables/solving.csv +++ b/doc/configtables/solving.csv @@ -6,12 +6,19 @@ options,,, -- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true." -- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size `horizon` which are solved consecutively." -- seed,--,int,Random seed for increased deterministic behaviour. +-- custom_extra_functionality,--,str,Path to a Python file with custom extra functionality code to be injected into the solving rules of the workflow, relative to the ``rules`` directory. +-- io_api,string,"{'lp','mps','direct'}",Passed to linopy and determines the API used to communicate with the solver. With the ``'lp'`` and ``'mps'`` options linopy passes a file to the solver; with the ``'direct'`` option (only supported for HiGHS and Gurobi) linopy uses an in-memory Python API, resulting in better performance. -- track_iterations,bool,"{'true','false'}",Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration) -- min_iterations,--,int,Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. -- max_iterations,--,int,Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run. -- transmission_losses,int,[0-9],"Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored." -- linearized_unit_commitment,bool,"{'true','false'}",Whether to optimise using the linearized unit commitment formulation. -- horizon,--,int,Number of snapshots to consider in each iteration. Defaults to 100. +constraints,,, +-- CCL,bool,"{'true','false'}",Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``. +-- EQ,bool/string,"{'false',``n(c| )``; i.e. ``0.5``-``0.7c``}",Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption. +-- BAU,bool,"{'true','false'}",Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities`` +-- SAFE,bool,"{'true','false'}",Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network. solver,,, -- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow. -- options,--,Key listed under ``solver_options``.,Link to specific parameter settings.
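Pulling the new solving options and constraint switches from this table together, a configuration sketch might look as follows; all values are illustrative and the file path is an assumption for demonstration:

.. code:: yaml

   solving:
     options:
       io_api: direct                # in-memory API, only for HiGHS and Gurobi
       custom_extra_functionality: ../data/custom_extra_functionality.py
       transmission_losses: 2        # piecewise linear approximation with 2 tangents
     constraints:
       CCL: false
       EQ: 0.7c                      # each country covers at least 70% of its consumption
       BAU: false
       SAFE: false
     solver:
       name: gurobi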
diff --git a/doc/configuration.rst b/doc/configuration.rst index ceda1141..92ed269b 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -383,7 +383,7 @@ overwrite the existing values. .. literalinclude:: ../config/config.default.yaml :language: yaml - :start-after: type: + :start-after: # docs-load :end-before: # docs .. csv-table:: diff --git a/doc/foresight.rst b/doc/foresight.rst index c1be3443..f8ea6108 100644 --- a/doc/foresight.rst +++ b/doc/foresight.rst @@ -41,10 +41,10 @@ Perfect foresight scenarios .. warning:: - Perfect foresight is currently under development and not yet implemented. + Perfect foresight is currently implemented as an experimental test version. -For running perfect foresight scenarios, in future versions you will be able to -set in the ``config/config.yaml``: +For running perfect foresight scenarios, you can adjust the + ``config/config.perfect.yaml``: .. code:: yaml diff --git a/doc/img/base.png b/doc/img/base.png index e1c3b6f2..071c4995 100644 Binary files a/doc/img/base.png and b/doc/img/base.png differ diff --git a/doc/img/elec_s_X.png b/doc/img/elec_s_X.png index e0f4f4a3..37c10479 100644 Binary files a/doc/img/elec_s_X.png and b/doc/img/elec_s_X.png differ diff --git a/doc/img/intro-workflow.png b/doc/img/intro-workflow.png index da2c06d8..27b5a389 100644 Binary files a/doc/img/intro-workflow.png and b/doc/img/intro-workflow.png differ diff --git a/doc/index.rst b/doc/index.rst index 1552729c..7eaffa01 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -35,6 +35,8 @@ PyPSA-Eur: A Sector-Coupled Open Optimisation Model of the European Energy Syste :target: https://stackoverflow.com/questions/tagged/pypsa :alt: Stackoverflow +| + PyPSA-Eur is an open model dataset of the European energy system at the transmission network level that covers the full ENTSO-E area. It covers demand and supply for all energy sectors. From version v0.8.0, PyPSA-Eur includes all @@ -116,7 +118,7 @@ of the individual parts. topics we are working on. Please feel free to help or make suggestions. This project is currently maintained by the `Department of Digital -Transformation in Energy Systems `_ at the +Transformation in Energy Systems `_ at the `Technische Universität Berlin `_. Previous versions were developed within the `IAI `_ at the `Karlsruhe Institute of Technology (KIT) `_ which was funded by @@ -185,7 +187,7 @@ For sector-coupling studies: :: pages = "1--25" year = "2023", eprint = "2207.05816", - doi = "10.1016/j.joule.2022.04.016", + doi = "10.1016/j.joule.2023.06.016", } For sector-coupling studies with pathway optimisation: :: @@ -209,24 +211,6 @@ If you want to cite a specific PyPSA-Eur version, each release of PyPSA-Eur is s :target: https://doi.org/10.5281/zenodo.3520874 -Pre-Built Networks as a Dataset -=============================== - -There are pre-built networks available as a dataset on Zenodo as well for every release of PyPSA-Eur. - -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601881.svg - :target: https://doi.org/10.5281/zenodo.3601881 - -The included ``.nc`` files are PyPSA network files which can be imported with PyPSA via: - -.. code:: python - - import pypsa - - filename = "elec_s_1024_ec.nc" # example - n = pypsa.Network(filename) - - Operating Systems ================= diff --git a/doc/introduction.rst b/doc/introduction.rst index df060723..413db9d1 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -89,8 +89,8 @@ Folder Structure - ``results``: Stores the solved PyPSA network data, summary files and plots. 
- ``logs``: Stores log files. - ``benchmarks``: Stores ``snakemake`` benchmarks. -- ``test``: Includes the test configuration files used for continuous integration. - ``doc``: Includes the documentation of PyPSA-Eur. +- ``graphics``: Includes some graphics for the documentation of PyPSA-Eur. System Requirements =================== diff --git a/doc/plotting.rst b/doc/plotting.rst index 895eab3b..02748cf2 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -22,7 +22,22 @@ Rule ``plot_summary`` .. _map_plot: -Rule ``plot_network`` ======================== +Rule ``plot_power_network`` =========================== -.. automodule:: plot_network +.. automodule:: plot_power_network + +Rule ``plot_power_network_perfect`` =================================== + +.. automodule:: plot_power_network_perfect + +Rule ``plot_hydrogen_network`` ============================== + +.. automodule:: plot_hydrogen_network + +Rule ``plot_gas_network`` ========================= + +.. automodule:: plot_gas_network diff --git a/doc/preparation.rst b/doc/preparation.rst index 5cdc8031..d8f76839 100644 --- a/doc/preparation.rst +++ b/doc/preparation.rst @@ -94,6 +94,13 @@ Rule ``build_electricity_demand`` .. automodule:: build_electricity_demand +.. _monthlyprices: + +Rule ``build_monthly_prices`` ============================= + +.. automodule:: build_monthly_prices + .. _ship: Rule ``build_ship_raster`` @@ -102,6 +109,12 @@ Rule ``build_ship_raster`` .. automodule:: build_ship_raster +.. _availabilitymatrixmdua: + +Rule ``determine_availability_matrix_MD_UA`` ============================================ + +.. automodule:: determine_availability_matrix_MD_UA .. _renewableprofiles: diff --git a/doc/release_notes.rst b/doc/release_notes.rst index abafff4d..54a7252f 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -10,13 +10,388 @@ Release Notes Upcoming Release ================ -* Updated Global Energy Monitor LNG terminal data to March 2023 version. -* PyPSA-EUR now supports the simultaneous execution of multiple scenarios. For this purpose, a scenarios.yaml file has been introduced which contains customizable scenario names with corresponding configuration overrides. To enable it, set the ``run: scenarios:`` key to ``True`` and define the scenario names to run under ``run: name:`` in the configuration file. The latter must be a subset of toplevel keys in the scenario file. +* PyPSA-Eur now supports the simultaneous execution of multiple scenarios. For + this purpose, a scenarios.yaml file has been introduced which contains + customizable scenario names with corresponding configuration overrides. To + enable it, set the ``run: scenarios:`` key to ``True`` and define the scenario + names to run under ``run: name:`` in the configuration file. The latter must + be a subset of top-level keys in the scenario file (see the configuration + sketch below). + +* Add new default to overdimension heating in individual buildings. This allows + them to cover heat demand peaks e.g. 10% higher than those in the data. The + disadvantage of manipulating the costs is that the capacity is then not quite + right. This way at least the costs are right. + +* Add option to specify a default heating lifetime for existing heating + (``existing_capacities: default_heating_lifetime:``). + +* Correctly source the existing heating technologies for buildings since the + source URL has changed. It represents the year 2012 and is only for + buildings, not district heating. So the capacities for urban central are now + set to zero from this source.
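A minimal sketch of the scenario mechanism from the first bullet above; the scenario names and the overridden setting are hypothetical:

.. code:: yaml

   # config/config.yaml
   run:
     scenarios: true
     name:
     - tight-co2
     - loose-co2

   # config/scenarios.yaml
   tight-co2:
     electricity:
       co2limit: 1.0e+8
   loose-co2:
     electricity:
       co2limit: 2.0e+8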
+ +* Remove long-deprecated function ``attach_extendable_generators`` in :mod:`add_electricity`. + +* The filtering of power plants in the ``config.default.yaml`` has been updated regarding phased-out power plants in 2023. + +* Upgrade techno-economic assumptions to ``technology-data`` v0.7.0. + +* Bugfix: Correct technology keys for the electricity production plotting to work out of the box. + +* New configuration option ``everywhere_powerplants`` to build conventional powerplants everywhere, irrespective of existing powerplant locations, in the network (https://github.com/PyPSA/pypsa-eur/pull/850). + +* Remove option for wave energy as technology data is not maintained. + +* Define global constraint for CO2 emissions on the final state of charge of the + CO2 atmosphere store. This gives a more sparse constraint that should improve + the performance of the solving process. + +* Bugfix: Ensure that the code block which corrects the Norwegian heat demand is actually entered. + +* Add warning when BEV availability weekly profile has negative values in `build_transport_demand`. + +* Stacktrace of uncaught exceptions should now be correctly included inside log files (via `configure_logging(..)`). + +* Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``. + +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes. + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network`. + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file. + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share`. + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent. + +* Bugfix: Correctly read out number of solver threads from configuration file. + +* Air-sourced heat pumps can now also be built in rural areas. Previously, only + ground-sourced heat pumps were considered for this category. + +* Add support for the linopy ``io_api`` option; set to ``"direct"`` to increase model reading and writing performance for the HiGHS and Gurobi solvers. + +* Add the option to customise the map projection in the plotting configuration (see the sketch below). + +* The order of buses (bus0, bus1, ...) for DAC components has changed to meet the convention of the other components. Therefore, `bus0` refers to the electricity bus (input), `bus1` to the heat bus (input), `bus2` to the CO2 atmosphere bus (input), and `bus3` to the CO2 storage bus (output). + +* The rule ``plot_network`` has been split into separate rules for plotting + electricity, hydrogen and gas networks. + +* To determine the optimal topology to meet the number of clusters, the workflow used pyomo in combination with ``ipopt`` or ``gurobi``. This dependency has been replaced by using ``linopy`` in combination with ``scipopt`` or ``gurobi``. The environment file has been updated accordingly. + +* The ``highs`` solver was added to the default environment file. + +* Various minor bugfixes to the perfect foresight workflow, though perfect foresight must still be considered experimental.
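The map projection option mentioned above might be used like this; ``EqualEarth`` is an illustrative Cartopy projection name and ``central_longitude`` an example keyword argument forwarded to its constructor:

.. code:: yaml

   plotting:
     projection:
       name: EqualEarth          # any valid Cartopy projection class name
       central_longitude: 10.    # passed through to the projection constructor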
+ +* It is now possible to determine the directory for shared resources by setting `shared_resources` to a string. + +* A ``test.sh`` script was added to the repository to run the tests locally. + +* Default settings for recycling rates and primary product shares of high-value + chemicals have been set in accordance with the values used in `Neumann et al. + (2023) `_, linearly interpolated + between 2020 and 2050. The recycling rates are based on data from `Agora + Energiewende (2021) + `_. + +* Added option to specify turbine and solar panel models for specific years as a + dictionary (e.g. ``renewable: onwind: resource: turbine:``). The years will be + interpreted as years from when the corresponding turbine model substitutes + the previous model for new installations. This will only have an effect on + workflows with foresight "myopic"; support for foresight "perfect" still + needs to be added. -* For industry distribution, use EPRTR as fallback if ETS data is not available. +PyPSA-Eur 0.9.0 (5th January 2024) ================================== + +**New Features** + +* Add option to specify losses for bidirectional links, e.g. pipelines or HVDC + links, in the configuration file under ``sector: transmission_efficiency:``. Users + can specify static or length-dependent values as well as a length-dependent + electricity demand for compression, which is implemented as a multi-link to + the local electricity buses. The bidirectional links will then be split into + two unidirectional links with linked capacities (https://github.com/PyPSA/pypsa-eur/pull/739). + +* Merged option to extend geographical scope to Ukraine and Moldova. These + countries are excluded by default, and their inclusion is currently constrained + to the power-sector-only parts of the workflow. A special config file + `config/config.entsoe-all.yaml` was added as an example to run the workflow + with all ENTSO-E member countries (including observer members like Ukraine and + Moldova). Moldova can currently only be included in conjunction with Ukraine + due to the absence of demand data. The Crimean power system is manually + reconnected to the main Ukrainian grid with the configuration option + `reconnect_crimea` (https://github.com/PyPSA/pypsa-eur/pull/321). + +* New experimental support for multi-decade optimisation with perfect foresight + (``foresight: perfect``). This includes maximum growth rates for carriers, global carbon + budget constraints and emission constraints for particular investment periods. + +* Add option to reference an additional source file where users can specify + custom ``extra_functionality`` constraints in the configuration file. The + default setting points to an empty hull at + ``data/custom_extra_functionality.py`` (https://github.com/PyPSA/pypsa-eur/pull/824). + +* Add locations, capacities and costs of existing gas storage using Global + Energy Monitor's `Europe Gas Tracker + `_ + (https://github.com/PyPSA/pypsa-eur/pull/835). + +* Add option to use `LUISA Base Map + `_ 50m land + coverage dataset for land eligibility analysis in + :mod:`build_renewable_profiles`. Settings are analogous to the CORINE dataset + but with the key ``luisa:`` in the configuration file. To leverage the + dataset's full advantages, set the excluder resolution to 50m + (``excluder_resolution: 50``). For land category codes, see `Annex 1 of the + technical documentation + `_ + (https://github.com/PyPSA/pypsa-eur/pull/842).
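A configuration sketch for the LUISA-based land eligibility described in the previous bullet; the grid codes are illustrative picks, not a vetted exclusion set, and any codes from the Annex 1 list can be substituted:

.. code:: yaml

   renewable:
     onwind:
       luisa:
         grid_codes: [1111, 1121, 1122]          # illustrative LUISA land cover codes
         distance: 1000                          # keep 1000 m from the areas below
         distance_grid_codes: [1111, 1121, 1122]
       excluder_resolution: 50                   # match the 50m resolution of the dataset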
+ +* Add option to capture CO2 contained in biogas when upgrading (``sector: + biogas_to_gas_cc``) (https://github.com/PyPSA/pypsa-eur/pull/615). + +* If load shedding is activated, it is now applied to all carriers, not only + electricity (https://github.com/PyPSA/pypsa-eur/pull/784). + +* Add option for heat vents in district heating (``sector: + central_heat_vent:``). The combination of must-run conditions for some + power-to-X processes, enabled waste heat usage and decreasing heating demand + can lead to infeasibilities in pathway optimisation for some investment + periods, since larger Fischer-Tropsch capacities are needed in early years but + the waste heat exceeds the heat demand in later investment periods + (https://github.com/PyPSA/pypsa-eur/pull/791). + +* Add the possibility to go from copperplated to regionally resolved methanol and + oil demand with switches ``sector: regional_methanol_demand: true`` and + ``sector: regional_oil_demand: true``. This allows nodal/regional CO2 + constraints to be applied (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Allow retrofitting of existing gas boilers to hydrogen boilers in pathway + optimisation. + +* Add option to add time-varying CO2 emission prices (electricity-only, ``costs: + emission_prices: co2_monthly_prices: true``). This is linked to the new + ``{opts}`` wildcard option ``Ept``. + +* Network clustering can now consider efficiency classes when aggregating + carriers. The option ``clustering: consider_efficiency_classes:`` aggregates + each carrier into the top 10-quantile (high), the bottom 90-quantile (low), + and everything in between (medium). + +* Added option ``conventional: dynamic_fuel_price:`` to consider the monthly + fluctuating fuel prices for conventional generators. Refer to the CSV file + ``data/validation/monthly_fuel_price.csv``. + +* For hydro-electricity, add switch ``flatten_dispatch`` to consider an upper + limit for the hydro dispatch. The limit is given by the average capacity + factor plus the buffer given in ``flatten_dispatch_buffer``. + +* Extend options for waste heat usage from Haber-Bosch, methanolisation and + methanation (https://github.com/PyPSA/pypsa-eur/pull/834). + +* Add new ``sector_opts`` wildcard option "nowasteheat" to disable all waste + heat usage (https://github.com/PyPSA/pypsa-eur/pull/834). + +* Add new rule ``retrieve_irena`` to automatically retrieve up-to-date values + for existing renewables capacities (https://github.com/PyPSA/pypsa-eur/pull/756). + +* Print Irreducible Infeasible Subset (IIS) if model is infeasible. Only for + solvers with IIS support (https://github.com/PyPSA/pypsa-eur/pull/841). + +* More wildcard options now have a corresponding config entry. If the wildcard + is given, then its value is used. If the wildcard is not given but the options + in config are enabled, then the value from config is used. If neither is + given, the options are skipped (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Validate downloads from Zenodo using MD5 checksums. This identifies corrupted + or incomplete downloads (https://github.com/PyPSA/pypsa-eur/pull/821). + +* Add rule ``sync`` to synchronise with a remote machine using the ``rsync`` + tool. Configuration settings are found under ``remote:``. + +**Breaking Changes** + +* Remove all negative loads on the ``co2 atmosphere`` bus representing emissions + for e.g. fixed fossil demands for transport oil.
Instead these are handled + more transparently with a fixed transport oil demand and a link taking care of + the emissions to the ``co2 atmosphere`` bus. This is also a preparation for + endogenous transport optimisation, where demand will be subject to + optimisation (e.g. fuel switching in the transport sector) + (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Process emissions from steam crackers (i.e. naphtha processing for HVC) are + now piped from the consumption link to the process emissions bus where the + model can decide about carbon capture. Previously the process emissions for + naphtha were a fixed load (https://github.com/PyPSA/pypsa-eur/pull/827). + +* Distinguish between stored and sequestered CO2. Stored CO2 is stored + overground in tanks and can be used for CCU (e.g. methanolisation). + Sequestered CO2 is stored underground and can no longer be used for CCU. This + distinction is made because storage in tanks is more expensive than + underground storage. The link that connects stored and sequestered CO2 is + unidirectional (https://github.com/PyPSA/pypsa-eur/pull/844). + +* Files extracted from sector-coupled data bundle have been moved from ``data/`` + to ``data/sector-bundle``. + +* Split configuration to enable SMR and SMR CC (``sector: smr:`` and ``sector: + smr_cc:``) (https://github.com/PyPSA/pypsa-eur/pull/757). + +* Add separate option to add resistive heaters to the technology choices + (``sector: resistive_heaters:``). Previously they were always added when + boilers were added (https://github.com/PyPSA/pypsa-eur/pull/808). + +* Remove HELMETH option (``sector: helmeth:``). + +* Remove "conservative" renewable potentials estimation option + (https://github.com/PyPSA/pypsa-eur/pull/838). + +* With this release we stop posting updates to the network pre-builts. + +**Changes** + +* Updated Global Energy Monitor LNG terminal data to March 2023 version + (https://github.com/PyPSA/pypsa-eur/pull/707). + +* For industry distribution, use EPRTR as fallback if ETS data is not available + (https://github.com/PyPSA/pypsa-eur/pull/721). + +* It is now possible to specify years for biomass potentials which do not exist + in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated + (https://github.com/PyPSA/pypsa-eur/pull/744). + +* In pathway mode, the biomass potential is linked to the investment year + (https://github.com/PyPSA/pypsa-eur/pull/744). + +* Increase allowed deployment density of solar to 5.1 MW/sqkm by default. + +* Default to full electrification of land transport by 2050. + +* Provide exogenous transition settings in 5-year steps. + +* Default to approximating transmission losses in HVAC lines + (``transmission_losses: 2``). + +* Use electrolysis waste heat by default. + +* Set minimum part loads for PtX processes to 30% for methanolisation and + methanation, and to 70% for Fischer-Tropsch synthesis. + +* Add VOM as marginal cost to PtX processes + (https://github.com/PyPSA/pypsa-eur/pull/830). + +* Add pelletizing costs for biomass boilers (https://github.com/PyPSA/pypsa-eur/pull/833). + +* Update default offshore wind turbine model to "NREL Reference 2020 ATB 5.5 MW" + (https://github.com/PyPSA/pypsa-eur/pull/832). + +* Switch to using hydrogen and electricity inputs for Haber-Bosch from + https://github.com/PyPSA/technology-data (https://github.com/PyPSA/pypsa-eur/pull/831). + +* The configuration setting for country focus weights when clustering the + network has been moved from ``focus_weights:`` to ``clustering: + focus_weights:``. 
Backwards compatibility to old config files is maintained + (https://github.com/PyPSA/pypsa-eur/pull/794); see the configuration sketch + at the end of this section. + +* The ``mock_snakemake`` function can now be used with a Snakefile from a + different directory using the new ``root_dir`` argument + (https://github.com/PyPSA/pypsa-eur/pull/771). + +* Rule ``purge`` now initiates a dialog to confirm if purge is desired + (https://github.com/PyPSA/pypsa-eur/pull/745). + +* Files downloaded from zenodo are now write-protected to prevent accidental + re-download (https://github.com/PyPSA/pypsa-eur/pull/730). + +* Performance improvements for rule ``build_ship_raster`` + (https://github.com/PyPSA/pypsa-eur/pull/845). + +* Improve time logging in :mod:`build_renewable_profiles` + (https://github.com/PyPSA/pypsa-eur/pull/837). + +* In myopic pathway optimisation, disable power grid expansion if line volume + already hit (https://github.com/PyPSA/pypsa-eur/pull/840). + +* JRC-ENSPRESO data is now downloaded from a Zenodo mirror because the link was + unreliable (https://github.com/PyPSA/pypsa-eur/pull/801). + +* Add focus weights option for clustering to documentation + (https://github.com/PyPSA/pypsa-eur/pull/781). + +* Add proxy for biomass transport costs if no explicit biomass transport network + is considered (https://github.com/PyPSA/pypsa-eur/pull/711). + +**Bugs and Compatibility** + +* The minimum PyPSA version is now 0.26.1. + +* Update to ``tsam>=0.2.3`` for performance improvements in temporal clustering. + +* Pin ``snakemake`` version to below 8.0.0, as the new version is not yet + supported. The next release will switch to the requirement ``snakemake>=8``. + +* Bugfix: Add coke and coal demand for integrated steelworks + (https://github.com/PyPSA/pypsa-eur/pull/718). + +* Bugfix: Make :mod:`build_renewable_profiles` consider subsets of cutout time + scope (https://github.com/PyPSA/pypsa-eur/pull/709). + +* Bugfix: In :mod:`simplify_network`, remove 'underground' column to avoid + consense error (https://github.com/PyPSA/pypsa-eur/pull/714). + +* Bugfix: Fix in :mod:`add_existing_baseyear` to account for the case when there + is no rural heating demand for some nodes in network + (https://github.com/PyPSA/pypsa-eur/pull/706). + +* Bugfix: The unit of the capital cost of Haber-Bosch plants was corrected + (https://github.com/PyPSA/pypsa-eur/pull/829). + +* The minimum capacity for renewable generators when using the myopic option has + been fixed (https://github.com/PyPSA/pypsa-eur/pull/728). + +* Compatibility for running with single node and single country + (https://github.com/PyPSA/pypsa-eur/pull/839). + +* A bug preventing the addition of custom powerplants specified in + ``data/custom_powerplants.csv`` was fixed + (https://github.com/PyPSA/pypsa-eur/pull/732). + +* Fix nodal fraction in :mod:`add_existing_baseyear` when using distributed + generators (https://github.com/PyPSA/pypsa-eur/pull/798). + +* Bugfix: District heating without progress caused division by zero + (https://github.com/PyPSA/pypsa-eur/pull/796). + +* Bugfix: Drop duplicates in :mod:`build_industrial_distribution_key`, which + can occur through the geopandas ``.sjoin()`` function if a point is located on + a border (https://github.com/PyPSA/pypsa-eur/pull/726). + +* For network clustering fall back to ``ipopt`` when ``highs`` is the designated + solver (https://github.com/PyPSA/pypsa-eur/pull/795). + +* Fix typo in buses definition for oil boilers in ``add_industry`` in + :mod:`prepare_sector_network` (https://github.com/PyPSA/pypsa-eur/pull/812).
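For the relocated ``focus_weights`` setting referenced in the Changes above, a sketch of the new location; the country shares are hypothetical and must sum to at most 1:

.. code:: yaml

   clustering:
     focus_weights:
       DE: 0.6   # place 60% of the clusters in Germany
       FR: 0.2   # and 20% in France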
+ +* Resolve code issues for endogenous building retrofitting. Select correct + sector names, address deprecations, and distinguish between district heating + and decentral heating in urban or rural areas for floor area calculations + (https://github.com/PyPSA/pypsa-eur/pull/808). + +* Addressed various deprecations. -* The minimum capacity for renewable generators when using the myopic option has been fixed. PyPSA-Eur 0.8.1 (27th July 2023) ================================ @@ -141,6 +516,8 @@ PyPSA-Eur 0.8.1 (27th July 2023) (https://github.com/PyPSA/pypsa-eur/pull/672) +* Addressed deprecation warnings for ``pandas=2.0``. ``pandas=2.0`` is now the minimum requirement. + PyPSA-Eur 0.8.0 (18th March 2023) ================================= @@ -1402,8 +1779,4 @@ Release Process * Make a `GitHub release `_, which automatically triggers archiving to the `zenodo code repository `_ with `MIT license `_. -* Create pre-built networks for ``config.default.yaml`` by running ``snakemake -call prepare_sector_networks``. - -* Upload pre-built networks to `zenodo data repository `_ with `CC BY 4.0 `_ license. - * Send announcement on the `PyPSA mailing list `_. diff --git a/doc/retrieve.rst b/doc/retrieve.rst index 66c996f5..aac3c201 100644 --- a/doc/retrieve.rst +++ b/doc/retrieve.rst @@ -22,11 +22,11 @@ Rule ``retrieve_databundle`` Rule ``retrieve_cutout`` ============================ -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517949.svg - :target: https://doi.org/10.5281/zenodo.3517949 +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.6382570.svg + :target: https://doi.org/10.5281/zenodo.6382570 Cutouts are spatio-temporal subsets of the European weather data from the `ECMWF ERA5 `_ reanalysis dataset and the `CMSAF SARAH-2 `_ solar surface radiation dataset for the year 2013. -They have been prepared by and are for use with the `atlite `_ tool. You can either generate them yourself using the ``build_cutouts`` rule or retrieve them directly from `zenodo `__ through the rule ``retrieve_cutout``. +They have been prepared by and are for use with the `atlite `_ tool. You can either generate them yourself using the ``build_cutouts`` rule or retrieve them directly from `zenodo `__ through the rule ``retrieve_cutout``. The :ref:`tutorial` uses a smaller cutout than required for the full model (30 MB), which is also automatically downloaded. .. note:: @@ -118,6 +118,11 @@ This rule downloads techno-economic assumptions from the `technology-data reposi - ``resources/costs.csv`` +Rule ``retrieve_irena`` ================================ + +.. automodule:: retrieve_irena + Rule ``retrieve_ship_raster`` ================================ diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..411bfd57 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` ============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +..
automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` ============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` ============================================================================== + +.. automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/doc/spatial_resolution.rst b/doc/spatial_resolution.rst index 0293a5ce..c6e9c3de 100644 --- a/doc/spatial_resolution.rst +++ b/doc/spatial_resolution.rst @@ -45,7 +45,7 @@ Here are some examples of how spatial resolution is set for different sectors in • CO2: It can be modeled as a single node for Europe or it can be nodally resolved with CO2 transport pipelines if activated in the `config `_. It should be mentioned that in single node mode a transport and storage cost is added for sequestered CO2, the cost of which can be adjusted in the `config `_. -• Liquid hydrocarbons: Modeled as a single node for Europe, since transport costs for liquids are low and no bottlenecks are expected. +• Carbonaceous fuels: Modeled as a single node for Europe by default, since transport costs for liquids are low and no bottlenecks are expected. Can be regionally resolved in the configuration. **Electricity distribution network** diff --git a/doc/tutorial.rst b/doc/tutorial.rst index f0ded3fb..e58ad123 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -25,7 +25,7 @@ full model, which allows the user to explore most of its functionalities on a local machine. The tutorial will cover examples on how to configure and customise the PyPSA-Eur model and run the ``snakemake`` workflow step by step from network creation to the solved network. The configuration for the tutorial -is located at ``test/config.electricity.yaml``. It includes parts deviating from +is located at ``config/test/config.electricity.yaml``. It includes parts deviating from the default config file ``config/config.default.yaml``. To run the tutorial with this configuration, execute @@ -96,7 +96,7 @@ open-source solver GLPK. :start-at: solver: :end-before: plotting: -Note, that ``test/config.electricity.yaml`` only includes changes relative to +Note that ``config/test/config.electricity.yaml`` only includes changes relative to the default configuration. There are many more configuration options, which are documented at :ref:`config`.
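Since a user configuration only needs to include deviations from ``config/config.default.yaml``, a sketch of such a file can be very small; the run name and emission limit below are hypothetical:

.. code:: yaml

   # config/config.yaml, containing only deviations from config/config.default.yaml
   run:
     name: "tutorial-run"
   electricity:
     co2limit: 1.0e+8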
@@ -133,89 +133,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i graph[bgcolor=white, margin=0]; node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2]; edge[penwidth=2, color=grey]; - 0[label = "solve_network", color = "0.21 0.6 0.85", style="rounded"]; - 1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.02 0.6 0.85", style="rounded"]; - 2[label = "add_extra_components", color = "0.37 0.6 0.85", style="rounded"]; - 3[label = "cluster_network\nclusters: 6", color = "0.39 0.6 0.85", style="rounded"]; - 4[label = "simplify_network\nsimpl: ", color = "0.11 0.6 0.85", style="rounded"]; - 5[label = "add_electricity", color = "0.23 0.6 0.85", style="rounded"]; - 6[label = "build_renewable_profiles\ntechnology: onwind", color = "0.57 0.6 0.85", style="rounded"]; - 7[label = "base_network", color = "0.09 0.6 0.85", style="rounded"]; - 8[label = "build_shapes", color = "0.41 0.6 0.85", style="rounded"]; - 9[label = "retrieve_databundle", color = "0.28 0.6 0.85", style="rounded"]; - 10[label = "retrieve_natura_raster", color = "0.62 0.6 0.85", style="rounded"]; - 11[label = "build_bus_regions", color = "0.53 0.6 0.85", style="rounded"]; - 12[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.05 0.6 0.85", style="rounded,dashed"]; - 13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.57 0.6 0.85", style="rounded"]; - 14[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"]; - 15[label = "retrieve_ship_raster", color = "0.07 0.6 0.85", style="rounded,dashed"]; - 16[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.05 0.6 0.85", style="rounded,dashed"]; - 17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.57 0.6 0.85", style="rounded"]; - 18[label = "build_renewable_profiles\ntechnology: solar", color = "0.57 0.6 0.85", style="rounded"]; - 19[label = "build_hydro_profile", color = "0.44 0.6 0.85", style="rounded"]; - 20[label = "retrieve_cost_data", color = "0.30 0.6 0.85", style="rounded"]; - 21[label = "build_powerplants", color = "0.16 0.6 0.85", style="rounded"]; - 22[label = "build_electricity_demand", color = "0.00 0.6 0.85", style="rounded"]; - 23[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded,dashed"]; - 1 -> 0 - 2 -> 1 - 20 -> 1 - 3 -> 2 - 20 -> 2 - 4 -> 3 - 20 -> 3 - 5 -> 4 - 20 -> 4 - 11 -> 4 - 6 -> 5 - 13 -> 5 - 17 -> 5 - 18 -> 5 - 19 -> 5 - 7 -> 5 - 20 -> 5 - 11 -> 5 - 21 -> 5 - 9 -> 5 - 22 -> 5 - 8 -> 5 - 7 -> 6 - 9 -> 6 - 10 -> 6 - 8 -> 6 - 11 -> 6 - 12 -> 6 - 8 -> 7 - 9 -> 8 - 8 -> 11 - 7 -> 11 - 7 -> 13 - 9 -> 13 - 10 -> 13 - 14 -> 13 - 8 -> 13 - 11 -> 13 - 12 -> 13 - 15 -> 14 - 12 -> 14 - 16 -> 14 - 7 -> 17 - 9 -> 17 - 10 -> 17 - 14 -> 17 - 8 -> 17 - 11 -> 17 - 12 -> 17 - 7 -> 18 - 9 -> 18 - 10 -> 18 - 8 -> 18 - 11 -> 18 - 16 -> 18 - 8 -> 19 - 12 -> 19 - 7 -> 21 - 23 -> 22 + 0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"]; + 1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"]; + 2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"]; + 3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"]; + 4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"]; + 5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"]; + 6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"]; + 7[label = "base_network", color = 
"0.37 0.6 0.85", style="rounded,dashed"]; + 8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"]; + 9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"]; + 10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"]; + 11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"]; + 12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"]; + 13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"]; + 14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"]; + 15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"]; + 16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"]; + 17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"]; + 18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"]; + 19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"]; + 20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"]; + 21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"]; + 22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"]; + 1 -> 0 + 22 -> 0 + 2 -> 1 + 18 -> 1 + 3 -> 2 + 18 -> 2 + 4 -> 3 + 18 -> 3 + 5 -> 4 + 18 -> 4 + 11 -> 4 + 6 -> 5 + 12 -> 5 + 13 -> 5 + 16 -> 5 + 7 -> 5 + 17 -> 5 + 18 -> 5 + 11 -> 5 + 19 -> 5 + 9 -> 5 + 20 -> 5 + 8 -> 5 + 7 -> 6 + 9 -> 6 + 10 -> 6 + 8 -> 6 + 11 -> 6 + 8 -> 7 + 9 -> 8 + 8 -> 11 + 7 -> 11 + 7 -> 12 + 9 -> 12 + 10 -> 12 + 8 -> 12 + 11 -> 12 + 7 -> 13 + 9 -> 13 + 10 -> 13 + 14 -> 13 + 8 -> 13 + 11 -> 13 + 15 -> 14 + 7 -> 16 + 9 -> 16 + 10 -> 16 + 14 -> 16 + 8 -> 16 + 11 -> 16 + 7 -> 17 + 7 -> 19 + 21 -> 20 } | diff --git a/doc/tutorial_sector.rst b/doc/tutorial_sector.rst index faa8adca..53a60353 100644 --- a/doc/tutorial_sector.rst +++ b/doc/tutorial_sector.rst @@ -59,7 +59,7 @@ To run an overnight / greenfiled scenario with the specifications above, run .. code:: bash - snakemake -call --configfile config/test/config.overnight.yaml all + snakemake -call all --configfile config/test/config.overnight.yaml which will result in the following *additional* jobs ``snakemake`` wants to run on top of those already included in the electricity-only tutorial: @@ -318,7 +318,7 @@ To run a myopic foresight scenario with the specifications above, run .. 
code:: bash - snakemake -call --configfile config/test/config.myopic.yaml all + snakemake -call all --configfile config/test/config.myopic.yaml which will result in the following *additional* jobs ``snakemake`` wants to run: diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index ca2ae848..31a58835 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -1,10 +1,11 @@ -# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: CC0-1.0 name: pypsa-eur channels: - bioconda +- gurobi - http://conda.anaconda.org/gurobi - conda-forge - defaults @@ -12,94 +13,96 @@ dependencies: - _libgcc_mutex=0.1 - _openmp_mutex=4.5 - affine=2.4.0 -- alsa-lib=1.2.9 +- alsa-lib=1.2.10 - ampl-mp=3.1.0 - amply=0.1.6 -- anyio=3.7.1 +- anyio=4.2.0 - appdirs=1.4.4 -- argon2-cffi=21.3.0 +- argon2-cffi=23.1.0 - argon2-cffi-bindings=21.2.0 -- asttokens=2.2.1 -- async-lru=2.0.3 +- arrow=1.3.0 +- asttokens=2.4.1 +- async-lru=2.0.4 - atk-1.0=2.38.0 -- atlite=0.2.11 +- atlite=0.2.12 - attr=2.5.1 -- attrs=23.1.0 -- aws-c-auth=0.7.0 -- aws-c-cal=0.6.0 -- aws-c-common=0.8.23 +- attrs=23.2.0 +- aws-c-auth=0.7.8 +- aws-c-cal=0.6.9 +- aws-c-common=0.9.10 - aws-c-compression=0.2.17 -- aws-c-event-stream=0.3.1 -- aws-c-http=0.7.11 -- aws-c-io=0.13.28 -- aws-c-mqtt=0.8.14 -- aws-c-s3=0.3.13 -- aws-c-sdkutils=0.1.11 -- aws-checksums=0.1.16 -- aws-crt-cpp=0.20.3 -- aws-sdk-cpp=1.10.57 -- babel=2.12.1 -- backcall=0.2.0 -- backports=1.0 -- backports.functools_lru_cache=1.6.5 +- aws-c-event-stream=0.3.2 +- aws-c-http=0.7.15 +- aws-c-io=0.13.36 +- aws-c-mqtt=0.10.0 +- aws-c-s3=0.4.7 +- aws-c-sdkutils=0.1.13 +- aws-checksums=0.1.17 +- aws-crt-cpp=0.25.1 +- aws-sdk-cpp=1.11.210 +- babel=2.14.0 - beautifulsoup4=4.12.2 -- bleach=6.0.0 -- blosc=1.21.4 -- bokeh=3.2.1 -- boost-cpp=1.78.0 +- bleach=6.1.0 +- blosc=1.21.5 +- bokeh=3.3.2 - bottleneck=1.3.7 -- branca=0.6.0 -- brotli=1.0.9 -- brotli-bin=1.0.9 -- brotli-python=1.0.9 +- branca=0.7.0 +- brotli=1.1.0 +- brotli-bin=1.1.0 +- brotli-python=1.1.0 - bzip2=1.0.8 -- c-ares=1.19.1 -- c-blosc2=2.10.0 -- ca-certificates=2023.7.22 -- cairo=1.16.0 -- cartopy=0.21.1 +- c-ares=1.24.0 +- c-blosc2=2.12.0 +- ca-certificates=2023.11.17 +- cached-property=1.5.2 +- cached_property=1.5.2 +- cairo=1.18.0 +- cartopy=0.22.0 - cdsapi=0.6.1 -- certifi=2023.7.22 -- cffi=1.15.1 -- cfitsio=4.2.0 -- cftime=1.6.2 -- charset-normalizer=3.2.0 -- click=8.1.6 +- certifi=2023.11.17 +- cffi=1.16.0 +- cfitsio=4.3.1 +- cftime=1.6.3 +- charset-normalizer=3.3.2 +- click=8.1.7 - click-plugins=1.1.1 - cligj=0.7.2 -- cloudpickle=2.2.1 +- cloudpickle=3.0.0 +- coin-or-cbc=2.10.10 +- coin-or-cgl=0.60.7 +- coin-or-clp=1.17.8 +- coin-or-osi=0.108.8 +- coin-or-utils=2.11.9 +- coincbc=2.10.10 - colorama=0.4.6 -- comm=0.1.3 +- comm=0.1.4 - configargparse=1.7 - connection_pool=0.0.3 -- contourpy=1.1.0 -- country_converter=1.0.0 -- curl=8.2.0 -- cycler=0.11.0 +- contourpy=1.2.0 +- country_converter=1.2 +- cycler=0.12.1 - cytoolz=0.12.2 -- dask=2023.7.1 -- dask-core=2023.7.1 +- dask=2023.12.1 +- dask-core=2023.12.1 - datrie=0.8.2 - dbus=1.13.6 -- debugpy=1.6.7 +- debugpy=1.8.0 - decorator=5.1.1 - defusedxml=0.7.1 - deprecation=2.1.0 - descartes=1.1.0 -- distributed=2023.7.1 +- distributed=2023.12.1 - distro=1.8.0 - docutils=0.20.1 - dpath=2.1.6 - entrypoints=0.4 -- entsoe-py=0.5.10 +- entsoe-py=0.6.1 - et_xmlfile=1.1.0 -- exceptiongroup=1.1.2 -- executing=1.2.0 +- exceptiongroup=1.2.0 +- executing=2.0.1 - 
expat=2.5.0 -- filelock=3.12.2 -- fiona=1.9.4 -- flit-core=3.9.0 -- folium=0.14.0 +- fiona=1.9.5 +- folium=0.15.1 - font-ttf-dejavu-sans-mono=2.37 - font-ttf-inconsolata=3.000 - font-ttf-source-code-pro=2.038 @@ -107,165 +110,184 @@ dependencies: - fontconfig=2.14.2 - fonts-conda-ecosystem=1 - fonts-conda-forge=1 -- fonttools=4.41.1 +- fonttools=4.47.0 +- fqdn=1.5.1 - freetype=2.12.1 -- freexl=1.0.6 +- freexl=2.0.0 - fribidi=1.0.10 -- fsspec=2023.6.0 -- gdal=3.7.0 +- fsspec=2023.12.2 +- gdal=3.7.3 - gdk-pixbuf=2.42.10 - geographiclib=1.52 -- geojson-rewind=1.0.2 -- geopandas=0.13.2 -- geopandas-base=0.13.2 -- geopy=2.3.0 -- geos=3.11.2 +- geojson-rewind=1.1.0 +- geopandas=0.14.1 +- geopandas-base=0.14.1 +- geopy=2.4.1 +- geos=3.12.1 - geotiff=1.7.1 - gettext=0.21.1 - gflags=2.2.2 - giflib=5.2.1 -- gitdb=4.0.10 -- gitpython=3.1.32 -- glib=2.76.4 -- glib-tools=2.76.4 +- gitdb=4.0.11 +- gitpython=3.1.40 +- glib=2.78.3 +- glib-tools=2.78.3 - glog=0.6.0 -- gmp=6.2.1 +- glpk=5.0 +- gmp=6.3.0 - graphite2=1.3.13 -- graphviz=8.1.0 -- gst-plugins-base=1.22.5 -- gstreamer=1.22.5 +- graphviz=9.0.0 +- gst-plugins-base=1.22.8 +- gstreamer=1.22.8 - gtk2=2.24.33 - gts=0.7.6 -- harfbuzz=7.3.0 +- gurobi=11.0.0 +- harfbuzz=8.3.0 - hdf4=4.2.15 -- hdf5=1.14.1 +- hdf5=1.14.3 - humanfriendly=10.0 -- icu=72.1 -- idna=3.4 -- importlib-metadata=6.8.0 -- importlib_metadata=6.8.0 -- importlib_resources=6.0.0 +- icu=73.2 +- idna=3.6 +- importlib-metadata=7.0.1 +- importlib_metadata=7.0.1 +- importlib_resources=6.1.1 - iniconfig=2.0.0 -- ipopt=3.14.12 -- ipykernel=6.24.0 -- ipython=8.14.0 -- ipython_genutils=0.2.0 -- ipywidgets=8.0.7 -- jedi=0.18.2 +- ipopt=3.14.13 +- ipykernel=6.28.0 +- ipython=8.19.0 +- ipywidgets=8.1.1 +- isoduration=20.11.0 +- jedi=0.19.1 - jinja2=3.1.2 -- joblib=1.3.0 -- json-c=0.16 +- joblib=1.3.2 +- json-c=0.17 - json5=0.9.14 -- jsonschema=4.18.4 -- jsonschema-specifications=2023.7.1 +- jsonpointer=2.4 +- jsonschema=4.20.0 +- jsonschema-specifications=2023.12.1 +- jsonschema-with-format-nongpl=4.20.0 - jupyter=1.0.0 -- jupyter-lsp=2.2.0 -- jupyter_client=8.3.0 +- jupyter-lsp=2.2.1 +- jupyter_client=8.6.0 - jupyter_console=6.6.3 -- jupyter_core=5.3.1 -- jupyter_events=0.6.3 -- jupyter_server=2.7.0 -- jupyter_server_terminals=0.4.4 -- jupyterlab=4.0.3 -- jupyterlab_pygments=0.2.2 -- jupyterlab_server=2.24.0 -- jupyterlab_widgets=3.0.8 -- kealib=1.5.1 +- jupyter_core=5.6.1 +- jupyter_events=0.9.0 +- jupyter_server=2.12.1 +- jupyter_server_terminals=0.5.1 +- jupyterlab=4.0.10 +- jupyterlab_pygments=0.3.0 +- jupyterlab_server=2.25.2 +- jupyterlab_widgets=3.0.9 +- kealib=1.5.3 - keyutils=1.6.1 -- kiwisolver=1.4.4 -- krb5=1.21.1 +- kiwisolver=1.4.5 +- krb5=1.21.2 - lame=3.100 -- lcms2=2.15 +- lcms2=2.16 - ld_impl_linux-64=2.40 - lerc=4.0.0 -- libabseil=20230125.3 -- libaec=1.0.6 -- libarchive=3.6.2 -- libarrow=12.0.1 +- libabseil=20230802.1 +- libaec=1.1.2 +- libarchive=3.7.2 +- libarrow=14.0.2 +- libarrow-acero=14.0.2 +- libarrow-dataset=14.0.2 +- libarrow-flight=14.0.2 +- libarrow-flight-sql=14.0.2 +- libarrow-gandiva=14.0.2 +- libarrow-substrait=14.0.2 - libblas=3.9.0 -- libbrotlicommon=1.0.9 -- libbrotlidec=1.0.9 -- libbrotlienc=1.0.9 -- libcap=2.67 +- libboost-headers=1.84.0 +- libbrotlicommon=1.1.0 +- libbrotlidec=1.1.0 +- libbrotlienc=1.1.0 +- libcap=2.69 - libcblas=3.9.0 - libclang=15.0.7 - libclang13=15.0.7 - libcrc32c=1.1.2 - libcups=2.3.3 -- libcurl=8.2.0 -- libdeflate=1.18 +- libcurl=8.5.0 +- libdeflate=1.19 - libedit=3.1.20191231 - libev=4.33 - libevent=2.1.12 - libexpat=2.5.0 - 
libffi=3.4.2 - libflac=1.4.3 -- libgcc-ng=13.1.0 -- libgcrypt=1.10.1 +- libgcc-ng=13.2.0 +- libgcrypt=1.10.3 - libgd=2.3.3 -- libgdal=3.7.0 -- libgfortran-ng=13.1.0 -- libgfortran5=13.1.0 -- libglib=2.76.4 -- libgomp=13.1.0 +- libgdal=3.7.3 +- libgfortran-ng=13.2.0 +- libgfortran5=13.2.0 +- libglib=2.78.3 +- libgomp=13.2.0 - libgoogle-cloud=2.12.0 - libgpg-error=1.47 -- libgrpc=1.56.2 +- libgrpc=1.59.3 +- libhwloc=2.9.1 - libiconv=1.17 -- libjpeg-turbo=2.1.5.1 +- libjpeg-turbo=3.0.0 - libkml=1.3.0 - liblapack=3.9.0 - liblapacke=3.9.0 - libllvm15=15.0.7 - libnetcdf=4.9.2 -- libnghttp2=1.52.0 -- libnsl=2.0.0 +- libnghttp2=1.58.0 +- libnl=3.9.0 +- libnsl=2.0.1 - libnuma=2.0.16 - libogg=1.3.4 -- libopenblas=0.3.23 +- libopenblas=0.3.25 - libopus=1.3.1 +- libparquet=14.0.2 - libpng=1.6.39 -- libpq=15.3 -- libprotobuf=4.23.3 -- librsvg=2.56.1 +- libpq=16.1 +- libprotobuf=4.24.4 +- libre2-11=2023.06.02 +- librsvg=2.56.3 - librttopo=1.1.0 -- libsndfile=1.2.0 +- libsndfile=1.2.2 - libsodium=1.0.18 - libspatialindex=1.9.3 -- libspatialite=5.0.1 -- libsqlite=3.42.0 +- libspatialite=5.1.0 +- libspral=2023.08.02 +- libsqlite=3.44.2 - libssh2=1.11.0 -- libstdcxx-ng=13.1.0 -- libsystemd0=253 -- libthrift=0.18.1 -- libtiff=4.5.1 -- libtool=2.4.7 +- libstdcxx-ng=13.2.0 +- libsystemd0=255 +- libthrift=0.19.0 +- libtiff=4.6.0 - libutf8proc=2.8.0 - libuuid=2.38.1 - libvorbis=1.3.7 -- libwebp=1.3.1 -- libwebp-base=1.3.1 +- libwebp=1.3.2 +- libwebp-base=1.3.2 - libxcb=1.15 -- libxkbcommon=1.5.0 -- libxml2=2.11.4 +- libxcrypt=4.4.36 +- libxkbcommon=1.6.0 +- libxml2=2.11.6 - libxslt=1.1.37 -- libzip=1.9.2 +- libzip=1.10.1 - libzlib=1.2.13 +- linopy=0.3.2 - locket=1.0.0 - lxml=4.9.3 - lz4=4.3.2 - lz4-c=1.9.4 - lzo=2.10 -- mapclassify=2.5.0 +- mapclassify=2.6.1 - markupsafe=2.1.3 -- matplotlib=3.5.3 -- matplotlib-base=3.5.3 +- matplotlib=3.8.2 +- matplotlib-base=3.8.2 - matplotlib-inline=0.1.6 - memory_profiler=0.61.0 -- metis=5.1.1 -- mistune=3.0.0 -- mpg123=1.31.3 -- msgpack-python=1.0.5 +- metis=5.1.0 +- minizip=4.0.4 +- mistune=3.0.2 +- mpg123=1.32.3 +- msgpack-python=1.0.7 - mumps-include=5.2.1 - mumps-seq=5.2.1 - munch=4.0.0 @@ -273,200 +295,202 @@ dependencies: - mysql-common=8.0.33 - mysql-libs=8.0.33 - nbclient=0.8.0 -- nbconvert=7.7.2 -- nbconvert-core=7.7.2 -- nbconvert-pandoc=7.7.2 -- nbformat=5.9.1 +- nbconvert=7.14.0 +- nbconvert-core=7.14.0 +- nbconvert-pandoc=7.14.0 +- nbformat=5.9.2 - ncurses=6.4 -- nest-asyncio=1.5.6 -- netcdf4=1.6.4 -- networkx=3.1 +- nest-asyncio=1.5.8 +- netcdf4=1.6.5 +- networkx=3.2.1 - nomkl=1.0 -- notebook=7.0.0 +- notebook=7.0.6 - notebook-shim=0.2.3 - nspr=4.35 -- nss=3.89 -- numexpr=2.8.4 -- numpy=1.25.1 -- openjdk=17.0.3 +- nss=3.96 +- numexpr=2.8.8 +- numpy=1.26.2 +- openjdk=21.0.1 - openjpeg=2.5.0 - openpyxl=3.1.2 -- openssl=3.1.1 -- orc=1.9.0 -- overrides=7.3.1 -- packaging=23.1 -- pandas=2.0.3 +- openssl=3.2.0 +- orc=1.9.2 +- overrides=7.4.0 +- packaging=23.2 +- pandas=2.1.4 - pandoc=3.1.3 - pandocfilters=1.5.0 - pango=1.50.14 - parso=0.8.3 -- partd=1.4.0 -- patsy=0.5.3 -- pcre2=10.40 +- partd=1.4.1 +- patsy=0.5.5 +- pcre2=10.42 - pexpect=4.8.0 - pickleshare=0.7.5 -- pillow=10.0.0 -- pip=23.2.1 -- pixman=0.40.0 +- pillow=10.2.0 +- pip=23.3.2 +- pixman=0.42.2 - pkgutil-resolve-name=1.3.10 -- plac=1.3.5 -- platformdirs=3.9.1 -- pluggy=1.2.0 +- plac=1.4.2 +- platformdirs=4.1.0 +- pluggy=1.3.0 - ply=3.11 -- pooch=1.7.0 -- poppler=23.05.0 +- poppler=23.12.0 - poppler-data=0.4.12 -- postgresql=15.3 -- powerplantmatching=0.5.7 -- progressbar2=4.2.0 -- proj=9.2.1 -- 
prometheus_client=0.17.1 -- prompt-toolkit=3.0.39 -- prompt_toolkit=3.0.39 -- psutil=5.9.5 +- postgresql=16.1 +- powerplantmatching=0.5.8 +- progressbar2=4.3.2 +- proj=9.3.0 +- prometheus_client=0.19.0 +- prompt-toolkit=3.0.42 +- prompt_toolkit=3.0.42 +- psutil=5.9.7 - pthread-stubs=0.4 - ptyprocess=0.7.0 - pulp=2.7.0 - pulseaudio-client=16.1 - pure_eval=0.2.2 - py-cpuinfo=9.0.0 -- pyarrow=12.0.1 +- pyarrow=14.0.2 +- pyarrow-hotfix=0.6 - pycountry=22.3.5 - pycparser=2.21 -- pygments=2.15.1 +- pygments=2.17.2 - pyomo=6.6.1 -- pyparsing=3.1.0 -- pyproj=3.6.0 -- pyqt=5.15.7 -- pyqt5-sip=12.11.0 +- pyparsing=3.1.1 +- pyproj=3.6.1 +- pypsa=0.26.2 +- pyqt=5.15.9 +- pyqt5-sip=12.12.2 - pyshp=2.3.1 - pysocks=1.7.1 -- pytables=3.8.0 -- pytest=7.4.0 -- python=3.10.12 +- pytables=3.9.2 +- pytest=7.4.4 +- python=3.11.7 - python-dateutil=2.8.2 -- python-fastjsonschema=2.18.0 +- python-fastjsonschema=2.19.1 - python-json-logger=2.0.7 -- python-tzdata=2023.3 -- python-utils=3.7.0 -- python_abi=3.10 -- pytz=2023.3 +- python-tzdata=2023.4 +- python-utils=3.8.1 +- python_abi=3.11 +- pytz=2023.3.post1 - pyxlsb=1.0.10 -- pyyaml=6.0 -- pyzmq=25.1.0 +- pyyaml=6.0.1 +- pyzmq=25.1.2 - qt-main=5.15.8 -- qtconsole=5.4.3 -- qtconsole-base=5.4.3 -- qtpy=2.3.1 -- rasterio=1.3.8 -- rdma-core=28.9 -- re2=2023.03.02 +- qtconsole-base=5.5.1 +- qtpy=2.4.1 +- rasterio=1.3.9 +- rdma-core=49.0 +- re2=2023.06.02 - readline=8.2 -- referencing=0.30.0 +- referencing=0.32.0 - requests=2.31.0 - reretry=0.11.8 - rfc3339-validator=0.1.4 - rfc3986-validator=0.1.1 -- rioxarray=0.14.1 -- rpds-py=0.9.2 -- rtree=1.0.1 -- s2n=1.3.46 -- scikit-learn=1.3.0 -- scipy=1.11.1 +- rioxarray=0.15.0 +- rpds-py=0.16.2 +- rtree=1.1.0 +- s2n=1.4.1 +- scikit-learn=1.3.2 +- scipy=1.11.4 - scotch=6.0.9 -- seaborn=0.12.2 -- seaborn-base=0.12.2 +- seaborn=0.13.0 +- seaborn-base=0.13.0 - send2trash=1.8.2 -- setuptools=68.0.0 -- setuptools-scm=7.1.0 -- setuptools_scm=7.1.0 -- shapely=2.0.1 -- sip=6.7.10 +- setuptools=69.0.3 +- setuptools-scm=8.0.4 +- setuptools_scm=8.0.4 +- shapely=2.0.2 +- sip=6.7.12 - six=1.16.0 -- smart_open=6.3.0 -- smmap=3.0.5 -- snakemake-minimal=7.30.2 +- smart_open=6.4.0 +- smmap=5.0.0 +- snakemake-minimal=7.32.4 - snappy=1.1.10 - sniffio=1.3.0 - snuggs=1.4.7 - sortedcontainers=2.4.0 -- soupsieve=2.3.2.post1 -- sqlite=3.42.0 +- soupsieve=2.5 +- sqlite=3.44.2 - stack_data=0.6.2 -- statsmodels=0.14.0 +- statsmodels=0.14.1 - stopit=1.1.2 -- tabula-py=2.6.0 +- tabula-py=2.7.0 - tabulate=0.9.0 -- tblib=1.7.0 -- terminado=0.17.1 +- tblib=3.0.0 +- terminado=0.18.0 - threadpoolctl=3.2.0 -- throttler=1.2.1 -- tiledb=2.13.2 +- throttler=1.2.2 +- tiledb=2.18.2 - tinycss2=1.2.1 -- tk=8.6.12 +- tk=8.6.13 - toml=0.10.2 - tomli=2.0.1 - toolz=0.12.0 - toposort=1.10 -- tornado=6.3.2 -- tqdm=4.65.0 -- traitlets=5.9.0 -- typing-extensions=4.7.1 -- typing_extensions=4.7.1 +- tornado=6.3.3 +- tqdm=4.66.1 +- traitlets=5.14.1 +- types-python-dateutil=2.8.19.14 +- typing-extensions=4.9.0 +- typing_extensions=4.9.0 - typing_utils=0.1.0 -- tzcode=2023c -- tzdata=2023c -- ucx=1.14.1 -- unicodedata2=15.0.0 -- unidecode=1.3.6 -- unixodbc=2.3.10 -- urllib3=2.0.4 -- wcwidth=0.2.6 +- tzcode=2023d +- tzdata=2023d +- ucx=1.15.0 +- unidecode=1.3.7 +- unixodbc=2.3.12 +- uri-template=1.3.0 +- uriparser=0.9.7 +- urllib3=2.1.0 +- validators=0.22.0 +- wcwidth=0.2.12 +- webcolors=1.13 - webencodings=0.5.1 -- websocket-client=1.6.1 -- wheel=0.41.0 -- widgetsnbextension=4.0.8 -- wrapt=1.15.0 -- xarray=2023.7.0 +- websocket-client=1.7.0 +- wheel=0.42.0 +- 
widgetsnbextension=4.0.9 +- wrapt=1.16.0 +- xarray=2023.12.0 - xcb-util=0.4.0 - xcb-util-image=0.4.0 - xcb-util-keysyms=0.4.0 - xcb-util-renderutil=0.3.9 - xcb-util-wm=0.4.1 -- xerces-c=3.2.4 -- xkeyboard-config=2.39 +- xerces-c=3.2.5 +- xkeyboard-config=2.40 - xlrd=2.0.1 - xorg-fixesproto=5.0 - xorg-inputproto=2.3.2 - xorg-kbproto=1.0.7 - xorg-libice=1.1.1 - xorg-libsm=1.2.4 -- xorg-libx11=1.8.6 +- xorg-libx11=1.8.7 - xorg-libxau=1.0.11 - xorg-libxdmcp=1.1.3 - xorg-libxext=1.3.4 - xorg-libxfixes=5.0.3 - xorg-libxi=1.7.10 - xorg-libxrender=0.9.11 +- xorg-libxt=1.3.0 - xorg-libxtst=1.2.3 - xorg-recordproto=1.14.2 - xorg-renderproto=0.11.1 - xorg-xextproto=7.3.0 - xorg-xf86vidmodeproto=2.3.1 - xorg-xproto=7.0.31 -- xyzservices=2023.7.0 +- xyzservices=2023.10.1 - xz=5.2.6 - yaml=0.2.5 -- yte=1.5.1 -- zeromq=4.3.4 +- yte=1.5.4 +- zeromq=4.3.5 - zict=3.0.0 -- zipp=3.16.2 +- zipp=3.17.0 - zlib=1.2.13 - zlib-ng=2.0.7 -- zstd=1.5.2 +- zstd=1.5.5 - pip: - - gurobipy==10.0.2 - - linopy==0.2.2 - - pypsa==0.25.1 - - tsam==2.3.0 - - validators==0.20.0 + - highspy==1.5.3 + - tsam==2.3.1 diff --git a/envs/environment.yaml b/envs/environment.yaml index c3af36bb..88e394fa 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -11,6 +11,8 @@ dependencies: - pip - atlite>=0.2.9 +- pypsa>=0.26.1 +- linopy - dask # Dependencies of the workflow itself @@ -18,23 +20,25 @@ dependencies: - openpyxl!=3.1.1 - pycountry - seaborn -- snakemake-minimal>=7.7.0 + # snakemake 8 introduced a number of breaking changes which the workflow has yet to be made compatible with +- snakemake-minimal>=7.7.0,<8.0.0 - memory_profiler - yaml - pytables - lxml -- powerplantmatching>=0.5.5 +- powerplantmatching>=0.5.5,!=0.5.9 - numpy -- pandas>=1.4 +- pandas>=2.1 - geopandas>=0.11.0 -- xarray +- xarray>=2023.11.0 - rioxarray - netcdf4 - networkx - scipy +- glpk - shapely>=2.0 -- pyomo -- matplotlib<3.6 +- pyscipopt +- matplotlib - proj - fiona - country_converter @@ -44,6 +48,7 @@ dependencies: - tabula-py - pyxlsb - graphviz +- pre-commit # Keep in conda environment when calling ipython - ipython @@ -55,5 +60,5 @@ dependencies: - pip: - - tsam>=1.1.0 - - pypsa>=0.25.1 + - tsam>=2.3.1 + - highspy diff --git a/envs/retrieve.yaml b/envs/retrieve.yaml new file mode 100644 index 00000000..b5db795d --- /dev/null +++ b/envs/retrieve.yaml @@ -0,0 +1,13 @@ +# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT + +name: pypsa-eur-retrieve +channels: +- conda-forge +- bioconda +dependencies: +- python>=3.8 +- snakemake-minimal>=7.7.0,<8.0.0 +- pandas>=2.1 +- tqdm diff --git a/graphics/workflow.png b/graphics/workflow.png index f60f3462..a43f240d 100644 Binary files a/graphics/workflow.png and b/graphics/workflow.png differ diff --git a/rules/build_electricity.smk b/rules/build_electricity.smk index be72be6d..fd925d1f 100644 --- a/rules/build_electricity.smk +++ b/rules/build_electricity.smk @@ -20,7 +20,7 @@ if config["enable"].get("prepare_links_p_nom", False): rule build_electricity_demand: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config provider countries=config_provider("countries"), load=config_provider("load"), input: @@ -41,6 +41,7 @@ rule build_powerplants: params: powerplants_filter=config_provider("electricity", "powerplants_filter"), custom_powerplants=config_provider("electricity", "custom_powerplants"), + everywhere_powerplants=config_provider("electricity", 
"everywhere_powerplants"), countries=config_provider("countries"), input: base_network=resources("networks/base.nc"), @@ -61,7 +62,7 @@ rule build_powerplants: rule base_network: params: countries=config_provider("countries"), - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config provider lines=config_provider("lines"), links=config_provider("links"), transformers=config_provider("transformers"), @@ -144,7 +145,7 @@ if config["enable"].get("build_cutout", False): rule build_cutout: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config provider cutouts=config_provider("atlite", "cutouts"), input: regions_onshore=resources("regions_onshore.geojson"), @@ -206,10 +207,62 @@ rule build_ship_raster: "../scripts/build_ship_raster.py" +rule determine_availability_matrix_MD_UA: + input: + copernicus="data/Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", + wdpa="data/WDPA.gpkg", + wdpa_marine="data/WDPA_WDOECM_marine.gpkg", + gebco=lambda w: ( + "data/bundle/GEBCO_2014_2D.nc" + if "max_depth" in config["renewable"][w.technology].keys() + else [] + ), + ship_density=lambda w: ( + RESOURCES + "shipdensity_raster.tif" + if "ship_threshold" in config["renewable"][w.technology].keys() + else [] + ), + country_shapes=RESOURCES + "country_shapes.geojson", + offshore_shapes=RESOURCES + "offshore_shapes.geojson", + regions=lambda w: ( + RESOURCES + "regions_onshore.geojson" + if w.technology in ("onwind", "solar") + else RESOURCES + "regions_offshore.geojson" + ), + cutout=lambda w: "cutouts/" + + CDIR + + config["renewable"][w.technology]["cutout"] + + ".nc", + output: + availability_matrix=RESOURCES + "availability_matrix_MD-UA_{technology}.nc", + availability_map=RESOURCES + "availability_matrix_MD-UA_{technology}.png", + log: + LOGS + "determine_availability_matrix_MD_UA_{technology}.log", + threads: ATLITE_NPROCESSES + resources: + mem_mb=ATLITE_NPROCESSES * 5000, + conda: + "../envs/environment.yaml" + script: + "../scripts/determine_availability_matrix_MD_UA.py" + + +# Optional input when having Ukraine (UA) or Moldova (MD) in the countries list +if {"UA", "MD"}.intersection(set(config["countries"])): + opt = { + "availability_matrix_MD_UA": RESOURCES + + "availability_matrix_MD-UA_{technology}.nc" + } +else: + opt = {} + + rule build_renewable_profiles: params: + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config provider renewable=config_provider("renewable"), input: + **opt, base_network=resources("networks/base.nc"), corine=ancient("data/bundle/corine/g250_clc06_V18_5.tif"), natura=lambda w: ( @@ -217,6 +270,11 @@ rule build_renewable_profiles: if config_provider("renewable", w.technology, "natura")(w) else [] ), + luisa=lambda w: ( + "data/LUISA_basemap_020321_50m.tif" + if config["renewable"][w.technology].get("luisa") + else [] + ), gebco=ancient( lambda w: ( "data/bundle/GEBCO_2014_2D.nc" @@ -298,6 +356,8 @@ rule build_hydro_profile: if config["lines"]["dynamic_line_rating"]["activate"]: rule build_line_rating: + params: + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: base_network=resources("networks/base.nc"), cutout="cutouts/" @@ -355,6 +415,7 @@ rule add_electricity: else [], load=resources("load.csv"), nuts3_shapes=resources("nuts3_shapes.geojson"), + 
ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv", output: resources("networks/elec.nc"), log: @@ -376,7 +437,7 @@ rule simplify_network: aggregation_strategies=config_provider( "clustering", "aggregation_strategies", default={} ), - focus_weights=config_provider("focus_weights", default=None), + focus_weights=config_provider("clustering", "focus_weights", default=None), renewable_carriers=config_provider("electricity", "renewable_carriers"), max_hours=config_provider("electricity", "max_hours"), length_factor=config_provider("lines", "length_factor"), @@ -413,7 +474,7 @@ rule cluster_network: "clustering", "aggregation_strategies", default={} ), custom_busmap=config_provider("enable", "custom_busmap", default=False), - focus_weights=config_provider("focus_weights", default=None), + focus_weights=config_provider("clustering", "focus_weights", default=None), renewable_carriers=config_provider("electricity", "renewable_carriers"), conventional_carriers=config_provider( "electricity", "conventional_carriers", default=[] @@ -476,17 +537,24 @@ rule add_extra_components: rule prepare_network: params: + snapshots={ + "resolution": config["snapshots"].get("resolution", False), + "segmentation": config["snapshots"].get("segmentation", False), + }, # TODO: use config provider links=config_provider("links"), lines=config_provider("lines"), co2base=config_provider("electricity", "co2base"), + co2limit_enable=config_provider("electricity", "co2limit_enable", default=False), co2limit=config_provider("electricity", "co2limit"), + gaslimit_enable=config_provider("electricity", "gaslimit_enable", default=False), gaslimit=config_provider("electricity", "gaslimit"), max_hours=config_provider("electricity", "max_hours"), costs=config_provider("costs"), + autarky=config_provider("electricity", "autarky", default={}), input: resources("networks/elec_s{simpl}_{clusters}_ec.nc"), tech_costs=COSTS, - co2_price=resources("co2_price.csv"), + co2_price=lambda w: resources("co2_price.csv") if "Ept" in w.opts else [], output: resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), log: diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 483dcdb7..f0081d32 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -67,107 +67,107 @@ rule build_simplified_population_layouts: "../scripts/build_clustered_population_layouts.py" -if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]: - - rule build_gas_network: - input: - gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson", - output: - cleaned_gas_network=resources("gas_network.csv"), - resources: - mem_mb=4000, - log: - logs("build_gas_network.log"), - conda: - "../envs/environment.yaml" - script: - "../scripts/build_gas_network.py" - - rule build_gas_input_locations: - input: - lng=HTTP.remote( - "https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx", - keep_local=True, - ), - entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson", - production="data/gas_network/scigrid-gas/data/IGGIELGN_Productions.geojson", - regions_onshore=resources( - "regions_onshore_elec_s{simpl}_{clusters}.geojson" - ), - regions_offshore=resources( - "regions_offshore_elec_s{simpl}_{clusters}.geojson" - ), - output: - gas_input_nodes=resources("gas_input_locations_s{simpl}_{clusters}.geojson"), - gas_input_nodes_simplified=resources( - "gas_input_locations_s{simpl}_{clusters}_simplified.csv" - ), - resources: - mem_mb=2000, - log: - 
logs("build_gas_input_locations_s{simpl}_{clusters}.log"), - conda: - "../envs/environment.yaml" - script: - "../scripts/build_gas_input_locations.py" - - rule cluster_gas_network: - input: - cleaned_gas_network=resources("gas_network.csv"), - regions_onshore=resources( - "regions_onshore_elec_s{simpl}_{clusters}.geojson" - ), - regions_offshore=resources( - "regions_offshore_elec_s{simpl}_{clusters}.geojson" - ), - output: - clustered_gas_network=resources("gas_network_elec_s{simpl}_{clusters}.csv"), - resources: - mem_mb=4000, - log: - logs("cluster_gas_network_s{simpl}_{clusters}.log"), - conda: - "../envs/environment.yaml" - script: - "../scripts/cluster_gas_network.py" - - gas_infrastructure = { - **rules.cluster_gas_network.output, - **rules.build_gas_input_locations.output, - } +rule build_gas_network: + input: + gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson", + output: + cleaned_gas_network=resources("gas_network.csv"), + resources: + mem_mb=4000, + log: + logs("build_gas_network.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_gas_network.py" -if not (config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]): - # this is effecively an `else` statement which is however not liked by snakefmt - - gas_infrastructure = {} +rule build_gas_input_locations: + input: + gem=HTTP.remote( + "https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx", + keep_local=True, + ), + entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson", + storage="data/gas_network/scigrid-gas/data/IGGIELGN_Storages.geojson", + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + gas_input_nodes=resources("gas_input_locations_s{simpl}_{clusters}.geojson"), + gas_input_nodes_simplified=resources("gas_input_locations_s{simpl}_{clusters}_simplified.csv"), + resources: + mem_mb=2000, + log: + logs("build_gas_input_locations_s{simpl}_{clusters}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/build_gas_input_locations.py" -rule build_heat_demands: +rule cluster_gas_network: + input: + cleaned_gas_network=resources("gas_network.csv"), + regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), + regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), + output: + clustered_gas_network=resources("gas_network_elec_s{simpl}_{clusters}.csv"), + resources: + mem_mb=4000, + log: + logs("cluster_gas_network_s{simpl}_{clusters}.log"), + conda: + "../envs/environment.yaml" + script: + "../scripts/cluster_gas_network.py" + + +rule build_daily_heat_demand: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config_provider input: pop_layout=resources("pop_layout_{scope}.nc"), regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=resources("heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), + heat_demand=resources("daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc"), resources: mem_mb=20000, threads: 8 log: - logs("build_heat_demands_{scope}_{simpl}_{clusters}.loc"), + logs("build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc"), benchmark: - benchmarks("build_heat_demands/{scope}_s{simpl}_{clusters}") + 
benchmarks("build_daily_heat_demand/{scope}_s{simpl}_{clusters}") conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config_provider input: pop_layout=resources("pop_layout_{scope}.nc"), regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), @@ -219,7 +219,7 @@ rule build_cop_profiles: rule build_solar_thermal_profiles: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO use config_provider solar_thermal=config_provider("solar_thermal"), input: pop_layout=resources("pop_layout_{scope}.nc"), @@ -246,15 +246,16 @@ rule build_energy_totals: energy=config_provider("energy"), input: nuts3_shapes=resources("nuts3_shapes.geojson"), - co2="data/eea/UNFCCC_v23.csv", - swiss="data/switzerland-sfoe/switzerland-new_format.csv", - idees="data/jrc-idees-2015", + co2="data/bundle-sector/eea/UNFCCC_v23.csv", + swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv", + idees="data/bundle-sector/jrc-idees-2015", district_heat_share="data/district_heat_share.csv", eurostat=input_eurostat, output: energy_name=resources("energy_totals.csv"), co2_name=resources("co2_totals.csv"), transport_name=resources("transport_data.csv"), + district_heat_share=resources("district_heat_share.csv"), threads: 16 resources: mem_mb=10000, @@ -273,10 +274,10 @@ rule build_biomass_potentials: biomass=config_provider("biomass"), input: enspreso_biomass=HTTP.remote( - "https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx", + "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx", keep_local=True, ), - nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21 + nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21 regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"), swiss_cantons=ancient("data/bundle/ch_cantons.csv"), @@ -284,16 +285,16 @@ rule build_biomass_potentials: country_shapes=resources("country_shapes.geojson"), output: biomass_potentials_all=resources( - "biomass_potentials_all_s{simpl}_{clusters}.csv" + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv" ), - biomass_potentials=resources("biomass_potentials_s{simpl}_{clusters}.csv"), + biomass_potentials=resources("biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv"), threads: 1 resources: mem_mb=1000, log: - 
logs("build_biomass_potentials_s{simpl}_{clusters}.log"), + logs("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log"), benchmark: - benchmarks("build_biomass_potentials_s{simpl}_{clusters}") + benchmarks("build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}") conda: "../envs/environment.yaml" script: @@ -374,7 +375,7 @@ if not config["sector"]["regional_co2_sequestration_potential"]["enable"]: rule build_salt_cavern_potentials: input: - salt_caverns="data/h2_salt_caverns_GWh_per_sqkm.geojson", + salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson", regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), regions_offshore=resources("regions_offshore_elec_s{simpl}_{clusters}.geojson"), output: @@ -396,7 +397,7 @@ rule build_ammonia_production: params: countries=config_provider("countries"), input: - usgs="data/myb1-2017-nitro.xls", + usgs="data/bundle-sector/myb1-2017-nitro.xls", output: ammonia_production=resources("ammonia_production.csv"), threads: 1 @@ -418,7 +419,7 @@ rule build_industry_sector_ratios: ammonia=config_provider("sector", "ammonia", default=False), input: ammonia_production=resources("ammonia_production.csv"), - idees="data/jrc-idees-2015", + idees="data/bundle-sector/jrc-idees-2015", output: industry_sector_ratios=resources("industry_sector_ratios.csv"), threads: 1 @@ -440,8 +441,8 @@ rule build_industrial_production_per_country: countries=config_provider("countries"), input: ammonia_production=resources("ammonia_production.csv"), - jrc="data/jrc-idees-2015", - eurostat="data/eurostat-energy_balances-may_2018_edition", + jrc="data/bundle-sector/jrc-idees-2015", + eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition", output: industrial_production_per_country=resources( "industrial_production_per_country.csv" @@ -496,7 +497,7 @@ rule build_industrial_distribution_key: input: regions_onshore=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), - hotmaps_industrial_database="data/Industrial_Database.csv", + hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv", output: industrial_distribution_key=resources( "industrial_distribution_key_elec_s{simpl}_{clusters}.csv" @@ -582,7 +583,7 @@ rule build_industrial_energy_demand_per_country_today: countries=config_provider("countries"), industry=config_provider("industry"), input: - jrc="data/jrc-idees-2015", + jrc="data/bundle-sector/jrc-idees-2015", ammonia_production=resources("ammonia_production.csv"), industrial_production_per_country=resources( "industrial_production_per_country.csv" @@ -637,7 +638,7 @@ if config["sector"]["retrofitting"]["retro_endogen"]: countries=config_provider("countries"), input: building_stock="data/retro/data_building_stock.csv", - data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", + data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv", air_temperature=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), u_values_PL="data/retro/u_values_poland.csv", tax_w="data/retro/electricity_taxes_eu.csv", @@ -706,7 +707,7 @@ rule build_shipping_demand: rule build_transport_demand: params: - snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config_provider sector=config_provider("sector"), input: clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), @@ -714,8 +715,8 @@ rule 
build_transport_demand: "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" ), transport_data=resources("transport_data.csv"), - traffic_data_KFZ="data/emobility/KFZ__count", - traffic_data_Pkw="data/emobility/Pkw__count", + traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count", + traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count", temp_air_total=resources("temp_air_total_elec_s{simpl}_{clusters}.nc"), output: transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), @@ -733,6 +734,60 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule prepare_sector_network: params: co2_budget=config_provider("co2_budget"), @@ -753,26 +808,31 @@ rule prepare_sector_network: input: **build_retro_cost_output, **build_biomass_transport_costs_output, - **gas_infrastructure, + **rules.cluster_gas_network.output, + **rules.build_gas_input_locations.output, **build_sequestration_potentials_output, network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), energy_totals_name=resources("energy_totals.csv"), eurostat=input_eurostat, - pop_weighted_energy_totals=resources( - "pop_weighted_energy_totals_s{simpl}_{clusters}.csv" - ), + pop_weighted_energy_totals=resources("pop_weighted_energy_totals_s{simpl}_{clusters}.csv"), shipping_demand=resources("shipping_demand_s{simpl}_{clusters}.csv"), transport_demand=resources("transport_demand_s{simpl}_{clusters}.csv"), transport_data=resources("transport_data_s{simpl}_{clusters}.csv"), avail_profile=resources("avail_profile_s{simpl}_{clusters}.csv"), dsm_profile=resources("dsm_profile_s{simpl}_{clusters}.csv"), co2_totals_name=resources("co2_totals.csv"), - co2="data/eea/UNFCCC_v23.csv", - biomass_potentials=resources("biomass_potentials_s{simpl}_{clusters}.csv"), - 
heat_profile="data/heat_load_profile_BDEW.csv", - costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{planning_horizons}.csv", + co2="data/bundle-sector/eea/UNFCCC_v23.csv", + biomass_potentials=( + resources("biomass_potentials_s{simpl}_{clusters}_" + + "{}.csv".format(config["biomass"]["year"])) + if config["foresight"] == "overnight" + else resources("biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv") + ), + costs=( + "data/costs_{}.csv".format(config["costs"]["year"]) + if config["foresight"] == "overnight" + else "data/costs_{planning_horizons}.csv" + ), profile_offwind_ac=resources("profile_offwind-ac.nc"), profile_offwind_dc=resources("profile_offwind-dc.nc"), h2_cavern=resources("salt_cavern_potentials_s{simpl}_{clusters}.csv"), @@ -780,12 +840,9 @@ rule prepare_sector_network: busmap=resources("busmap_elec_s{simpl}_{clusters}.csv"), clustered_pop_layout=resources("pop_layout_elec_s{simpl}_{clusters}.csv"), simplified_pop_layout=resources("pop_layout_elec_s{simpl}.csv"), - industrial_demand=resources( - "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv" - ), - heat_demand_urban=resources("heat_demand_urban_elec_s{simpl}_{clusters}.nc"), - heat_demand_rural=resources("heat_demand_rural_elec_s{simpl}_{clusters}.nc"), - heat_demand_total=resources("heat_demand_total_elec_s{simpl}_{clusters}.nc"), + industrial_demand=resources("industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv"), + hourly_heat_demand_total=resources("hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc"), + district_heat_share=resources("district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv"), temp_soil_total=resources("temp_soil_total_elec_s{simpl}_{clusters}.nc"), temp_soil_rural=resources("temp_soil_rural_elec_s{simpl}_{clusters}.nc"), temp_soil_urban=resources("temp_soil_urban_elec_s{simpl}_{clusters}.nc"), @@ -798,21 +855,21 @@ rule prepare_sector_network: cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), cop_air_rural=resources("cop_air_rural_elec_s{simpl}_{clusters}.nc"), cop_air_urban=resources("cop_air_urban_elec_s{simpl}_{clusters}.nc"), - solar_thermal_total=resources( - "solar_thermal_total_elec_s{simpl}_{clusters}.nc" - ) - if config["sector"]["solar_thermal"] - else [], - solar_thermal_urban=resources( - "solar_thermal_urban_elec_s{simpl}_{clusters}.nc" - ) - if config["sector"]["solar_thermal"] - else [], - solar_thermal_rural=resources( - "solar_thermal_rural_elec_s{simpl}_{clusters}.nc" - ) - if config["sector"]["solar_thermal"] - else [], + solar_thermal_total=( + resources("solar_thermal_total_elec_s{simpl}_{clusters}.nc") + if config["sector"]["solar_thermal"] + else [] + ), + solar_thermal_urban=( + resources("solar_thermal_urban_elec_s{simpl}_{clusters}.nc") + if config["sector"]["solar_thermal"] + else [] + ), + solar_thermal_rural=( + resources("solar_thermal_rural_elec_s{simpl}_{clusters}.nc") + if config["sector"]["solar_thermal"] + else [] + ), output: RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", diff --git a/rules/collect.smk b/rules/collect.smk index e0f19a4c..0fb19165 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -11,7 +11,6 @@ localrules: prepare_sector_networks, solve_elec_networks, solve_sector_networks, - plot_networks, rule all: @@ -76,17 +75,7 @@ rule solve_sector_networks: ), -rule plot_elec_networks: - input: - expand( - RESULTS - + 
"figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}", - **config["scenario"], - run=config["run"]["name"] - ), - - -rule plot_networks: +rule solve_sector_networks_perfect: input: expand( RESULTS diff --git a/rules/common.smk b/rules/common.smk index c4b8e6d2..b6c0f734 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -5,6 +5,16 @@ import copy from functools import partial, lru_cache +import os, sys, glob + +helper_source_path = [match for match in glob.glob("**/_helpers.py", recursive=True)] + +for path in helper_source_path: + path = os.path.dirname(os.path.abspath(path)) + sys.path.insert(0, os.path.abspath(path)) + +from _helpers import validate_checksum + def get_config(config, keys, default=None): """Retrieve a nested value from a dictionary using a tuple of keys.""" @@ -67,6 +77,13 @@ def config_provider(*keys, default=None): return partial(static_getter, keys=keys, default=default) +def solver_threads(w): + solver_options = config["solving"]["solver_options"] + option_set = config["solving"]["solver"]["options"] + threads = solver_options[option_set].get("threads", 4) + return threads + + def memory(w): factor = 3.0 for o in w.opts.split("-"): @@ -87,6 +104,13 @@ def memory(w): return int(factor * (10000 + 195 * int(w.clusters))) +def input_custom_extra_functionality(w): + path = config["solving"]["options"].get("custom_extra_functionality", False) + if path: + return os.path.join(os.path.dirname(workflow.snakefile), path) + return [] + + # Check if the workflow has access to the internet by trying to access the HEAD of specified url def has_internet_access(url="www.zenodo.org") -> bool: import http.client as http_client @@ -106,7 +130,7 @@ def has_internet_access(url="www.zenodo.org") -> bool: def input_eurostat(w): # 2016 includes BA, 2017 does not report_year = config["energy"]["eurostat_report_year"] - return f"data/eurostat-energy_balances-june_{report_year}_edition" + return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition" def solved_previous_horizon(wildcards): diff --git a/rules/postprocess.smk b/rules/postprocess.smk index c37c688e..19dc34be 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -7,31 +7,139 @@ localrules: copy_config, -rule plot_network: - params: - foresight=config_provider("foresight"), - plotting=config_provider("plotting"), - input: - network=RESULTS - + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", - regions=resources("regions_onshore_elec_s{simpl}_{clusters}.geojson"), - output: - map=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf", - threads: 2 - resources: - mem_mb=10000, - benchmark: - ( +if config_provider("foresight") != "perfect": + + rule plot_power_network_clustered: + params: + plotting=config_provider("plotting"), + input: + network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", + regions_onshore=RESOURCES + + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + threads: 1 + resources: + mem_mb=4000, + benchmark: + BENCHMARKS + "plot_power_network_clustered/elec_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + 
"../scripts/plot_power_network_clustered.py" + + rule plot_power_network: + params: + plotting=config_provider("plotting"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + ( + LOGS + + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ), + benchmark: + ( + BENCHMARKS + + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network.py" + + rule plot_hydrogen_network: + params: + plotting=config_provider("plotting"), + foresight=config_provider("foresight"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + ( + LOGS + + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ), + benchmark: + ( + BENCHMARKS + + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_hydrogen_network.py" + + rule plot_gas_network: + params: + plotting=config_provider("plotting"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + log: + ( + LOGS + + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log" + ), + benchmark: + ( + BENCHMARKS + + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_gas_network.py" + + +if config_provider("foresight") == "perfect": + + rule plot_power_network_perfect: + params: + plotting=config_provider("plotting"), + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + **{ + f"map_{year}": RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_" + + f"{year}.pdf" + for year in config_provider("scenario", "planning_horizons") + }, + threads: 2 + resources: + mem_mb=10000, + benchmark: BENCHMARKS - + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" - ) - conda: - "../envs/environment.yaml" - script: - "../scripts/plot_network.py" + +"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark" + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network_perfect.py" rule copy_config: @@ -54,25 +162,55 @@ rule make_summary: params: foresight=config_provider("foresight"), costs=config_provider("costs"), - 
snapshots=config_provider("snapshots"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config_provider scenario=config_provider("scenario"), RDIR=RDIR, input: + expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"], + ), networks=expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", **config["scenario"], run=config["run"]["name"] ), - costs="data/costs_{}.csv".format(config["costs"]["year"]) - if config["foresight"] == "overnight" - else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - plots=expand( + costs=( + "data/costs_{}.csv".format(config["costs"]["year"]) + if config_provider("foresight") == "overnight" + else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]) + ), + ac_plot=expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"], + ), + costs_plot=expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", **config["scenario"], run=config["run"]["name"] ), + h2_plot=expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config["sector"]["H2_network"] + else [] + ), + **config["scenario"], + run=config["run"]["name"] + ), + ch4_plot=expand( + ( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config["sector"]["gas_network"] + else [] + ), + **config["scenario"], + run=config["run"]["name"] + ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", @@ -116,7 +254,7 @@ rule plot_summary: energy=RESULTS + "csvs/energy.csv", balances=RESULTS + "csvs/supply_energy.csv", eurostat=input_eurostat, - co2="data/eea/UNFCCC_v23.csv", + co2="data/bundle-sector/eea/UNFCCC_v23.csv", output: costs=RESULTS + "graphs/costs.pdf", energy=RESULTS + "graphs/energy.pdf", diff --git a/rules/retrieve.smk b/rules/retrieve.smk index 1c454633..46741830 100644 --- a/rules/retrieve.smk +++ b/rules/retrieve.smk @@ -2,6 +2,9 @@ # # SPDX-License-Identifier: MIT +import requests +from datetime import datetime, timedelta + if config["enable"].get("retrieve", "auto") == "auto": config["enable"]["retrieve"] = has_internet_access() @@ -27,18 +30,36 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle", rule retrieve_databundle: output: - expand("data/bundle/{file}", file=datafiles), + protected(expand("data/bundle/{file}", file=datafiles)), log: "logs/retrieve_databundle.log", resources: mem_mb=1000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_databundle.py" +if config["enable"].get("retrieve_irena"): + + rule retrieve_irena: + output: + offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", + onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", + solar="data/existing_infrastructure/solar_capacity_IRENA.csv", + log: + LOGS + "retrieve_irena.log", + resources: + mem_mb=1000, + retries: 2 + conda: + "../envs/retrieve.yaml" + script: + "../scripts/retrieve_irena.py" + + if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True): rule retrieve_cutout: @@ -56,6 +77,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) if 
config["enable"]["retrieve"] and config["enable"].get("retrieve_cost_data", True): @@ -100,55 +122,65 @@ if config["enable"]["retrieve"] and config["enable"].get( retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) if config["enable"]["retrieve"] and config["enable"].get( "retrieve_sector_databundle", True ): datafiles = [ - "data/eea/UNFCCC_v23.csv", - "data/switzerland-sfoe/switzerland-new_format.csv", - "data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", - "data/myb1-2017-nitro.xls", - "data/Industrial_Database.csv", - "data/emobility/KFZ__count", - "data/emobility/Pkw__count", - "data/h2_salt_caverns_GWh_per_sqkm.geojson", - directory("data/eurostat-energy_balances-june_2016_edition"), - directory("data/eurostat-energy_balances-may_2018_edition"), - directory("data/jrc-idees-2015"), + "eea/UNFCCC_v23.csv", + "switzerland-sfoe/switzerland-new_format.csv", + "nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", + "myb1-2017-nitro.xls", + "Industrial_Database.csv", + "emobility/KFZ__count", + "emobility/Pkw__count", + "h2_salt_caverns_GWh_per_sqkm.geojson", + ] + + datafolders = [ + protected( + directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition") + ), + protected( + directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition") + ), + protected(directory("data/bundle-sector/jrc-idees-2015")), ] rule retrieve_sector_databundle: output: - *datafiles, + protected(expand("data/bundle-sector/{files}", files=datafiles)), + *datafolders, log: "logs/retrieve_sector_databundle.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_sector_databundle.py" -if config["enable"]["retrieve"] and ( - config["sector"]["gas_network"] or config["sector"]["H2_retrofit"] -): +if config["enable"]["retrieve"]: datafiles = [ "IGGIELGN_LNGs.geojson", "IGGIELGN_BorderPoints.geojson", "IGGIELGN_Productions.geojson", + "IGGIELGN_Storages.geojson", "IGGIELGN_PipeSegments.geojson", ] rule retrieve_gas_infrastructure_data: output: - expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles), + protected( + expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles) + ), log: "logs/retrieve_gas_infrastructure_data.log", retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_gas_infrastructure_data.py" @@ -179,7 +211,7 @@ if config["enable"]["retrieve"]: static=True, ), output: - "data/shipdensity_global.zip", + protected("data/shipdensity_global.zip"), log: "logs/retrieve_ship_raster.log", resources: @@ -187,6 +219,122 @@ if config["enable"]["retrieve"]: retries: 2 run: move(input[0], output[0]) + validate_checksum(output[0], input[0]) + + +if config["enable"]["retrieve"]: + + # Downloading Copernicus Global Land Cover for land cover and land use: + # Website: https://land.copernicus.eu/global/products/lc + rule download_copernicus_land_cover: + input: + HTTP.remote( + "zenodo.org/record/3939050/files/PROBAV_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", + static=True, + ), + output: + "data/Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif", + run: + move(input[0], output[0]) + validate_checksum(output[0], input[0]) + + +if config["enable"]["retrieve"]: + + # Downloading LUISA Base Map for land cover and land use: + # Website: https://ec.europa.eu/jrc/en/luisa + rule retrieve_luisa_land_cover: + input: + HTTP.remote( + 
"jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/LUISA/EUROPE/Basemaps/LandUse/2018/LATEST/LUISA_basemap_020321_50m.tif", + static=True, + ), + output: + "data/LUISA_basemap_020321_50m.tif", + run: + move(input[0], output[0]) + + +if config["enable"]["retrieve"]: + # Some logic to find the correct file URL + # Sometimes files are released delayed or ahead of schedule, check which file is currently available + + def check_file_exists(url): + response = requests.head(url) + return response.status_code == 200 + + # Basic pattern where WDPA files can be found + url_pattern = ( + "https://d1gam3xoknrgr2.cloudfront.net/current/WDPA_{bYYYY}_Public_shp.zip" + ) + + # 3-letter month + 4 digit year for current/previous/next month to test + current_monthyear = datetime.now().strftime("%b%Y") + prev_monthyear = (datetime.now() - timedelta(30)).strftime("%b%Y") + next_monthyear = (datetime.now() + timedelta(30)).strftime("%b%Y") + + # Test prioritised: current month -> previous -> next + for bYYYY in [current_monthyear, prev_monthyear, next_monthyear]: + if check_file_exists(url := url_pattern.format(bYYYY=bYYYY)): + break + else: + # If None of the three URLs are working + url = False + + assert ( + url + ), f"No WDPA files found at {url_pattern} for bY='{current_monthyear}, {prev_monthyear}, or {next_monthyear}'" + + # Downloading protected area database from WDPA + # extract the main zip and then merge the contained 3 zipped shapefiles + # Website: https://www.protectedplanet.net/en/thematic-areas/wdpa + rule download_wdpa: + input: + HTTP.remote( + url, + static=True, + keep_local=True, + ), + params: + zip="data/WDPA_shp.zip", + folder=directory("data/WDPA"), + output: + gpkg=protected("data/WDPA.gpkg"), + run: + shell("cp {input} {params.zip}") + shell("unzip -o {params.zip} -d {params.folder}") + for i in range(3): + # vsizip is special driver for directly working with zipped shapefiles in ogr2ogr + layer_path = ( + f"/vsizip/{params.folder}/WDPA_{bYYYY}_Public_shp_{i}.zip" + ) + print(f"Adding layer {i + 1} of 3 to combined output file.") + shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") + + rule download_wdpa_marine: + # Downloading Marine protected area database from WDPA + # extract the main zip and then merge the contained 3 zipped shapefiles + # Website: https://www.protectedplanet.net/en/thematic-areas/marine-protected-areas + input: + HTTP.remote( + f"d1gam3xoknrgr2.cloudfront.net/current/WDPA_WDOECM_{bYYYY}_Public_marine_shp.zip", + static=True, + keep_local=True, + ), + params: + zip="data/WDPA_WDOECM_marine.zip", + folder=directory("data/WDPA_WDOECM_marine"), + output: + gpkg=protected("data/WDPA_WDOECM_marine.gpkg"), + run: + shell("cp {input} {params.zip}") + shell("unzip -o {params.zip} -d {params.folder}") + for i in range(3): + # vsizip is special driver for directly working with zipped shapefiles in ogr2ogr + layer_path = f"/vsizip/{params.folder}/WDPA_WDOECM_{bYYYY}_Public_marine_shp_{i}.zip" + print(f"Adding layer {i + 1} of 3 to combined output file.") + shell("ogr2ogr -f gpkg -update -append {output.gpkg} {layer_path}") + if config["enable"]["retrieve"]: @@ -220,6 +368,6 @@ if config["enable"]["retrieve"]: mem_mb=5000, retries: 2 conda: - "../envs/environment.yaml" + "../envs/retrieve.yaml" script: "../scripts/retrieve_monthly_fuel_prices.py" diff --git a/rules/solve_electricity.smk b/rules/solve_electricity.smk index 424748e2..fc8e8cea 100644 --- a/rules/solve_electricity.smk +++ b/rules/solve_electricity.smk @@ -11,6 +11,7 @@ rule solve_network: 
co2_sequestration_potential=config_provider( "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=resources("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"), config=RESULTS + "config.yaml", @@ -24,7 +25,7 @@ rule solve_network: + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", benchmark: BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - threads: 4 + threads: solver_threads resources: mem_mb=memory, walltime=config_provider("solving", "walltime", default="12:00:00"), diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 7f851326..fac58456 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -21,7 +21,7 @@ rule add_existing_baseyear: ), cop_soil_total=resources("cop_soil_total_elec_s{simpl}_{clusters}.nc"), cop_air_total=resources("cop_air_total_elec_s{simpl}_{clusters}.nc"), - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=resources("existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv"), existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", @@ -54,7 +54,16 @@ rule add_brownfield: "sector", "H2_retrofit_capacity_per_CH4" ), threshold_capacity=config_provider("existing_capacities", " threshold_capacity"), + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, # TODO: use config_provider + carriers=config_provider("electricity", "renewable_carriers"), input: + **{ + f"profile_{tech}": RESOURCES + f"profile_{tech}.nc" + for tech in config["electricity"]["renewable_carriers"] + if tech != "hydro" + }, + simplify_busmap=RESOURCES + "busmap_elec_s{simpl}.csv", + cluster_busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", network=RESULTS + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", network_p=solved_previous_horizon, #solved network at previous time step @@ -92,6 +101,7 @@ rule solve_sector_network_myopic: co2_sequestration_potential=config_provider( "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -107,7 +117,7 @@ rule solve_sector_network_myopic: + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", python=LOGS + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: 4 + threads: solver_threads resources: mem_mb=config_provider("solving", "mem"), walltime=config_provider("solving", "walltime", default="12:00:00"), diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index 8f2ff139..76621012 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -11,6 +11,7 @@ rule solve_sector_network: co2_sequestration_potential=config_provider( "sector", "co2_sequestration_potential", default=200 ), + custom_extra_functionality=input_custom_extra_functionality, input: network=RESULTS + 
"prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -21,11 +22,13 @@ rule solve_sector_network: shadow: "shallow" log: - solver=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: config["solving"]["solver"].get("threads", 4) + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", + threads: solver_threads resources: mem_mb=config_provider("solving", "mem"), walltime=config_provider("solving", "walltime", default="12:00:00"), diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk new file mode 100644 index 00000000..9e164a16 --- /dev/null +++ b/rules/solve_perfect.smk @@ -0,0 +1,162 @@ +# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +rule add_existing_baseyear: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + costs=config["costs"], + input: + network=RESULTS + + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + powerplants=RESOURCES + "powerplants.csv", + busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv", + busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), + cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", + existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", + existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", + output: + RESULTS + + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/add_existing_baseyear.py" + + +rule prepare_perfect_foresight: + input: + **{ + f"network_{year}": RESULTS + + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" + + f"{year}.nc" + for year in config["scenario"]["planning_horizons"][1:] + }, + brownfield_network=lambda w: ( + RESULTS + + "prenetworks-brownfield/" + + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_" + + "{}.nc".format(str(config["scenario"]["planning_horizons"][0])) + ), + output: + RESULTS + + 
"prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + threads: 2 + resources: + mem_mb=10000, + log: + LOGS + + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log", + benchmark: + ( + BENCHMARKS + + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/prepare_perfect_foresight.py" + + +rule solve_sector_network_perfect: + params: + solving=config["solving"], + foresight=config["foresight"], + sector=config["sector"], + planning_horizons=config["scenario"]["planning_horizons"], + co2_sequestration_potential=config["sector"].get( + "co2_sequestration_potential", 200 + ), + custom_extra_functionality=input_custom_extra_functionality, + input: + network=RESULTS + + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + costs="data/costs_2030.csv", + config=RESULTS + "config.yaml", + output: + RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", + threads: solver_threads + resources: + mem_mb=config["solving"]["mem"], + shadow: + "shallow" + log: + solver=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log", + python=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log", + memory=RESULTS + + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log", + benchmark: + ( + BENCHMARKS + + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/solve_network.py" + + +rule make_summary_perfect: + input: + **{ + f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS + + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc" + for simpl in config["scenario"]["simpl"] + for clusters in config["scenario"]["clusters"] + for opts in config["scenario"]["opts"] + for sector_opts in config["scenario"]["sector_opts"] + for ll in config["scenario"]["ll"] + }, + costs="data/costs_2020.csv", + output: + nodal_costs=RESULTS + "csvs/nodal_costs.csv", + nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", + nodal_cfs=RESULTS + "csvs/nodal_cfs.csv", + cfs=RESULTS + "csvs/cfs.csv", + costs=RESULTS + "csvs/costs.csv", + capacities=RESULTS + "csvs/capacities.csv", + curtailment=RESULTS + "csvs/curtailment.csv", + energy=RESULTS + "csvs/energy.csv", + supply=RESULTS + "csvs/supply.csv", + supply_energy=RESULTS + "csvs/supply_energy.csv", + prices=RESULTS + "csvs/prices.csv", + weighted_prices=RESULTS + "csvs/weighted_prices.csv", + market_values=RESULTS + "csvs/market_values.csv", + price_statistics=RESULTS + "csvs/price_statistics.csv", + metrics=RESULTS + "csvs/metrics.csv", + co2_emissions=RESULTS + "csvs/co2_emissions.csv", + threads: 2 + resources: + mem_mb=10000, + log: + LOGS + "make_summary_perfect.log", + benchmark: + (BENCHMARKS + "make_summary_perfect") + conda: + "../envs/environment.yaml" + script: + "../scripts/make_summary_perfect.py" diff --git a/rules/validate.smk b/rules/validate.smk index 09344673..fefb6ba6 100644 --- a/rules/validate.smk +++ b/rules/validate.smk @@ -17,7 +17,7 @@ rule build_electricity_production: The data is used for validation of the optimization results. 
""" params: - snapshots=config["snapshots"], + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, countries=config["countries"], output: resources("historical_electricity_production.csv"), @@ -35,7 +35,7 @@ rule build_cross_border_flows: The data is used for validation of the optimization results. """ params: - snapshots=config["snapshots"], + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, countries=config["countries"], input: network=resources("networks/base.nc"), @@ -55,7 +55,7 @@ rule build_electricity_prices: The data is used for validation of the optimization results. """ params: - snapshots=config["snapshots"], + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, countries=config["countries"], output: resources("historical_electricity_prices.csv"), diff --git a/scripts/_benchmark.py b/scripts/_benchmark.py new file mode 100644 index 00000000..ced102ba --- /dev/null +++ b/scripts/_benchmark.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" + +""" + +from __future__ import absolute_import, print_function + +import logging +import os +import sys +import time + +from memory_profiler import _get_memory, choose_backend + +logger = logging.getLogger(__name__) + +# TODO: provide alternative when multiprocessing is not available +try: + from multiprocessing import Pipe, Process +except ImportError: + from multiprocessing.dummy import Pipe, Process + + +# The memory logging facilities have been adapted from memory_profiler +class MemTimer(Process): + """ + Write memory consumption over a time interval to file until signaled to + stop on the pipe. + """ + + def __init__( + self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw + ): + self.monitor_pid = monitor_pid + self.interval = interval + self.pipe = pipe + self.filename = filename + self.max_usage = max_usage + self.backend = backend + + self.timestamps = kw.pop("timestamps", True) + self.include_children = kw.pop("include_children", True) + + super(MemTimer, self).__init__(*args, **kw) + + def run(self): + # get baseline memory usage + cur_mem = _get_memory( + self.monitor_pid, + self.backend, + timestamps=self.timestamps, + include_children=self.include_children, + ) + + n_measurements = 1 + mem_usage = cur_mem if self.max_usage else [cur_mem] + + if self.filename is not None: + stream = open(self.filename, "w") + stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem)) + stream.flush() + else: + stream = None + + self.pipe.send(0) # we're ready + stop = False + while True: + cur_mem = _get_memory( + self.monitor_pid, + self.backend, + timestamps=self.timestamps, + include_children=self.include_children, + ) + + if stream is not None: + stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem)) + stream.flush() + + n_measurements += 1 + if not self.max_usage: + mem_usage.append(cur_mem) + else: + mem_usage = max(cur_mem, mem_usage) + + if stop: + break + stop = self.pipe.poll(self.interval) + # do one more iteration + + if stream is not None: + stream.close() + + self.pipe.send(mem_usage) + self.pipe.send(n_measurements) + + +class memory_logger(object): + """ + Context manager for taking and reporting memory measurements at fixed + intervals from a separate process, for the duration of a context. 
+ + Parameters + ---------- + filename : None|str + Name of the text file to log memory measurements, if None no log is + created (defaults to None) + interval : float + Interval between measurements (defaults to 1.) + max_usage : bool + If True, only store and report the maximum value (defaults to True) + timestamps : bool + Whether to record tuples of memory usage and timestamps; if logging to + a file timestamps are always kept (defaults to True) + include_children : bool + Whether the memory of subprocesses is to be included (default: True) + + Arguments + --------- + n_measurements : int + Number of measurements that have been taken + mem_usage : (float, float)|[(float, float)] + All memory measurements and timestamps (if timestamps was True) or only + the maximum memory usage and its timestamp + + Note + ---- + The arguments are only set after all the measurements, i.e. outside of the + with statement. + + Example + ------- + with memory_logger(filename="memory.log", max_usage=True) as mem: + # Do a lot of long running memory intensive stuff + hard_memory_bound_stuff() + + max_mem, timestamp = mem.mem_usage + """ + + def __init__( + self, + filename=None, + interval=1.0, + max_usage=True, + timestamps=True, + include_children=True, + ): + if filename is not None: + timestamps = True + + self.filename = filename + self.interval = interval + self.max_usage = max_usage + self.timestamps = timestamps + self.include_children = include_children + + def __enter__(self): + backend = choose_backend() + + self.child_conn, self.parent_conn = Pipe() # this will store MemTimer's results + self.p = MemTimer( + os.getpid(), + self.interval, + self.child_conn, + self.filename, + backend=backend, + timestamps=self.timestamps, + max_usage=self.max_usage, + include_children=self.include_children, + ) + self.p.start() + self.parent_conn.recv() # wait until memory logging in subprocess is ready + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is None: + self.parent_conn.send(0) # finish timing + + self.mem_usage = self.parent_conn.recv() + self.n_measurements = self.parent_conn.recv() + else: + self.p.terminate() + + return False + + +class timer(object): + level = 0 + opened = False + + def __init__(self, name="", verbose=True): + self.name = name + self.verbose = verbose + + def __enter__(self): + if self.verbose: + if self.opened: + sys.stdout.write("\n") + + if len(self.name) > 0: + sys.stdout.write((".. " * self.level) + self.name + ": ") + sys.stdout.flush() + + self.__class__.opened = True + + self.__class__.level += 1 + + self.start = time.time() + return self + + def print_usec(self, usec): + if usec < 1000: + print("%.1f usec" % usec) + else: + msec = usec / 1000 + if msec < 1000: + print("%.1f msec" % msec) + else: + sec = msec / 1000 + print("%.1f sec" % sec) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.opened and self.verbose: + sys.stdout.write(".. 
" * self.level) + + if exc_type is None: + stop = time.time() + self.usec = usec = (stop - self.start) * 1e6 + if self.verbose: + self.print_usec(usec) + elif self.verbose: + print("failed") + sys.stdout.flush() + + self.__class__.level -= 1 + if self.verbose: + self.__class__.opened = False + return False + + +class optional(object): + def __init__(self, variable, contextman): + self.variable = variable + self.contextman = contextman + + def __enter__(self): + if self.variable: + return self.contextman.__enter__() + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.variable: + return self.contextman.__exit__(exc_type, exc_val, exc_tb) + return False diff --git a/scripts/_helpers.py b/scripts/_helpers.py index ed46db77..1aa90168 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: MIT import contextlib +import hashlib import logging import os import re @@ -13,6 +14,7 @@ from pathlib import Path import pandas as pd import pytz +import requests import yaml from snakemake.utils import update_config from tqdm import tqdm @@ -90,6 +92,35 @@ def path_provider(dir, rdir, shared_resources): return partial(get_run_path, dir=dir, rdir=rdir, shared_resources=shared_resources) +def get_opt(opts, expr, flags=None): + """ + Return the first option matching the regular expression. + + The regular expression is case-insensitive by default. + """ + if flags is None: + flags = re.IGNORECASE + for o in opts: + match = re.match(expr, o, flags=flags) + if match: + return match.group(0) + return None + + +def find_opt(opts, expr): + """ + Return if available the float after the expression. + """ + for o in opts: + if expr in o: + m = re.findall("[0-9]*\.?[0-9]+$", o) + if len(m) > 0: + return True, float(m[0]) + else: + return True, None + return False, None + + # Define a context manager to temporarily mute print statements @contextlib.contextmanager def mute_print(): @@ -132,6 +163,7 @@ def configure_logging(snakemake, skip_handlers=False): Do (not) skip the default handlers created for redirecting output to STDERR and file. """ import logging + import sys kwargs = snakemake.config.get("logging", dict()).copy() kwargs.setdefault("level", "INFO") @@ -155,6 +187,16 @@ def configure_logging(snakemake, skip_handlers=False): ) logging.basicConfig(**kwargs) + # Setup a function to handle uncaught exceptions and include them with their stacktrace into logfiles + def handle_exception(exc_type, exc_value, exc_traceback): + # Log the exception + logger = logging.getLogger() + logger.error( + "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback) + ) + + sys.excepthook = handle_exception + def update_p_nom_max(n): # if extendable carriers (solar/onwind/...) have capacity >= 0, @@ -275,7 +317,13 @@ def progress_retrieve(url, file, disable=False): urllib.request.urlretrieve(url, file, reporthook=update_to) -def mock_snakemake(rulename, configfiles=[], **wildcards): +def mock_snakemake( + rulename, + root_dir=None, + configfiles=[], + submodule_dir="workflow/submodules/pypsa-eur", + **wildcards, +): """ This function is expected to be executed from the 'scripts'-directory of ' the snakemake project. 
It returns a snakemake.script.Snakemake object, @@ -287,8 +335,13 @@ def mock_snakemake(rulename, configfiles=[], **wildcards): ---------- rulename: str name of the rule for which the snakemake object should be generated + root_dir: str/path-like + path to the root directory of the snakemake project configfiles: list, str list of configfiles to be used to update the config + submodule_dir: str, Path + in case PyPSA-Eur is used as a submodule, submodule_dir is + the path of pypsa-eur relative to the project directory. **wildcards: keyword arguments fixing the wildcards. Only necessary if wildcards are needed. @@ -296,15 +349,20 @@ def mock_snakemake(rulename, configfiles=[], **wildcards): import os import snakemake as sm - from packaging.version import Version, parse from pypsa.descriptors import Dict from snakemake.script import Snakemake script_dir = Path(__file__).parent.resolve() - root_dir = script_dir.parent + if root_dir is None: + root_dir = script_dir.parent + else: + root_dir = Path(root_dir).resolve() user_in_script_dir = Path.cwd().resolve() == script_dir - if user_in_script_dir: + if str(submodule_dir) in __file__: + # the submodule_dir path is only need to locate the project dir + os.chdir(Path(__file__[: __file__.find(str(submodule_dir))])) + elif user_in_script_dir: os.chdir(root_dir) elif Path.cwd().resolve() != root_dir: raise RuntimeError( @@ -316,13 +374,12 @@ def mock_snakemake(rulename, configfiles=[], **wildcards): if os.path.exists(p): snakefile = p break - kwargs = ( - dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {} - ) if isinstance(configfiles, str): configfiles = [configfiles] - workflow = sm.Workflow(snakefile, overwrite_configfiles=configfiles, **kwargs) + workflow = sm.Workflow( + snakefile, overwrite_configfiles=configfiles, rerun_triggers=[] + ) workflow.include(snakefile) if configfiles: @@ -386,17 +443,89 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): return week_df -def parse(l): - if len(l) == 1: - return yaml.safe_load(l[0]) +def parse(infix): + """ + Recursively parse a chained wildcard expression into a dictionary or a YAML + object. + + Parameters + ---------- + list_to_parse : list + The list to parse. + + Returns + ------- + dict or YAML object + The parsed list. + """ + if len(infix) == 1: + return yaml.safe_load(infix[0]) else: - return {l.pop(0): parse(l)} + return {infix.pop(0): parse(infix)} def update_config_with_sector_opts(config, sector_opts): - from snakemake.utils import update_config - for o in sector_opts.split("-"): if o.startswith("CF+"): - l = o.split("+")[1:] - update_config(config, parse(l)) + infix = o.split("+")[1:] + update_config(config, parse(infix)) + + +def get_checksum_from_zenodo(file_url): + parts = file_url.split("/") + record_id = parts[parts.index("record") + 1] + filename = parts[-1] + + response = requests.get(f"https://zenodo.org/api/records/{record_id}", timeout=30) + response.raise_for_status() + data = response.json() + + for file in data["files"]: + if file["key"] == filename: + return file["checksum"] + return None + + +def validate_checksum(file_path, zenodo_url=None, checksum=None): + """ + Validate file checksum against provided or Zenodo-retrieved checksum. + Calculates the hash of a file using 64KB chunks. Compares it against a + given checksum or one from a Zenodo URL. + + Parameters + ---------- + file_path : str + Path to the file for checksum validation. + zenodo_url : str, optional + URL of the file on Zenodo to fetch the checksum. 
+ checksum : str, optional + Checksum (format 'hash_type:checksum_value') for validation. + + Raises + ------ + AssertionError + If the checksum does not match, or if neither `checksum` nor `zenodo_url` is provided. + + + Examples + -------- + >>> validate_checksum("/path/to/file", checksum="md5:abc123...") + >>> validate_checksum( + ... "/path/to/file", + ... zenodo_url="https://zenodo.org/record/12345/files/example.txt", + ... ) + + If the checksum is invalid, an AssertionError will be raised. + """ + assert checksum or zenodo_url, "Either checksum or zenodo_url must be provided" + if zenodo_url: + checksum = get_checksum_from_zenodo(zenodo_url) + hash_type, checksum = checksum.split(":") + hasher = hashlib.new(hash_type) + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): # 64kb chunks + hasher.update(chunk) + calculated_checksum = hasher.hexdigest() + assert ( + calculated_checksum == checksum + ), "Checksum is invalid. This may be due to an incomplete download. Delete the file and re-execute the rule." diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index 597792c0..3b77c437 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -8,16 +8,16 @@ Prepares brownfield data from previous planning horizon. import logging -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - import numpy as np +import pandas as pd import pypsa +import xarray as xr from _helpers import update_config_with_sector_opts from add_existing_baseyear import add_build_year_to_new_assets +from pypsa.clustering.spatial import normed_or_uniform + +logger = logging.getLogger(__name__) +idx = pd.IndexSlice def add_brownfield(n, n_p, year): @@ -41,12 +41,9 @@ def add_brownfield(n, n_p, year): # remove assets if their optimized nominal capacity is lower than a threshold # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible chp_heat = c.df.index[ - ( - c.df[attr + "_nom_extendable"] - & c.df.index.str.contains("urban central") - & c.df.index.str.contains("CHP") - & c.df.index.str.contains("heat") - ) + (c.df[f"{attr}_nom_extendable"] & c.df.index.str.contains("urban central")) + & c.df.index.str.contains("CHP") + & c.df.index.str.contains("heat") ] threshold = snakemake.params.threshold_capacity @@ -60,21 +57,20 @@ def add_brownfield(n, n_p, year): ) n_p.mremove( c.name, - chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat], + chp_heat[c.df.loc[chp_heat, f"{attr}_nom_opt"] < threshold_chp_heat], ) n_p.mremove( c.name, c.df.index[ - c.df[attr + "_nom_extendable"] - & ~c.df.index.isin(chp_heat) - & (c.df[attr + "_nom_opt"] < threshold) + (c.df[f"{attr}_nom_extendable"] & ~c.df.index.isin(chp_heat)) + & (c.df[f"{attr}_nom_opt"] < threshold) ], ) # copy over assets but fix their capacity - c.df[attr + "_nom"] = c.df[attr + "_nom_opt"] - c.df[attr + "_nom_extendable"] = False + c.df[f"{attr}_nom"] = c.df[f"{attr}_nom_opt"] + c.df[f"{attr}_nom_extendable"] = False n.import_components_from_dataframe(c.df, c.name) @@ -124,7 +120,82 @@ def add_brownfield(n, n_p, year): n.links.loc[new_pipes, "p_nom_min"] = 0.0 -# %% +def disable_grid_expansion_if_LV_limit_hit(n): + if "lv_limit" not in n.global_constraints.index: + return + + total_expansion = ( + n.lines.eval("s_nom_min * length").sum() + + n.links.query("carrier == 'DC'").eval("p_nom_min * length").sum() + ).sum() + + lv_limit = n.global_constraints.at["lv_limit", "constant"] + + # allow small numerical differences + if lv_limit 
- total_expansion < 1: + logger.info("LV is already reached, disabling expansion and LV limit") + extendable_acs = n.lines.query("s_nom_extendable").index + n.lines.loc[extendable_acs, "s_nom_extendable"] = False + n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"] + + extendable_dcs = n.links.query("carrier == 'DC' and p_nom_extendable").index + n.links.loc[extendable_dcs, "p_nom_extendable"] = False + n.links.loc[extendable_dcs, "p_nom"] = n.links.loc[extendable_dcs, "p_nom_min"] + + n.global_constraints.drop("lv_limit", inplace=True) + + +def adjust_renewable_profiles(n, input_profiles, params, year): + """ + Adjusts renewable profiles according to the renewable technology specified, + using the latest year below or equal to the selected year. + """ + + # spatial clustering + cluster_busmap = pd.read_csv(snakemake.input.cluster_busmap, index_col=0).squeeze() + simplify_busmap = pd.read_csv( + snakemake.input.simplify_busmap, index_col=0 + ).squeeze() + clustermaps = simplify_busmap.map(cluster_busmap) + clustermaps.index = clustermaps.index.astype(str) + + # temporal clustering + dr = pd.date_range(**params["snapshots"], freq="h") + snapshotmaps = ( + pd.Series(dr, index=dr).where(lambda x: x.isin(n.snapshots), pd.NA).ffill() + ) + + for carrier in params["carriers"]: + if carrier == "hydro": + continue + with xr.open_dataset(getattr(input_profiles, "profile_" + carrier)) as ds: + if ds.indexes["bus"].empty or "year" not in ds.indexes: + continue + + closest_year = max( + (y for y in ds.year.values if y <= year), default=min(ds.year.values) + ) + + p_max_pu = ( + ds["profile"] + .sel(year=closest_year) + .transpose("time", "bus") + .to_pandas() + ) + + # spatial clustering + weight = ds["weight"].sel(year=closest_year).to_pandas() + weight = weight.groupby(clustermaps).transform(normed_or_uniform) + p_max_pu = (p_max_pu * weight).T.groupby(clustermaps).sum().T + p_max_pu.columns = p_max_pu.columns + f" {carrier}" + + # temporal_clustering + p_max_pu = p_max_pu.groupby(snapshotmaps).mean() + + # replace renewable time series + n.generators_t.p_max_pu.loc[:, p_max_pu.columns] = p_max_pu + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -135,7 +206,7 @@ if __name__ == "__main__": clusters="37", opts="", ll="v1.0", - sector_opts="168H-T-H-B-I-solar+p3-dist1", + sector_opts="168H-T-H-B-I-dist1", planning_horizons=2030, ) @@ -149,11 +220,15 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) + adjust_renewable_profiles(n, snakemake.input, snakemake.params, year) + add_build_year_to_new_assets(n, year) n_p = pypsa.Network(snakemake.input.network_p) add_brownfield(n, n_p, year) + disable_grid_expansion_if_LV_limit_hit(n) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 5ba473e8..1f1f2089 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -84,6 +84,7 @@ It further adds extendable ``generators`` with **zero** capacity for import logging from itertools import product +from typing import Dict, List import geopandas as gpd import numpy as np @@ -177,6 +178,15 @@ def sanitize_carriers(n, config): n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors) +def sanitize_locations(n): + n.buses["x"] = n.buses.x.where(n.buses.x != 0, n.buses.location.map(n.buses.x)) + n.buses["y"] = n.buses.y.where(n.buses.y != 0, 
n.buses.location.map(n.buses.y)) + n.buses["country"] = n.buses.country.where( + n.buses.country.ne("") & n.buses.country.notnull(), + n.buses.location.map(n.buses.country), + ) + + def add_co2_emissions(n, costs, carriers): """ Add CO2 emissions to the network's carriers attribute. @@ -255,6 +265,7 @@ def load_powerplants(ppl_fn): "bioenergy": "biomass", "ccgt, thermal": "CCGT", "hard coal": "coal", + "natural gas": "OCGT", } return ( pd.read_csv(ppl_fn, index_col=0, dtype={"bus": "str"}) @@ -279,38 +290,43 @@ def shapes_to_shapes(orig, dest): return transfer -def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0): +def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.0): substation_lv_i = n.buses.index[n.buses["substation_lv"]] regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i) opsd_load = pd.read_csv(load, index_col=0, parse_dates=True).filter(items=countries) - logger.info(f"Load data scaled with scalling factor {scaling}.") + ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={"name": "str"}).set_index("name") + + logger.info(f"Load data scaled by factor {scaling}.") opsd_load *= scaling nuts3 = gpd.read_file(nuts3_shapes).set_index("index") def upsample(cntry, group): - l = opsd_load[cntry] - if len(group) == 1: - return pd.DataFrame({group.index[0]: l}) - else: - nuts3_cntry = nuts3.loc[nuts3.country == cntry] - transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr() - gdp_n = pd.Series( - transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index - ) - pop_n = pd.Series( - transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index - ) + load = opsd_load[cntry] - # relative factors 0.6 and 0.4 have been determined from a linear - # regression on the country to continent load data - factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) - return pd.DataFrame( - factors.values * l.values[:, np.newaxis], - index=l.index, - columns=factors.index, - ) + if len(group) == 1: + return pd.DataFrame({group.index[0]: load}) + nuts3_cntry = nuts3.loc[nuts3.country == cntry] + transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr() + gdp_n = pd.Series( + transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index + ) + pop_n = pd.Series( + transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index + ) + + # relative factors 0.6 and 0.4 have been determined from a linear + # regression on the country to continent load data + factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) + if cntry in ["UA", "MD"]: + # overwrite factor because nuts3 provides no data for UA+MD + factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze()) + return pd.DataFrame( + factors.values * load.values[:, np.newaxis], + index=load.index, + columns=factors.index, + ) load = pd.concat( [ @@ -320,7 +336,9 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0): axis=1, ) - n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) + n.madd( + "Load", substation_lv_i, bus=substation_lv_i, p_set=load + ) # carrier="electricity" def update_transmission_costs(n, costs, length_factor=1.0): @@ -367,6 +385,10 @@ def attach_wind_and_solar( if ds.indexes["bus"].empty: continue + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + supcar = car.split("-", 2)[0] if supcar == "offwind": underwater_fraction = ds["underwater_fraction"].to_pandas() @@ -406,6 +428,7 @@ def attach_wind_and_solar( 
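The 0.6/0.4 GDP/population weighting above splits a country's load time series across its substations. A toy example with two regions (numbers invented):

import pandas as pd


def normed(s):
    return s / s.sum()


gdp_n = pd.Series([30.0, 10.0], index=["bus1", "bus2"])
pop_n = pd.Series([1.0, 3.0], index=["bus1", "bus2"])
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
# bus1: 0.6 * 0.75 + 0.4 * 0.25 = 0.55, bus2: 0.45
print(factors * 100.0)  # a 100 MW country hour -> 55 MW and 45 MW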
capital_cost=capital_cost, efficiency=costs.at[supcar, "efficiency"], p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(), + lifetime=costs.at[supcar, "lifetime"], ) @@ -434,7 +457,7 @@ def attach_conventional_generators( ppl = ( ppl.query("carrier in @carriers") .join(costs, on="carrier", rsuffix="_r") - .rename(index=lambda s: "C" + str(s)) + .rename(index=lambda s: f"C{str(s)}") ) ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r) @@ -496,8 +519,8 @@ def attach_conventional_generators( snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0 ).iloc[:, 0] bus_values = n.buses.country.map(values) - n.generators[attr].update( - n.generators.loc[idx].bus.map(bus_values).dropna() + n.generators.update( + {attr: n.generators.loc[idx].bus.map(bus_values).dropna()} ) else: # Single value affecting all generators of technology k indiscriminantely of country @@ -511,7 +534,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ppl = ( ppl.query('carrier == "hydro"') .reset_index(drop=True) - .rename(index=lambda s: str(s) + " hydro") + .rename(index=lambda s: f"{str(s)} hydro") ) ror = ppl.query('technology == "Run-Of-River"') phs = ppl.query('technology == "Pumped Storage"') @@ -608,16 +631,13 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ) if not missing_countries.empty: logger.warning( - "Assuming max_hours=6 for hydro reservoirs in the countries: {}".format( - ", ".join(missing_countries) - ) + f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}' ) hydro_max_hours = hydro.max_hours.where( hydro.max_hours > 0, hydro.country.map(max_hours_country) ).fillna(6) - flatten_dispatch = params.get("flatten_dispatch", False) - if flatten_dispatch: + if params.get("flatten_dispatch", False): buffer = params.get("flatten_dispatch_buffer", 0.2) average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"] p_max_pu = (average_capacity_factor + buffer).clip(upper=1) @@ -642,78 +662,17 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par ) -def attach_extendable_generators(n, costs, ppl, carriers): - logger.warning( - "The function `attach_extendable_generators` is deprecated in v0.5.0." - ) - add_missing_carriers(n, carriers) - add_co2_emissions(n, costs, carriers) +def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) -> None: + """ + Attach renewable capacities from the OPSD dataset to the network. - for tech in carriers: - if tech.startswith("OCGT"): - ocgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ocgt.index, - suffix=" OCGT", - bus=ocgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["OCGT", "capital_cost"], - marginal_cost=costs.at["OCGT", "marginal_cost"], - efficiency=costs.at["OCGT", "efficiency"], - ) + Args: + - n: The PyPSA network to attach the capacities to. + - tech_map: A dictionary mapping fuel types to carrier names. 
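The flatten_dispatch option above caps hydro availability at the average capacity factor plus a buffer, evening out dispatch over the year; with invented capacity factors and the 0.2 default buffer:

import pandas as pd

mean_cf = pd.Series({"plant1 hydro": 0.45, "plant2 hydro": 0.30})
p_max_pu = (mean_cf + 0.2).clip(upper=1)  # caps dispatch at 0.65 / 0.50 of p_nom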
- elif tech.startswith("CCGT"): - ccgt = ( - ppl.query("carrier in ['OCGT', 'CCGT']") - .groupby("bus", as_index=False) - .first() - ) - n.madd( - "Generator", - ccgt.index, - suffix=" CCGT", - bus=ccgt["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["CCGT", "capital_cost"], - marginal_cost=costs.at["CCGT", "marginal_cost"], - efficiency=costs.at["CCGT", "efficiency"], - ) - - elif tech.startswith("nuclear"): - nuclear = ( - ppl.query("carrier == 'nuclear'").groupby("bus", as_index=False).first() - ) - n.madd( - "Generator", - nuclear.index, - suffix=" nuclear", - bus=nuclear["bus"], - carrier=tech, - p_nom_extendable=True, - p_nom=0.0, - capital_cost=costs.at["nuclear", "capital_cost"], - marginal_cost=costs.at["nuclear", "marginal_cost"], - efficiency=costs.at["nuclear", "efficiency"], - ) - - else: - raise NotImplementedError( - "Adding extendable generators for carrier " - "'{tech}' is not implemented, yet. " - "Only OCGT, CCGT and nuclear are allowed at the moment." - ) - - -def attach_OPSD_renewables(n, tech_map): + Returns: + - None + """ tech_string = ", ".join(sum(tech_map.values(), [])) logger.info(f"Using OPSD renewable capacities for carriers {tech_string}.") @@ -734,11 +693,30 @@ def attach_OPSD_renewables(n, tech_map): caps = caps.groupby(["bus"]).Capacity.sum() caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) - n.generators.p_nom.update(gens.bus.map(caps).dropna()) - n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) + n.generators.update({"p_nom": gens.bus.map(caps).dropna()}) + n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()}) -def estimate_renewable_capacities(n, year, tech_map, expansion_limit, countries): +def estimate_renewable_capacities( + n: pypsa.Network, year: int, tech_map: dict, expansion_limit: bool, countries: list +) -> None: + """ + Estimate a different between renewable capacities in the network and + reported country totals from IRENASTAT dataset. Distribute the difference + with a heuristic. + + Heuristic: n.generators_t.p_max_pu.mean() * n.generators.p_nom_max + + Args: + - n: The PyPSA network. + - year: The year of optimisation. + - tech_map: A dictionary mapping fuel types to carrier names. + - expansion_limit: Boolean value from config file + - countries: A list of country codes to estimate capacities for. + + Returns: + - None + """ if not len(countries) or not len(tech_map): return @@ -755,7 +733,10 @@ def estimate_renewable_capacities(n, year, tech_map, expansion_limit, countries) for ppm_technology, techs in tech_map.items(): tech_i = n.generators.query("carrier in @techs").index - stats = capacities.loc[ppm_technology].reindex(countries, fill_value=0.0) + if ppm_technology in capacities.index.get_level_values("Technology"): + stats = capacities.loc[ppm_technology].reindex(countries, fill_value=0.0) + else: + stats = pd.Series(0.0, index=countries) country = n.generators.bus[tech_i].map(n.buses.country) existent = n.generators.p_nom[tech_i].groupby(country).sum() missing = stats - existent @@ -829,6 +810,7 @@ if __name__ == "__main__": snakemake.input.regions, snakemake.input.load, snakemake.input.nuts3_shapes, + snakemake.input.ua_md_gdp, params.countries, params.scaling_factor, ) diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 08810470..c0d37a5b 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -8,25 +8,20 @@ horizon. 
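The heuristic named in the estimate_renewable_capacities docstring above weights each generator by mean availability times installable potential; the gap to the reported IRENASTAT country total is then, presumably, spread in proportion to that weight. A sketch with invented numbers:

import pandas as pd

missing = 500.0  # MW reported for a country but absent from the network
weight = pd.Series(
    {"gen1": 0.25 * 2000, "gen2": 0.15 * 1000}  # p_max_pu.mean() * p_nom_max
)
p_nom = missing * weight / weight.sum()  # gen1 ~ 385 MW, gen2 ~ 115 MW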
""" import logging - -logger = logging.getLogger(__name__) - -import pandas as pd - -idx = pd.IndexSlice - from types import SimpleNamespace import country_converter as coco import numpy as np +import pandas as pd import pypsa import xarray as xr from _helpers import update_config_with_sector_opts from add_electricity import sanitize_carriers from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs +logger = logging.getLogger(__name__) cc = coco.CountryConverter() - +idx = pd.IndexSlice spatial = SimpleNamespace() @@ -45,7 +40,7 @@ def add_build_year_to_new_assets(n, baseyear): # add -baseyear to name rename = pd.Series(c.df.index, c.df.index) - rename[assets] += "-" + str(baseyear) + rename[assets] += f"-{str(baseyear)}" c.df.rename(index=rename, inplace=True) # rename time-dependent @@ -53,7 +48,7 @@ def add_build_year_to_new_assets(n, baseyear): "series" ) & n.component_attrs[c.name].status.str.contains("Input") for attr in n.component_attrs[c.name].index[selection]: - c.pnl[attr].rename(columns=rename, inplace=True) + c.pnl[attr] = c.pnl[attr].rename(columns=rename) def add_existing_renewables(df_agg): @@ -88,7 +83,9 @@ def add_existing_renewables(df_agg): ] cfs = n.generators_t.p_max_pu[gens].mean() cfs_key = cfs / cfs.sum() - nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values + nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.groupby( + n.generators.loc[gens, "bus"] + ).sum() nodal_df = df.loc[n.buses.loc[elec_buses, "country"]] nodal_df.index = elec_buses @@ -252,7 +249,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas if "m" in snakemake.wildcards.clusters: for ind in new_capacity.index: # existing capacities are split evenly among regions in every country - inv_ind = [i for i in inv_busmap[ind]] + inv_ind = list(inv_busmap[ind]) # for offshore the splitting only includes coastal regions inv_ind = [ @@ -303,7 +300,19 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas else: bus0 = vars(spatial)[carrier[generator]].nodes if "EU" not in vars(spatial)[carrier[generator]].locations: - bus0 = bus0.intersection(capacity.index + " gas") + bus0 = bus0.intersection(capacity.index + " " + carrier[generator]) + + # check for missing bus + missing_bus = pd.Index(bus0).difference(n.buses.index) + if not missing_bus.empty: + logger.info(f"add buses {bus0}") + n.madd( + "Bus", + bus0, + carrier=generator, + location=vars(spatial)[carrier[generator]].locations, + unit="MWh_el", + ) already_build = n.links.index.intersection(asset_i) new_build = asset_i.difference(n.links.index) @@ -393,104 +402,18 @@ def add_heating_capacities_installed_before_baseyear( """ logger.debug(f"Adding heating capacities installed before {baseyear}") - # Add existing heating capacities, data comes from the study - # "Mapping and analyses of the current and future (2020 - 2030) - # heating/cooling fuel deployment (fossil/renewables) " - # https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 - # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
- # TODO start from original file - - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) - - # convert GW to MW - df *= 1e3 - - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 ) - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + techs = existing_heating.columns.get_level_values(1).unique() - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: + for name in existing_heating.columns.get_level_values(0).unique(): name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + heat_pump_type = "air" if "urban" in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -498,7 +421,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -506,82 +429,90 @@ def add_heating_capacities_installed_before_baseyear( if int(grouping_year) + default_lifetime <= int(baseyear): continue - # installation is assumed to be linear for the past 25 years (default lifetime) + # installation is assumed to be linear for the past 
default_lifetime years ratio = (int(grouping_year) - int(grouping_years[i - 1])) / default_lifetime n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", - efficiency=costs.at[name_type + " resistive heater", "efficiency"], - capital_cost=costs.at[name_type + " resistive heater", "efficiency"] - * costs.at[name_type + " resistive heater", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] - * ratio - / costs.at[name_type + " resistive heater", "efficiency"], + efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], + capital_cost=( + costs.at[f"{name_type} resistive heater", "efficiency"] + * costs.at[f"{name_type} resistive heater", "fixed"] + ), + p_nom=( + existing_heating.loc[nodes, (name, "resistive heater")] + * ratio + / costs.at[f"{name_type} resistive heater", "efficiency"] + ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", - bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus0="EU gas" if "EU gas" in spatial.gas.nodes else nodes + " gas", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", - efficiency=costs.at[name_type + " gas boiler", "efficiency"], + efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], - capital_cost=costs.at[name_type + " gas boiler", "efficiency"] - * costs.at[name_type + " gas boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] - * ratio - / costs.at[name_type + " gas boiler", "efficiency"], + capital_cost=( + costs.at[f"{name_type} gas boiler", "efficiency"] + * costs.at[f"{name_type} gas boiler", "fixed"] + ), + p_nom=( + existing_heating.loc[nodes, (name, "gas boiler")] + * ratio + / costs.at[f"{name_type} gas boiler", "efficiency"] + ), build_year=int(grouping_year), - lifetime=costs.at[name_type + " gas boiler", "lifetime"], + lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil 
boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom=( + existing_heating.loc[nodes, (name, "oil boiler")] + * ratio + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), - lifetime=costs.at[name_type + " gas boiler", "lifetime"], + lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) # delete links with p_nom=nan corresponding to extra nodes in country @@ -606,20 +537,19 @@ def add_heating_capacities_installed_before_baseyear( ) -# %% if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake( "add_existing_baseyear", - configfiles="config/test/config.myopic.yaml", + # configfiles="config/test/config.myopic.yaml", simpl="", - clusters="5", - ll="v1.5", + clusters="37", + ll="v1.0", opts="", - sector_opts="24H-T-H-B-I-A-solar+p3-dist1", - planning_horizons=2030, + sector_opts="1p7-4380H-T-H-B-I-A-dist1", + planning_horizons=2020, ) logging.basicConfig(level=snakemake.config["logging"]["level"]) @@ -662,7 +592,9 @@ if __name__ == "__main__": .to_pandas() .reindex(index=n.snapshots) ) - default_lifetime = snakemake.params.costs["fill_values"]["lifetime"] + default_lifetime = snakemake.params.existing_capacities[ + "default_heating_lifetime" + ] add_heating_capacities_installed_before_baseyear( n, baseyear, diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 9fe20066..f813e35f 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -56,7 +56,7 @@ import numpy as np import pandas as pd import pypsa from _helpers import configure_logging, set_scenario_config -from add_electricity import load_costs, sanitize_carriers +from add_electricity import load_costs, sanitize_carriers, sanitize_locations idx = pd.IndexSlice @@ -100,10 +100,9 @@ def attach_stores(n, costs, extendable_carriers): n.madd("Carrier", carriers) buses_i = n.buses.index - bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]} if "H2" in carriers: - h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict) + h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i) n.madd( "Store", @@ -143,7 +142,7 @@ def attach_stores(n, costs, extendable_carriers): if "battery" in carriers: b_buses_i = n.madd( - "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict + "Bus", buses_i + " battery", carrier="battery", location=buses_i ) n.madd( @@ -247,6 +246,7 @@ if __name__ == "__main__": attach_hydrogen_pipelines(n, costs, extendable_carriers) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/base_network.py b/scripts/base_network.py index 32b54d28..04bbc45b 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -78,10 +78,13 @@ import shapely.prepared import shapely.wkt import yaml from _helpers import configure_logging, set_scenario_config +from packaging.version import Version, parse from scipy import spatial from scipy.sparse import csgraph from shapely.geometry import LineString, Point +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") + logger = logging.getLogger(__name__) @@ -138,7 +141,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec): ) buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) - buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) + 
buses["under_construction"] = buses.under_construction.where( + lambda s: s.notnull(), False + ).astype(bool) # remove all buses outside of all countries including exclusive economic zones (offshore) europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] @@ -151,9 +156,7 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec): buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull() ) logger.info( - "Removing buses with voltages {}".format( - pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"]) - ) + f'Removing buses with voltages {pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])}' ) return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) @@ -368,6 +371,25 @@ def _apply_parameter_corrections(n, parameter_corrections): df.loc[inds, attr] = r[inds].astype(df[attr].dtype) +def _reconnect_crimea(lines): + logger.info("Reconnecting Crimea to the Ukrainian grid.") + lines_to_crimea = pd.DataFrame( + { + "bus0": ["3065", "3181", "3181"], + "bus1": ["3057", "3055", "3057"], + "v_nom": [300, 300, 300], + "num_parallel": [1, 1, 1], + "length": [140, 120, 140], + "carrier": ["AC", "AC", "AC"], + "underground": [False, False, False], + "under_construction": [False, False, False], + }, + index=["Melitopol", "Liubymivka left", "Luibymivka right"], + ) + + return pd.concat([lines, lines_to_crimea]) + + def _set_electrical_parameters_lines(lines, config): v_noms = config["electricity"]["voltages"] linetypes = config["lines"]["types"] @@ -452,19 +474,15 @@ def _remove_dangling_branches(branches, buses): ) -def _remove_unconnected_components(network): +def _remove_unconnected_components(network, threshold=6): _, labels = csgraph.connected_components(network.adjacency_matrix(), directed=False) component = pd.Series(labels, index=network.buses.index) component_sizes = component.value_counts() - components_to_remove = component_sizes.iloc[1:] + components_to_remove = component_sizes.loc[component_sizes < threshold] logger.info( - "Removing {} unconnected network components with less than {} buses. In total {} buses.".format( - len(components_to_remove), - components_to_remove.max(), - components_to_remove.sum(), - ) + f"Removing {len(components_to_remove)} unconnected network components with less than {components_to_remove.max()} buses. In total {components_to_remove.sum()} buses." 
) return network[component == component_sizes.index[0]] @@ -509,12 +527,13 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): ) return pd.Series(key, index) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} gb = buses.loc[substation_b].groupby( ["x", "y"], as_index=False, group_keys=False, sort=False ) - bus_map_low = gb.apply(prefer_voltage, "min") + bus_map_low = gb.apply(prefer_voltage, "min", **compat_kws) lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) - bus_map_high = gb.apply(prefer_voltage, "max") + bus_map_high = gb.apply(prefer_voltage, "max", **compat_kws) hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) onshore_b = pd.Series(False, buses.index) @@ -547,7 +566,7 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes): ~buses["under_construction"] ) - c_nan_b = buses.country.isnull() + c_nan_b = buses.country.fillna("na") == "na" if c_nan_b.sum() > 0: c_tag = _get_country(buses.loc[c_nan_b]) c_tag.loc[~c_tag.isin(countries)] = np.nan @@ -705,15 +724,19 @@ def base_network( lines = _load_lines_from_eg(buses, eg_lines) transformers = _load_transformers_from_eg(buses, eg_transformers) + if config["lines"].get("reconnect_crimea", True) and "UA" in config["countries"]: + lines = _reconnect_crimea(lines) + lines = _set_electrical_parameters_lines(lines, config) transformers = _set_electrical_parameters_transformers(transformers, config) links = _set_electrical_parameters_links(links, config, links_p_nom) converters = _set_electrical_parameters_converters(converters, config) + snapshots = snakemake.params.snapshots n = pypsa.Network() n.name = "PyPSA-Eur" - n.set_snapshots(pd.date_range(freq="h", **config["snapshots"])) + n.set_snapshots(pd.date_range(freq="h", **snapshots)) n.madd("Carrier", ["AC", "DC"]) n.import_components_from_dataframe(buses, "Bus") diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index d200a78e..6b5cb147 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -7,9 +7,15 @@ Compute biogas and solid biomass potentials for each clustered model region using data from JRC ENSPRESO. 
""" +import logging + import geopandas as gpd +import numpy as np import pandas as pd +logger = logging.getLogger(__name__) +AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050] + def build_nuts_population_data(year=2013): pop = pd.read_csv( @@ -126,14 +132,14 @@ def disaggregate_nuts0(bio): pop = build_nuts_population_data() # get population in nuts2 - pop_nuts2 = pop.loc[pop.index.str.len() == 4] + pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy() by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum() pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country) # distribute nuts0 data to nuts2 by population bio_nodal = bio.loc[pop_nuts2.ct] bio_nodal.index = pop_nuts2.index - bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0) + bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float) # update inplace bio.update(bio_nodal) @@ -208,13 +214,41 @@ if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5") + snakemake = mock_snakemake( + "build_biomass_potentials", + simpl="", + clusters="5", + planning_horizons=2050, + ) + overnight = snakemake.config["foresight"] == "overnight" params = snakemake.params.biomass - year = params["year"] + investment_year = int(snakemake.wildcards.planning_horizons) + year = params["year"] if overnight else investment_year scenario = params["scenario"] - enspreso = enspreso_biomass_potentials(year, scenario) + if year > 2050: + logger.info("No biomass potentials for years after 2050, using 2050.") + max_year = max(AVAILABLE_BIOMASS_YEARS) + enspreso = enspreso_biomass_potentials(max_year, scenario) + + elif year not in AVAILABLE_BIOMASS_YEARS: + before = int(np.floor(year / 10) * 10) + after = int(np.ceil(year / 10) * 10) + logger.info( + f"No biomass potentials for {year}, interpolating linearly between {before} and {after}." 
+ ) + + enspreso_before = enspreso_biomass_potentials(before, scenario) + enspreso_after = enspreso_biomass_potentials(after, scenario) + + fraction = (year - before) / (after - before) + + enspreso = enspreso_before + fraction * (enspreso_after - enspreso_before) + + else: + logger.info(f"Using biomass potentials for {year}.") + enspreso = enspreso_biomass_potentials(year, scenario) enspreso = disaggregate_nuts0(enspreso) @@ -229,7 +263,7 @@ if __name__ == "__main__": df.to_csv(snakemake.output.biomass_potentials_all) grouper = {v: k for k, vv in params["classes"].items() for v in vv} - df = df.groupby(grouper, axis=1).sum() + df = df.T.groupby(grouper).sum().T df *= 1e6 # TWh/a to MWh/a df.index.name = "MWh/a" diff --git a/scripts/build_biomass_transport_costs.py b/scripts/build_biomass_transport_costs.py index 9271b600..05b64519 100644 --- a/scripts/build_biomass_transport_costs.py +++ b/scripts/build_biomass_transport_costs.py @@ -80,4 +80,9 @@ def build_biomass_transport_costs(): if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("build_biomass_transport_costs") + build_biomass_transport_costs() diff --git a/scripts/build_clustered_population_layouts.py b/scripts/build_clustered_population_layouts.py index 083f3de4..f1d386bd 100644 --- a/scripts/build_clustered_population_layouts.py +++ b/scripts/build_clustered_population_layouts.py @@ -25,13 +25,10 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop = {} for item in ["total", "urban", "rural"]: diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 85% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py index 73494260..e334b1b3 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -18,7 +18,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) @@ -31,13 +32,10 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..86c42631 --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. 
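The interpolation branch above can be exercised in isolation. A minimal sketch with scalar stand-ins for the ENSPRESO tables (the real objects are DataFrames, but the arithmetic is elementwise and identical; interpolate_decades is a hypothetical helper, valid for a year strictly between two available decades):

import numpy as np

def interpolate_decades(year, data):  # hypothetical helper mirroring the logic above
    before = int(np.floor(year / 10) * 10)
    after = int(np.ceil(year / 10) * 10)
    fraction = (year - before) / (after - before)
    return data[before] + fraction * (data[after] - data[before])

data = {2030: 100.0, 2040: 140.0}  # toy potentials in TWh/a
print(interpolate_decades(2035, data))  # 120.0, halfway between the decades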
+""" + +import logging + +import pandas as pd +from prepare_sector_network import get + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_district_heat_share", + simpl="", + clusters=48, + planning_horizons="2050", + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_electricity_demand.py b/scripts/build_electricity_demand.py index 376af247..25503880 100755 --- a/scripts/build_electricity_demand.py +++ b/scripts/build_electricity_demand.py @@ -41,13 +41,13 @@ Outputs import logging -logger = logging.getLogger(__name__) -import dateutil import numpy as np import pandas as pd from _helpers import configure_logging, set_scenario_config from pandas import Timedelta as Delta +logger = logging.getLogger(__name__) + def load_timeseries(fn, years, countries): """ @@ -69,7 +69,7 @@ def load_timeseries(fn, years, countries): Load time-series with UTC timestamps x ISO-2 countries """ return ( - pd.read_csv(fn, index_col=0, parse_dates=[0]) + pd.read_csv(fn, index_col=0, parse_dates=[0], date_format="%Y-%m-%dT%H:%M:%SZ") .tz_localize(None) .dropna(how="all", axis=0) .rename(columns={"GB_UKM": "GB"}) @@ -247,6 +247,14 @@ def manual_adjustment(load, fn_load): copy_timeslice(load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)) copy_timeslice(load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)) + if "UA" in countries: + copy_timeslice( + load, "UA", "2013-01-25 14:00", "2013-01-28 21:00", Delta(weeks=1) + ) + copy_timeslice( + load, "UA", 
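A worked example of the district heating share update above, with toy values standing in for the population-weighted inputs:

district_heat_share = 0.20  # today's country-level share mapped to the node
urban_fraction = 0.50       # urban population share of the node
central_fraction = 0.60     # config: max potential of urban demand for DH
progress = 0.5              # config value for the chosen investment year

dist_fraction_node = district_heat_share
# if the DH share exceeds the urban fraction, the urban fraction is raised
urban_fraction = max(urban_fraction, dist_fraction_node)

diff = urban_fraction * central_fraction - dist_fraction_node  # 0.30 - 0.20
dist_fraction_node += diff * progress
print(dist_fraction_node)  # 0.25: halfway from today's 20% to the 30% cap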
"2013-10-28 03:00", "2013-10-28 20:00", Delta(weeks=1) + ) + return load @@ -267,6 +275,20 @@ if __name__ == "__main__": load = load_timeseries(snakemake.input[0], years, countries) + if "UA" in countries: + # attach load of UA (best data only for entsoe transparency) + load_ua = load_timeseries(snakemake.input[0], "2018", ["UA"], False) + snapshot_year = str(snapshots.year.unique().item()) + time_diff = pd.Timestamp("2018") - pd.Timestamp(snapshot_year) + load_ua.index -= ( + time_diff # hack indices (currently, UA is manually set to 2018) + ) + load["UA"] = load_ua + # attach load of MD (no time-series available, use 2020-totals and distribute according to UA): + # https://www.iea.org/data-and-statistics/data-browser/?country=MOLDOVA&fuel=Energy%20consumption&indicator=TotElecCons + if "MD" in countries: + load["MD"] = 6.2e6 * (load_ua / load_ua.sum()) + if snakemake.params.load["manual_adjustments"]: load = manual_adjustment(load, snakemake.input[0]) diff --git a/scripts/build_electricity_production.py b/scripts/build_electricity_production.py index 38be2ba0..7f87c39a 100644 --- a/scripts/build_electricity_production.py +++ b/scripts/build_electricity_production.py @@ -59,7 +59,7 @@ if __name__ == "__main__": gen = client.query_generation(country, start=start, end=end, nett=True) gen = gen.tz_localize(None).resample("1h").mean() gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)] - gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum() + gen = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T generation.append(gen) except NoMatchingDataError: unavailable_countries.append(country) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 891c4e2a..c67bb49d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -7,9 +7,6 @@ Build total energy demands per country using JRC IDEES, eurostat, and EEA data. 
""" import logging - -logger = logging.getLogger(__name__) - import multiprocessing as mp from functools import partial @@ -21,7 +18,7 @@ from _helpers import mute_print from tqdm import tqdm cc = coco.CountryConverter() - +logger = logging.getLogger(__name__) idx = pd.IndexSlice @@ -172,8 +169,6 @@ def build_swiss(year): def idees_per_country(ct, year, base_dir): - ct_totals = {} - ct_idees = idees_rename.get(ct, ct) fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx" fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx" @@ -183,20 +178,20 @@ def idees_per_country(ct, year, base_dir): df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year] - ct_totals["total residential space"] = df["Space heating"] - rows = ["Advanced electric heating", "Conventional electric heating"] - ct_totals["electricity residential space"] = df[rows].sum() - + ct_totals = { + "total residential space": df["Space heating"], + "electricity residential space": df[rows].sum(), + } ct_totals["total residential water"] = df.at["Water heating"] assert df.index[23] == "Electricity" - ct_totals["electricity residential water"] = df[23] + ct_totals["electricity residential water"] = df.iloc[23] ct_totals["total residential cooking"] = df["Cooking"] assert df.index[30] == "Electricity" - ct_totals["electricity residential cooking"] = df[30] + ct_totals["electricity residential cooking"] = df.iloc[30] df = pd.read_excel(fn_residential, "RES_summary", index_col=0)[year] @@ -204,13 +199,13 @@ def idees_per_country(ct, year, base_dir): ct_totals["total residential"] = df[row] assert df.index[47] == "Electricity" - ct_totals["electricity residential"] = df[47] + ct_totals["electricity residential"] = df.iloc[47] assert df.index[46] == "Derived heat" - ct_totals["derived heat residential"] = df[46] + ct_totals["derived heat residential"] = df.iloc[46] assert df.index[50] == "Thermal uses" - ct_totals["thermal uses residential"] = df[50] + ct_totals["thermal uses residential"] = df.iloc[50] # services @@ -224,12 +219,12 @@ def idees_per_country(ct, year, base_dir): ct_totals["total services water"] = df["Hot water"] assert df.index[24] == "Electricity" - ct_totals["electricity services water"] = df[24] + ct_totals["electricity services water"] = df.iloc[24] ct_totals["total services cooking"] = df["Catering"] assert df.index[31] == "Electricity" - ct_totals["electricity services cooking"] = df[31] + ct_totals["electricity services cooking"] = df.iloc[31] df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)[year] @@ -237,13 +232,13 @@ def idees_per_country(ct, year, base_dir): ct_totals["total services"] = df[row] assert df.index[50] == "Electricity" - ct_totals["electricity services"] = df[50] + ct_totals["electricity services"] = df.iloc[50] assert df.index[49] == "Derived heat" - ct_totals["derived heat services"] = df[49] + ct_totals["derived heat services"] = df.iloc[49] assert df.index[53] == "Thermal uses" - ct_totals["thermal uses services"] = df[53] + ct_totals["thermal uses services"] = df.iloc[53] # agriculture, forestry and fishing @@ -284,28 +279,28 @@ def idees_per_country(ct, year, base_dir): ct_totals["total two-wheel"] = df["Powered 2-wheelers (Gasoline)"] assert df.index[19] == "Passenger cars" - ct_totals["total passenger cars"] = df[19] + ct_totals["total passenger cars"] = df.iloc[19] assert df.index[30] == "Battery electric vehicles" - ct_totals["electricity passenger cars"] = df[30] + ct_totals["electricity passenger cars"] = df.iloc[30] assert 
df.index[31] == "Motor coaches, buses and trolley buses" - ct_totals["total other road passenger"] = df[31] + ct_totals["total other road passenger"] = df.iloc[31] assert df.index[39] == "Battery electric vehicles" - ct_totals["electricity other road passenger"] = df[39] + ct_totals["electricity other road passenger"] = df.iloc[39] assert df.index[41] == "Light duty vehicles" - ct_totals["total light duty road freight"] = df[41] + ct_totals["total light duty road freight"] = df.iloc[41] assert df.index[49] == "Battery electric vehicles" - ct_totals["electricity light duty road freight"] = df[49] + ct_totals["electricity light duty road freight"] = df.iloc[49] row = "Heavy duty vehicles (Diesel oil incl. biofuels)" ct_totals["total heavy duty road freight"] = df[row] assert df.index[61] == "Passenger cars" - ct_totals["passenger car efficiency"] = df[61] + ct_totals["passenger car efficiency"] = df.iloc[61] df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0)[year] @@ -314,39 +309,39 @@ def idees_per_country(ct, year, base_dir): ct_totals["electricity rail"] = df["Electricity"] assert df.index[15] == "Passenger transport" - ct_totals["total rail passenger"] = df[15] + ct_totals["total rail passenger"] = df.iloc[15] assert df.index[16] == "Metro and tram, urban light rail" assert df.index[19] == "Electric" assert df.index[20] == "High speed passenger trains" - ct_totals["electricity rail passenger"] = df[[16, 19, 20]].sum() + ct_totals["electricity rail passenger"] = df.iloc[[16, 19, 20]].sum() assert df.index[21] == "Freight transport" - ct_totals["total rail freight"] = df[21] + ct_totals["total rail freight"] = df.iloc[21] assert df.index[23] == "Electric" - ct_totals["electricity rail freight"] = df[23] + ct_totals["electricity rail freight"] = df.iloc[23] df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0)[year] assert df.index[6] == "Passenger transport" - ct_totals["total aviation passenger"] = df[6] + ct_totals["total aviation passenger"] = df.iloc[6] assert df.index[10] == "Freight transport" - ct_totals["total aviation freight"] = df[10] + ct_totals["total aviation freight"] = df.iloc[10] assert df.index[7] == "Domestic" - ct_totals["total domestic aviation passenger"] = df[7] + ct_totals["total domestic aviation passenger"] = df.iloc[7] assert df.index[8] == "International - Intra-EU" assert df.index[9] == "International - Extra-EU" - ct_totals["total international aviation passenger"] = df[[8, 9]].sum() + ct_totals["total international aviation passenger"] = df.iloc[[8, 9]].sum() assert df.index[11] == "Domestic and International - Intra-EU" - ct_totals["total domestic aviation freight"] = df[11] + ct_totals["total domestic aviation freight"] = df.iloc[11] assert df.index[12] == "International - Extra-EU" - ct_totals["total international aviation freight"] = df[12] + ct_totals["total international aviation freight"] = df.iloc[12] ct_totals["total domestic aviation"] = ( ct_totals["total domestic aviation freight"] @@ -366,7 +361,7 @@ def idees_per_country(ct, year, base_dir): df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0)[year] assert df.index[85] == "Passenger cars" - ct_totals["passenger cars"] = df[85] + ct_totals["passenger cars"] = df.iloc[85] return pd.Series(ct_totals, name=ct) @@ -396,13 +391,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = 
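The systematic switch to df.iloc above is worth a small illustration: on a Series, s[23] is label-based lookup (with a deprecated integer fallback in recent pandas), while s.iloc[23] is unambiguously positional, which is what the preceding asserts on row order actually guarantee.

import pandas as pd

s = pd.Series([10.0, 20.0], index=["Space heating", "Electricity"])
assert s.index[1] == "Electricity"
print(s.iloc[1])          # 20.0, by position, robust to non-integer labels
print(s["Electricity"])   # same value, by label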
totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -481,7 +469,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): # The main heating source for about 73 per cent of the households is based on electricity # => 26% is non-electric - if "NO" in df: + if "NO" in df.index: elec_fraction = 0.73 no_norway = df.drop("NO") @@ -577,16 +565,36 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(countries, idees): + # district heating share + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) + + district_heat_share = district_heat / total_heat + + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -755,6 +763,9 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(countries, idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..78518597 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. +""" +import country_converter as coco +import numpy as np +import pandas as pd + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + + # Add existing heating capacities, data comes from the study + # "Mapping and analyses of the current and future (2020 - 2030) + # heating/cooling fuel deployment (fossil/renewables) " + # https://energy.ec.europa.eu/publications/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment-fossilrenewables-1_en + # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". + # data is for buildings only (i.e. 
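A hedged sketch of the conservative combination in build_district_heat_share above: the IDEES-derived share and the external dataset are aligned column-wise and the row-wise minimum is kept (toy values):

import pandas as pd

idees_share = pd.Series({"DE": 0.14, "DK": 0.55})     # derived heat / thermal uses
external_share = pd.Series({"DE": 0.10, "DK": 0.60})  # stand-in for the CSV input

combined = pd.concat([idees_share, external_share], axis=1).min(axis=1)
print(combined.to_dict())  # {'DE': 0.1, 'DK': 0.55}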
NOT district heating) and represents the year 2012 + # TODO start from original file + + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) + + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + + nodal_heat_name_fraction = pd.DataFrame(index=district_heat_info.index, dtype=float) + + nodal_heat_name_fraction["urban central"] = 0.0 + + for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = ( + nodal_sectoral_fraction[sector] * urban_fraction + ) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 + + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 + + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 + + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) + + build_existing_heating() diff --git a/scripts/build_gas_input_locations.py b/scripts/build_gas_input_locations.py index a3b945ab..081f74b9 100644 --- 
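The country-to-node disaggregation used in build_existing_heating_distribution above follows a recurring pattern in these scripts: repeat each country row once per node via .loc on the node-to-country mapping, then scale by each node's population fraction. A minimal sketch with toy numbers:

import pandas as pd

existing = pd.DataFrame({"oil boiler": [100.0]}, index=["DE"])  # MW per country
pop_layout = pd.DataFrame(
    {"ct": ["DE", "DE"], "fraction": [0.7, 0.3]}, index=["DE0 0", "DE0 1"]
)

nodal = existing.loc[pop_layout.ct]       # one row per node, repeated by country
nodal.index = pop_layout.index
nodal = nodal.multiply(pop_layout.fraction, axis=0)
print(nodal["oil boiler"].tolist())  # [70.0, 30.0]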
a/scripts/build_gas_input_locations.py +++ b/scripts/build_gas_input_locations.py @@ -9,12 +9,12 @@ production sites with data from SciGRID_gas and Global Energy Monitor. import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd from cluster_gas_network import load_bus_regions +logger = logging.getLogger(__name__) + def read_scigrid_gas(fn): df = gpd.read_file(fn) @@ -23,13 +23,15 @@ def read_scigrid_gas(fn): return df -def build_gem_lng_data(lng_fn): - df = pd.read_excel(lng_fn[0], sheet_name="LNG terminals - data") +def build_gem_lng_data(fn): + df = pd.read_excel(fn[0], sheet_name="LNG terminals - data") df = df.set_index("ComboID") - remove_status = ["Cancelled"] - remove_country = ["Cyprus", "Turkey"] - remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"] + remove_country = ["Cyprus", "Turkey"] # noqa: F841 + remove_terminal = [ # noqa: F841 + "Puerto de la Luz LNG Terminal", + "Gran Canaria LNG Terminal", + ] df = df.query( "Status != 'Cancelled' \ @@ -42,9 +44,50 @@ def build_gem_lng_data(lng_fn): return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326") -def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): +def build_gem_prod_data(fn): + df = pd.read_excel(fn[0], sheet_name="Gas extraction - main") + df = df.set_index("GEM Unit ID") + + remove_country = ["Cyprus", "Türkiye"] # noqa: F841 + remove_fuel_type = ["oil"] # noqa: F841 + + df = df.query( + "Status != 'shut in' \ + & 'Fuel type' != 'oil' \ + & Country != @remove_country \ + & ~Latitude.isna() \ + & ~Longitude.isna()" + ).copy() + + p = pd.read_excel(fn[0], sheet_name="Gas extraction - production") + p = p.set_index("GEM Unit ID") + p = p[p["Fuel description"] == "gas"] + + capacities = pd.DataFrame(index=df.index) + for key in ["production", "production design capacity", "reserves"]: + cap = ( + p.loc[p["Production/reserves"] == key, "Quantity (converted)"] + .groupby("GEM Unit ID") + .sum() + .reindex(df.index) + ) + # assume capacity such that 3% of reserves can be extracted per year (25% quantile) + annualization_factor = 0.03 if key == "reserves" else 1.0 + capacities[key] = cap * annualization_factor + + df["mcm_per_year"] = ( + capacities["production"] + .combine_first(capacities["production design capacity"]) + .combine_first(capacities["reserves"]) + ) + + geometry = gpd.points_from_xy(df["Longitude"], df["Latitude"]) + return gpd.GeoDataFrame(df, geometry=geometry, crs="EPSG:4326") + + +def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries): # LNG terminals - lng = build_gem_lng_data(lng_fn) + lng = build_gem_lng_data(gem_fn) # Entry points from outside the model scope entry = read_scigrid_gas(entry_fn) @@ -55,25 +98,30 @@ def build_gas_input_locations(lng_fn, entry_fn, prod_fn, countries): | (entry.from_country == "NO") # malformed datapoint # entries from NO to GB ] + sto = read_scigrid_gas(sto_fn) + remove_country = ["RU", "UA", "TR", "BY"] # noqa: F841 + sto = sto.query("country_code not in @remove_country") + # production sites inside the model scope - prod = read_scigrid_gas(prod_fn) - prod = prod.loc[ - (prod.geometry.y > 35) & (prod.geometry.x < 30) & (prod.country_code != "DE") - ] + prod = build_gem_prod_data(gem_fn) mcm_per_day_to_mw = 437.5 # MCM/day to MWh/h + mcm_per_year_to_mw = 1.199 # MCM/year to MWh/h mtpa_to_mw = 1649.224 # mtpa to MWh/h - lng["p_nom"] = lng["CapacityInMtpa"] * mtpa_to_mw - entry["p_nom"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw - prod["p_nom"] = 
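The noqa: F841 tags in the hunks here exist because the helper lists look unused to linters but are resolved by pandas.query via the @ prefix at evaluation time; pandas also treats != against a list as a "not in" membership test. A small sketch of that pattern:

import pandas as pd

df = pd.DataFrame({"Country": ["Cyprus", "Norway"], "cap": [1.0, 2.0]})
remove_country = ["Cyprus"]  # noqa: F841 (referenced only inside the query string)

print(df.query("Country != @remove_country")["Country"].tolist())  # ['Norway']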
prod["max_supply_M_m3_per_d"] * mcm_per_day_to_mw
+    mcm_to_gwh = 11.36  # MCM to GWh
+    lng["capacity"] = lng["CapacityInMtpa"] * mtpa_to_mw
+    entry["capacity"] = entry["max_cap_from_to_M_m3_per_d"] * mcm_per_day_to_mw
+    prod["capacity"] = prod["mcm_per_year"] * mcm_per_year_to_mw
+    sto["capacity"] = sto["max_cushionGas_M_m3"] * mcm_to_gwh

     lng["type"] = "lng"
     entry["type"] = "pipeline"
     prod["type"] = "production"
+    sto["type"] = "storage"

-    sel = ["geometry", "p_nom", "type"]
+    sel = ["geometry", "capacity", "type"]

-    return pd.concat([prod[sel], entry[sel], lng[sel]], ignore_index=True)
+    return pd.concat([prod[sel], entry[sel], lng[sel], sto[sel]], ignore_index=True)


 if __name__ == "__main__":
@@ -83,7 +131,7 @@
         snakemake = mock_snakemake(
             "build_gas_input_locations",
             simpl="",
-            clusters="37",
+            clusters="128",
         )
         logging.basicConfig(level=snakemake.config["logging"]["level"])
@@ -104,9 +152,9 @@
     countries = regions.index.str[:2].unique().str.replace("GB", "UK")

     gas_input_locations = build_gas_input_locations(
-        snakemake.input.lng,
+        snakemake.input.gem,
         snakemake.input.entry,
-        snakemake.input.production,
+        snakemake.input.storage,
         countries,
     )

@@ -116,9 +164,13 @@

     gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON")

+    ensure_columns = ["lng", "pipeline", "production", "storage"]
     gas_input_nodes_s = (
-        gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack()
+        gas_input_nodes.groupby(["bus", "type"])["capacity"]
+        .sum()
+        .unstack()
+        .reindex(columns=ensure_columns)
     )
-    gas_input_nodes_s.columns.name = "p_nom"
+    gas_input_nodes_s.columns.name = "capacity"

     gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified)
diff --git a/scripts/build_gas_network.py b/scripts/build_gas_network.py
index 23f58caa..13cd75ba 100644
--- a/scripts/build_gas_network.py
+++ b/scripts/build_gas_network.py
@@ -9,13 +9,13 @@ Preprocess gas network based on data from the SciGRID_gas project

 import logging

-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from pypsa.geo import haversine_pts
 from shapely.geometry import Point

+logger = logging.getLogger(__name__)
+

 def diameter_to_capacity(pipe_diameter_mm):
     """
     Calculate pipe capacity in MW based on diameter in mm.

     Based on p.15 of https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
     """
-    # slopes definitions
-    m0 = (1500 - 0) / (500 - 0)
     m1 = (5000 - 1500) / (600 - 500)
     m2 = (11250 - 5000) / (900 - 600)
-    m3 = (21700 - 11250) / (1200 - 900)
-
-    # intercept
-    a0 = 0
     a1 = -16000
     a2 = -7500
-    a3 = -20100
-
     if pipe_diameter_mm < 500:
+        # slopes definitions
+        m0 = (1500 - 0) / (500 - 0)
+        # intercept
+        a0 = 0
         return a0 + m0 * pipe_diameter_mm
     elif pipe_diameter_mm < 600:
         return a1 + m1 * pipe_diameter_mm
     elif pipe_diameter_mm < 900:
         return a2 + m2 * pipe_diameter_mm
     else:
+        m3 = (21700 - 11250) / (1200 - 900)
+
+        a3 = -20100
+
         return a3 + m3 * pipe_diameter_mm
@@ -114,12 +114,10 @@ def prepare_dataset(
     df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
     ratio = df.p_nom / df.p_nom_diameter
     not_nordstream = df.max_pressure_bar < 220
-    df.p_nom.update(
-        df.p_nom_diameter.where(
-            (df.p_nom <= 500)
-            | ((ratio > correction_threshold_p_nom) & not_nordstream)
-            | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
-        )
+    df["p_nom"] = df.p_nom_diameter.where(
+        (df.p_nom <= 500)
+        | ((ratio > correction_threshold_p_nom) &
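A hedged arithmetic check of the conversion constants used above, assuming a gross calorific value of roughly 10.5 kWh per cubic metre of natural gas (the exact value behind the constants is not stated; mcm_to_gwh = 11.36 implies a slightly higher calorific value for cushion gas):

kwh_per_m3 = 10.5  # assumed calorific value

mcm_per_day_to_mw = 1e6 * kwh_per_m3 / 24 / 1e3     # -> 437.5 MW per (MCM/day)
mcm_per_year_to_mw = 1e6 * kwh_per_m3 / 8760 / 1e3  # -> ~1.199 MW per (MCM/a)
print(round(mcm_per_day_to_mw, 1), round(mcm_per_year_to_mw, 3))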
not_nordstream) + | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream) ) # lines which have way too discrepant line lengths @@ -130,12 +128,10 @@ def prepare_dataset( axis=1, ) ratio = df.eval("length / length_haversine") - df["length"].update( - df.length_haversine.where( - (df["length"] < 20) - | (ratio > correction_threshold_length) - | (ratio < 1 / correction_threshold_length) - ) + df["length"] = df.length_haversine.where( + (df["length"] < 20) + | (ratio > correction_threshold_length) + | (ratio < 1 / correction_threshold_length) ) return df diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..c972da89 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. +""" + +from itertools import product + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_hourly_heat_demands", + scope="total", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) + + heat_demand.index.name = "snapshots" + + ds = heat_demand.stack().to_xarray() + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 883f33d2..39097535 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -26,7 +26,7 @@ Relevant Settings Inputs ------ -- ``data/bundle/EIA_hydro_generation_2000_2014.csv``: Hydroelectricity net generation per country and year (`EIA `_) +- ``data/bundle/eia_hydro_annual_generation.csv``: Hydroelectricity net generation per country and year (`EIA `_) .. 
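The weekly profile assembly in build_hourly_heat_demand.py above is easy to check in isolation: five weekday blocks plus two weekend blocks give a 168-hour pattern, which generate_periodic_profiles then tiles over the year with time-zone handling. A minimal sketch with flat toy profiles:

weekday = [0.5] * 24  # toy 24-hour intraday profile
weekend = [0.8] * 24

weekly_profile = weekday * 5 + weekend * 2
assert len(weekly_profile) == 168  # hours per week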
image:: img/hydrogeneration.png :scale: 33 % @@ -72,12 +72,14 @@ cc = coco.CountryConverter() def get_eia_annual_hydro_generation(fn, countries): # in billion kWh/a = TWh/a - df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:] + df = pd.read_csv( + fn, skiprows=2, index_col=1, na_values=[" ", "--"], decimal="," + ).iloc[1:, 1:] df.index = df.index.str.strip() former_countries = { "Former Czechoslovakia": dict( - countries=["Czech Republic", "Slovakia"], start=1980, end=1992 + countries=["Czechia", "Slovakia"], start=1980, end=1992 ), "Former Serbia and Montenegro": dict( countries=["Serbia", "Montenegro"], start=1992, end=2005 diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index e6d515b0..9b234e29 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -7,17 +7,14 @@ Build spatial distribution of industries from Hotmaps database. """ import logging - -logger = logging.getLogger(__name__) - import uuid from itertools import product import country_converter as coco import geopandas as gpd import pandas as pd -from packaging.version import Version, parse +logger = logging.getLogger(__name__) cc = coco.CountryConverter() @@ -32,7 +29,7 @@ def locate_missing_industrial_sites(df): try: from geopy.extra.rate_limiter import RateLimiter from geopy.geocoders import Nominatim - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'geopy' not found." "Install via 'conda install -c conda-forge geopy'" @@ -86,12 +83,7 @@ def prepare_hotmaps_database(regions): gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - gdf = gpd.sjoin(gdf, regions, how="inner", **kws) + gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within") gdf.rename(columns={"index_right": "bus"}, inplace=True) gdf["country"] = gdf.bus.str[:2] @@ -101,7 +93,7 @@ def prepare_hotmaps_database(regions): # get all duplicated entries duplicated_i = gdf.index[gdf.index.duplicated()] # convert from raw data country name to iso-2-code - code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") + code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") # noqa: F841 # screen out malformed country allocation gdf_filtered = gdf.loc[duplicated_i].query("country == @code") # concat not duplicated and filtered gdf diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index 9ca0d003..d1c672f1 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -167,9 +167,7 @@ def industrial_energy_demand(countries, year): with mp.Pool(processes=nprocesses) as pool: demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) - demand = pd.concat(demand_l, keys=countries) - - return demand + return pd.concat(demand_l, keys=countries) if __name__ == "__main__": diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index 74cb1949..0aea4f15 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -7,11 +7,8 @@ Build industrial production per country. 
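The decimal="," argument added to the EIA read above converts European-style decimal commas at parse time instead of leaving object-dtype strings. A tiny self-contained sketch (toy CSV, not the real EIA layout):

import io
import pandas as pd

csv = io.StringIO("country;hydro\nAustria;38,5")
df = pd.read_csv(csv, sep=";", decimal=",")
print(df["hydro"].dtype, df["hydro"].iloc[0])  # float64 38.5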
""" import logging -from functools import partial - -logger = logging.getLogger(__name__) - import multiprocessing as mp +from functools import partial import country_converter as coco import numpy as np @@ -19,6 +16,7 @@ import pandas as pd from _helpers import mute_print from tqdm import tqdm +logger = logging.getLogger(__name__) cc = coco.CountryConverter() tj_to_ktoe = 0.0238845 diff --git a/scripts/build_line_rating.py b/scripts/build_line_rating.py index abc6b286..5b4642d1 100755 --- a/scripts/build_line_rating.py +++ b/scripts/build_line_rating.py @@ -41,7 +41,7 @@ The following heat gains and losses are considered: - heat gain through resistive losses - heat gain through solar radiation -- heat loss through radiation of the trasnmission line +- heat loss through radiation of the transmission line - heat loss through forced convection with wind - heat loss through natural convection @@ -50,7 +50,6 @@ With a heat balance considering the maximum temperature threshold of the transmi the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated. """ -import logging import re import atlite @@ -83,8 +82,7 @@ def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403): ------- Resistance of at given temperature. """ - R = R_ref * (1 + alpha * (T - T_ref)) - return R + return R_ref * (1 + alpha * (T - T_ref)) def calculate_line_rating(n, cutout): @@ -100,7 +98,7 @@ def calculate_line_rating(n, cutout): ------- xarray DataArray object with maximal power. """ - relevant_lines = n.lines[(n.lines["underground"] == False)] + relevant_lines = n.lines[~n.lines["underground"]].copy() buses = relevant_lines[["bus0", "bus1"]].values x = n.buses.x y = n.buses.y @@ -120,18 +118,17 @@ def calculate_line_rating(n, cutout): .apply(lambda x: int(re.findall(r"(\d+)-bundle", x)[0])) ) # Set default number of bundles per line - relevant_lines["n_bundle"].fillna(1, inplace=True) + relevant_lines["n_bundle"] = relevant_lines["n_bundle"].fillna(1) R *= relevant_lines["n_bundle"] R = calculate_resistance(T=353, R_ref=R) Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8) line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3 # in mW - da = xr.DataArray( + return xr.DataArray( data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1), attrs=dict( description="Maximal possible power in MW for given line considering line rating" ), ) - return da if __name__ == "__main__": @@ -149,8 +146,10 @@ if __name__ == "__main__": configure_logging(snakemake) set_scenario_config(snakemake) + snapshots = snakemake.params.snapshots + n = pypsa.Network(snakemake.input.base_network) - time = pd.date_range(freq="h", **snakemake.config["snapshots"]) + time = pd.date_range(freq="h", **snapshots) cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) da = calculate_line_rating(n, cutout) diff --git a/scripts/build_monthly_prices.py b/scripts/build_monthly_prices.py index 89edde79..bb023980 100644 --- a/scripts/build_monthly_prices.py +++ b/scripts/build_monthly_prices.py @@ -6,11 +6,8 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -Created on Tue May 16 10:37:35 2023. - -This script extracts monthly fuel prices of oil, gas, coal and lignite, -as well as CO2 prices - +This script extracts monthly fuel prices of oil, gas, coal and lignite, as well +as CO2 prices. 
Inputs
------
diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py
index e864d925..cb63c27e 100644
--- a/scripts/build_population_layouts.py
+++ b/scripts/build_population_layouts.py
@@ -8,15 +8,14 @@ Build mapping between cutout grid cells and population (total, urban, rural).

 import logging

-logger = logging.getLogger(__name__)
-
-
 import atlite
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr

+logger = logging.getLogger(__name__)
+
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -34,7 +33,7 @@ if __name__ == "__main__":
     nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")

     # Indicator matrix NUTS3 -> grid cells
-    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
+    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)  # noqa: E741

     # Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is identity
     # but imprecisions mean not perfect
@@ -84,7 +83,8 @@ if __name__ == "__main__":

         # correct for imprecision of Iinv*I
         pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
-        pop_cells_ct *= pop_ct / pop_cells_ct.sum()
+        if pop_cells_ct.sum() != 0:
+            pop_cells_ct *= pop_ct / pop_cells_ct.sum()

         # The first low density grid cells to reach rural fraction are rural
         asc_density_i = density_cells_ct.sort_values().index
diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py
index 2ad1e010..66a01624 100755
--- a/scripts/build_powerplants.py
+++ b/scripts/build_powerplants.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+# SPDX-FileCopyrightText: : 2017-2024 The PyPSA-Eur Authors
 #
 # SPDX-License-Identifier: MIT

@@ -10,6 +10,7 @@
 Retrieves conventional powerplant capacities and locations from
 these to buses and creates a ``.csv`` file. It is possible to amend the
 powerplant database with custom entries provided in ``data/custom_powerplants.csv``.
+Lastly, for every substation, powerplants with zero-initial capacity can be added for certain fuel types automatically.

 Relevant Settings
 -----------------
@@ -19,6 +20,7 @@
     electricity:
       powerplants_filter:
       custom_powerplants:
+      everywhere_powerplants:

 .. seealso::
     Documentation of the configuration file ``config/config.yaml`` at

@@ -44,6 +46,7 @@ Description
-----------

The configuration options ``electricity: powerplants_filter`` and ``electricity: custom_powerplants`` can be used to control whether data should be retrieved from the original powerplants database or from custom amendments. These specify `pandas.query `_ commands.
+In addition, the configuration option ``electricity: everywhere_powerplants`` can be used to place powerplants with zero-initial capacity of certain fuel types at all substations.

1. Adding all powerplants from custom:

@@ -73,10 +76,18 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity

       powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015
       custom_powerplants: YearCommissioned <= 2015
+
+4. Adding powerplants at all substations for 4 conventional carrier types:
+
+    .. code:: yaml
+
+      everywhere_powerplants: ['Natural Gas', 'Coal', 'nuclear', 'OCGT']
 """
+import itertools
 import logging

+import numpy as np
 import pandas as pd
 import powerplantmatching as pm
 import pypsa
@@ -89,7 +100,7 @@ logger = logging.getLogger(__name__)

 def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
     if not custom_ppl_query:
         return ppl
-    add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
+    add_ppls = pd.read_csv(custom_powerplants, dtype={"bus": "str"})
     if isinstance(custom_ppl_query, str):
         add_ppls.query(custom_ppl_query, inplace=True)
     return pd.concat(
@@ -97,6 +108,45 @@
     )


+def add_everywhere_powerplants(ppl, substations, everywhere_powerplants):
+    # Create a dataframe with "everywhere_powerplants" of stated carriers at the location of all substations
+    everywhere_ppl = (
+        pd.DataFrame(
+            itertools.product(substations.index.values, everywhere_powerplants),
+            columns=["substation_index", "Fueltype"],
+        ).merge(
+            substations[["x", "y", "country"]],
+            left_on="substation_index",
+            right_index=True,
+        )
+    ).drop(columns="substation_index")
+
+    # PPL uses different column names compared to substations dataframe -> rename
+    everywhere_ppl = everywhere_ppl.rename(
+        columns={"x": "lon", "y": "lat", "country": "Country"}
+    )
+
+    # Add default values for the powerplants
+    everywhere_ppl["Name"] = (
+        "Automatically added everywhere-powerplant " + everywhere_ppl.Fueltype
+    )
+    everywhere_ppl["Set"] = "PP"
+    everywhere_ppl["Technology"] = everywhere_ppl["Fueltype"]
+    everywhere_ppl["Capacity"] = 0.0
+
+    # Assign plausible values for the commissioning and decommissioning years
+    # required for multi-year models
+    everywhere_ppl["DateIn"] = ppl["DateIn"].min()
+    everywhere_ppl["DateOut"] = ppl["DateOut"].max()
+
+    # NaN values for efficiency will be replaced by the generic efficiency by attach_conventional_generators(...)
in add_electricity.py later + everywhere_ppl["Efficiency"] = np.nan + + return pd.concat( + [ppl, everywhere_ppl], sort=False, ignore_index=True, verify_integrity=True + ) + + def replace_natural_gas_technology(df): mapping = {"Steam Turbine": "CCGT", "Combustion Engine": "OCGT"} tech = df.Technology.replace(mapping).fillna("CCGT") @@ -147,10 +197,14 @@ if __name__ == "__main__": ppl, snakemake.input.custom_powerplants, custom_ppl_query ) - countries_wo_ppl = set(countries) - set(ppl.Country.unique()) - if countries_wo_ppl: + if countries_wo_ppl := set(countries) - set(ppl.Country.unique()): logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}") + # Add "everywhere powerplants" to all bus locations + ppl = add_everywhere_powerplants( + ppl, n.buses.query("substation_lv"), snakemake.params.everywhere_powerplants + ) + substations = n.buses.query("substation_lv") ppl = ppl.dropna(subset=["lat", "lon"]) ppl = map_country_bus(ppl, substations) diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index c6d42e6b..5ffafe2a 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -26,20 +26,9 @@ Relevant settings renewable: {technology}: - cutout: - corine: - grid_codes: - distance: - natura: - max_depth: - max_shore_distance: - min_shore_distance: - capacity_per_sqkm: - correction_factor: - potential: - min_p_max_pu: - clip_p_max_pu: - resource: + cutout: corine: luisa: grid_codes: distance: natura: max_depth: + max_shore_distance: min_shore_distance: capacity_per_sqkm: + correction_factor: min_p_max_pu: clip_p_max_pu: resource: .. seealso:: Documentation of the configuration file ``config/config.yaml`` at @@ -48,21 +37,37 @@ Relevant settings Inputs ------ -- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC) `_ inventory on `44 classes `_ of land use (e.g. forests, arable land, industrial, urban areas). +- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC) + `_ inventory on `44 + classes `_ of + land use (e.g. forests, arable land, industrial, urban areas) at 100m + resolution. .. image:: img/corine.png :scale: 33 % -- ``data/bundle/GEBCO_2014_2D.nc``: A `bathymetric `_ data set with a global terrain model for ocean and land at 15 arc-second intervals by the `General Bathymetric Chart of the Oceans (GEBCO) `_. +- ``data/LUISA_basemap_020321_50m.tif``: `LUISA Base Map + `_ land + coverage dataset at 50m resolution similar to CORINE. For codes in relation to + CORINE land cover, see `Annex 1 of the technical documentation + `_. + +- ``data/bundle/GEBCO_2014_2D.nc``: A `bathymetric + `_ data set with a global terrain + model for ocean and land at 15 arc-second intervals by the `General + Bathymetric Chart of the Oceans (GEBCO) + `_. .. 
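A hedged sketch of the product/merge pattern in add_everywhere_powerplants above: every (substation, fuel type) pair becomes one zero-capacity candidate row carrying the substation's coordinates (toy data):

import itertools

import pandas as pd

substations = pd.DataFrame(
    {"x": [10.0], "y": [50.0], "country": ["DE"]}, index=["DE0 0"]
)
fuels = ["Natural Gas", "nuclear"]

rows = pd.DataFrame(
    itertools.product(substations.index, fuels),
    columns=["substation_index", "Fueltype"],
).merge(substations, left_on="substation_index", right_index=True)
print(len(rows))  # 2: one candidate plant per fuel at the single substation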
image:: img/gebco_2019_grid_image.jpg :scale: 50 % - **Source:** `GEBCO `_ + **Source:** `GEBCO + `_ - ``resources/natura.tiff``: confer :ref:`natura` - ``resources/offshore_shapes.geojson``: confer :ref:`shapes` -- ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions` +- ``resources/regions_onshore.geojson``: (if not offshore wind), confer + :ref:`busregions` - ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions` - ``"cutouts/" + params["renewable"][{technology}]['cutout']``: :ref:`cutout` - ``networks/base.nc``: :ref:`base` @@ -128,25 +133,26 @@ Description This script functions at two main spatial resolutions: the resolution of the network nodes and their `Voronoi cells `_, and the resolution of the -cutout grid cells for the weather data. Typically the weather data grid is -finer than the network nodes, so we have to work out the distribution of -generators across the grid cells within each Voronoi cell. This is done by -taking account of a combination of the available land at each grid cell and the -capacity factor there. +cutout grid cells for the weather data. Typically the weather data grid is finer +than the network nodes, so we have to work out the distribution of generators +across the grid cells within each Voronoi cell. This is done by taking account +of a combination of the available land at each grid cell and the capacity factor +there. First the script computes how much of the technology can be installed at each -cutout grid cell and each node using the `GLAES -`_ library. This uses the CORINE land use data, -Natura2000 nature reserves and GEBCO bathymetry data. +cutout grid cell and each node using the `atlite +`_ library. This uses the CORINE land use data, +LUISA land use data, Natura2000 nature reserves, GEBCO bathymetry data, and +shipping lanes. .. image:: img/eligibility.png :scale: 50 % :align: center -To compute the layout of generators in each node's Voronoi cell, the -installable potential in each grid cell is multiplied with the capacity factor -at each grid cell. This is done since we assume more generators are installed -at cells with a higher capacity factor. +To compute the layout of generators in each node's Voronoi cell, the installable +potential in each grid cell is multiplied with the capacity factor at each grid +cell. This is done since we assume more generators are installed at cells with a +higher capacity factor. .. image:: img/offwinddc-gridcell.png :scale: 50 % @@ -164,20 +170,14 @@ at cells with a higher capacity factor. :scale: 50 % :align: center -This layout is then used to compute the generation availability time series -from the weather data cutout from ``atlite``. +This layout is then used to compute the generation availability time series from +the weather data cutout from ``atlite``. -Two methods are available to compute the maximal installable potential for the -node (`p_nom_max`): ``simple`` and ``conservative``: - -- ``simple`` adds up the installable potentials of the individual grid cells. - If the model comes close to this limit, then the time series may slightly - overestimate production since it is assumed the geographical distribution is - proportional to capacity factor. - -- ``conservative`` assertains the nodal limit by increasing capacities - proportional to the layout until the limit of an individual grid cell is - reached. +The maximal installable potential for the node (`p_nom_max`) is computed by +adding up the installable potentials of the individual grid cells. 
If the model +comes close to this limit, then the time series may slightly overestimate +production since it is assumed the geographical distribution is proportional to +capacity factor. """ import functools import logging @@ -200,9 +200,7 @@ if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake( - "build_renewable_profiles", technology="solar", run="network2019" - ) + snakemake = mock_snakemake("build_renewable_profiles", technology="offwind-dc") configure_logging(snakemake) set_scenario_config(snakemake) @@ -211,12 +209,16 @@ if __name__ == "__main__": noprogress = noprogress or not snakemake.config["atlite"]["show_progress"] params = snakemake.params.renewable[snakemake.wildcards.technology] resource = params["resource"] # pv panel params / wind turbine params + + tech = next(t for t in ["panel", "turbine"] if t in resource) + models = resource[tech] + if not isinstance(models, dict): + models = {0: models} + resource[tech] = models[next(iter(models))] + correction_factor = params.get("correction_factor", 1.0) capacity_per_sqkm = params["capacity_per_sqkm"] - p_nom_max_meth = params.get("potential", "conservative") - - if isinstance(params.get("corine", {}), list): - params["corine"] = {"grid_codes": params["corine"]} + snapshots = snakemake.params.snapshots if correction_factor != 1.0: logger.info(f"correction_factor is set as {correction_factor}") @@ -226,7 +228,7 @@ if __name__ == "__main__": else: client = None - sns = pd.date_range(freq="h", **snakemake.config["snapshots"]) + sns = pd.date_range(freq="h", **snapshots) cutout = atlite.Cutout(snakemake.input.cutout).sel(time=sns) regions = gpd.read_file(snakemake.input.regions) assert not regions.empty, ( @@ -243,18 +245,31 @@ if __name__ == "__main__": if params["natura"]: excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True) - corine = params.get("corine", {}) - if "grid_codes" in corine: - codes = corine["grid_codes"] - excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035) - if corine.get("distance", 0.0) > 0.0: - codes = corine["distance_grid_codes"] - buffer = corine["distance"] - excluder.add_raster( - snakemake.input.corine, codes=codes, buffer=buffer, crs=3035 - ) + for dataset in ["corine", "luisa"]: + kwargs = {"nodata": 0} if dataset == "luisa" else {} + settings = params.get(dataset, {}) + if not settings: + continue + if dataset == "luisa" and res > 50: + logger.info( + "LUISA data is available at 50m resolution, " + f"but coarser {res}m resolution is used." 
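The retained "simple" potential described in the docstring above reduces to a weighted inner product: installable capacity per square kilometre times the availability-weighted cell areas, summed per bus. A minimal xarray sketch with a toy two-cell grid:

import xarray as xr

availability = xr.DataArray([[0.5, 1.0]], dims=["bus", "spatial"])  # shares
area = xr.DataArray([10.0, 20.0], dims=["spatial"])                 # km2
capacity_per_sqkm = 3.0                                             # MW/km2

p_nom_max = capacity_per_sqkm * availability @ area  # contracts "spatial"
print(p_nom_max.item())  # 75.0 MW = 3 * (0.5*10 + 1.0*20)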
+ ) + if isinstance(settings, list): + settings = {"grid_codes": settings} + if "grid_codes" in settings: + codes = settings["grid_codes"] + excluder.add_raster( + snakemake.input[dataset], codes=codes, invert=True, crs=3035, **kwargs + ) + if settings.get("distance", 0.0) > 0.0: + codes = settings["distance_grid_codes"] + buffer = settings["distance"] + excluder.add_raster( + snakemake.input[dataset], codes=codes, buffer=buffer, crs=3035, **kwargs + ) - if "ship_threshold" in params: + if params.get("ship_threshold"): shipping_threshold = ( params["ship_threshold"] * 8760 * 6 ) # approximation because 6 years of data which is hourly collected @@ -280,15 +295,22 @@ if __name__ == "__main__": snakemake.input.country_shapes, buffer=buffer, invert=True ) + logger.info("Calculate landuse availability...") + start = time.time() + kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) - if noprogress: - logger.info("Calculate landuse availabilities...") - start = time.time() - availability = cutout.availabilitymatrix(regions, excluder, **kwargs) - duration = time.time() - start - logger.info(f"Completed availability calculation ({duration:2.2f}s)") - else: - availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + + duration = time.time() - start + logger.info(f"Completed landuse availability calculation ({duration:2.2f}s)") + + # For Moldova and Ukraine: Overwrite parts not covered by Corine with + # externally determined available areas + if "availability_matrix_MD_UA" in snakemake.input.keys(): + availability_MDUA = xr.open_dataarray( + snakemake.input["availability_matrix_MD_UA"] + ) + availability.loc[availability_MDUA.coords] = availability_MDUA area = cutout.grid.to_crs(3035).area / 1e6 area = xr.DataArray( @@ -299,28 +321,53 @@ if __name__ == "__main__": func = getattr(cutout, resource.pop("method")) if client is not None: resource["dask_kwargs"] = {"scheduler": client} + + logger.info("Calculate average capacity factor...") + start = time.time() + capacity_factor = correction_factor * func(capacity_factor=True, **resource) layout = capacity_factor * area * capacity_per_sqkm - profile, capacities = func( - matrix=availability.stack(spatial=["y", "x"]), - layout=layout, - index=buses, - per_unit=True, - return_capacity=True, - **resource, - ) - logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") - if p_nom_max_meth == "simple": - p_nom_max = capacity_per_sqkm * availability @ area - elif p_nom_max_meth == "conservative": - max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"]) - p_nom_max = capacities / max_cap_factor - else: - raise AssertionError( - 'Config key `potential` should be one of "simple" ' - f'(default) or "conservative", not "{p_nom_max_meth}"' + duration = time.time() - start + logger.info(f"Completed average capacity factor calculation ({duration:2.2f}s)") + + profiles = [] + capacities = [] + for year, model in models.items(): + + logger.info( + f"Calculate weighted capacity factor time series for model {model}..." 
) + start = time.time() + + resource[tech] = model + + profile, capacity = func( + matrix=availability.stack(spatial=["y", "x"]), + layout=layout, + index=buses, + per_unit=True, + return_capacity=True, + **resource, + ) + + dim = {"year": [year]} + profile = profile.expand_dims(dim) + capacity = capacity.expand_dims(dim) + + profiles.append(profile.rename("profile")) + capacities.append(capacity.rename("weight")) + + duration = time.time() - start + logger.info( + f"Completed weighted capacity factor time series calculation for model {model} ({duration:2.2f}s)" + ) + + profiles = xr.merge(profiles) + capacities = xr.merge(capacities) + + logger.info("Calculating maximal capacity per bus") + p_nom_max = capacity_per_sqkm * availability @ area logger.info("Calculate average distances.") layoutmatrix = (layout * availability).stack(spatial=["y", "x"]) @@ -344,8 +391,8 @@ if __name__ == "__main__": ds = xr.merge( [ - (correction_factor * profile).rename("profile"), - capacities.rename("weight"), + correction_factor * profiles, + capacities, p_nom_max.rename("p_nom_max"), potential.rename("potential"), average_distance.rename("average_distance"), @@ -365,9 +412,13 @@ if __name__ == "__main__": ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses]) # select only buses with some capacity and minimal capacity factor + mean_profile = ds["profile"].mean("time") + if "year" in ds.indexes: + mean_profile = mean_profile.max("year") + ds = ds.sel( bus=( - (ds["profile"].mean("time") > params.get("min_p_max_pu", 0.0)) + (mean_profile > params.get("min_p_max_pu", 0.0)) & (ds["p_nom_max"] > params.get("min_p_nom_max", 0.0)) ) ) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py old mode 100644 new mode 100755 index c830415e..60d74afa --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -102,7 +102,7 @@ solar_energy_transmittance = ( ) # solar global radiation [kWh/(m^2a)] solar_global_radiation = pd.Series( - [246, 401, 246, 148], + [271, 392, 271, 160], index=["east", "south", "west", "north"], name="solar_global_radiation [kWh/(m^2a)]", ) @@ -164,6 +164,12 @@ def prepare_building_stock_data(): }, inplace=True, ) + building_data["feature"].replace( + { + "Construction features (U-value)": "Construction features (U-values)", + }, + inplace=True, + ) building_data.country_code = building_data.country_code.str.upper() building_data["subsector"].replace( @@ -198,12 +204,14 @@ def prepare_building_stock_data(): } ) + building_data["country_code"] = building_data["country"].map(country_iso_dic) + # heated floor area ---------------------------------------------------------- area = building_data[ (building_data.type == "Heated area [Mm²]") & (building_data.subsector != "Total") ] - area_tot = area.groupby(["country", "sector"]).sum() + area_tot = area[["country", "sector", "value"]].groupby(["country", "sector"]).sum() area = pd.concat( [ area, @@ -223,7 +231,7 @@ def prepare_building_stock_data(): usecols=[0, 1, 2, 3], encoding="ISO-8859-1", ) - area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack()) + area_tot = pd.concat([area_tot, area_missing.unstack(level=-1).dropna().stack()]) area_tot = area_tot.loc[~area_tot.index.duplicated(keep="last")] # for still missing countries calculate floor area by population size @@ -246,7 +254,7 @@ def prepare_building_stock_data(): averaged_data.index = index averaged_data["estimated"] = 1 if ct not in area_tot.index.levels[0]: - area_tot = area_tot.append(averaged_data, sort=True) + area_tot = 
pd.concat([area_tot, averaged_data], sort=True) else: area_tot.loc[averaged_data.index] = averaged_data @@ -272,7 +280,7 @@ def prepare_building_stock_data(): ][x["bage"]].iloc[0], axis=1, ) - data_PL_final = data_PL_final.append(data_PL) + data_PL_final = pd.concat([data_PL_final, data_PL]) u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True) @@ -289,8 +297,8 @@ def prepare_building_stock_data(): errors="ignore", ) - u_values.subsector.replace(rename_sectors, inplace=True) - u_values.btype.replace(rename_sectors, inplace=True) + u_values["subsector"] = u_values.subsector.replace(rename_sectors) + u_values["btype"] = u_values.btype.replace(rename_sectors) # for missing weighting of surfaces of building types assume MFH u_values["assumed_subsector"] = u_values.subsector @@ -298,8 +306,8 @@ def prepare_building_stock_data(): ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector" ] = "MFH" - u_values.country_code.replace({"UK": "GB"}, inplace=True) - u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True) + u_values["country_code"] = u_values.country_code.replace({"UK": "GB"}) + u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"}) u_values = u_values[~u_values.bage.isna()] u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) @@ -525,16 +533,16 @@ def prepare_temperature_data(): """ temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas() d_heat = ( - temperature.groupby(temperature.columns.str[:2], axis=1) + temperature.T.groupby(temperature.columns.str[:2]) .mean() - .resample("1D") + .T.resample("1D") .mean() < t_threshold ).sum() temperature_average_d_heat = ( - temperature.groupby(temperature.columns.str[:2], axis=1) + temperature.T.groupby(temperature.columns.str[:2]) .mean() - .apply( + .T.apply( lambda x: get_average_temperature_during_heating_season(x, t_threshold=15) ) ) @@ -546,7 +554,7 @@ def prepare_temperature_data(): # windows --------------------------------------------------------------- -def window_limit(l, window_assumptions): +def window_limit(l, window_assumptions): # noqa: E741 """ Define limit u value from which on window is retrofitted. """ @@ -559,7 +567,7 @@ def window_limit(l, window_assumptions): return m * l + a -def u_retro_window(l, window_assumptions): +def u_retro_window(l, window_assumptions): # noqa: E741 """ Define retrofitting value depending on renovation strength. """ @@ -572,7 +580,7 @@ def u_retro_window(l, window_assumptions): return max(m * l + a, 0.8) -def window_cost(u, cost_retro, window_assumptions): +def window_cost(u, cost_retro, window_assumptions): # noqa: E741 """ Get costs for new windows depending on u value. """ @@ -592,34 +600,40 @@ def window_cost(u, cost_retro, window_assumptions): return window_cost -def calculate_costs(u_values, l, cost_retro, window_assumptions): +def calculate_costs(u_values, l, cost_retro, window_assumptions): # noqa: E741 """ Returns costs for a given retrofitting strength weighted by the average surface/volume ratio of the component for each building type. 
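(A note on the pattern recurring throughout this file's hunks: `DataFrame.append` was removed in pandas 2.0, so every `df = df.append(other)` becomes `df = pd.concat([df, other])`. A minimal, self-contained example with toy frames:)

```python
import pandas as pd

df = pd.DataFrame({"value": [1.0, 2.0]}, index=["a", "b"])
extra = pd.DataFrame({"value": [3.0]}, index=["c"])

# pandas < 2.0 (removed API):
# df = df.append(extra, sort=True)

# pandas >= 2.0 replacement used in this diff:
df = pd.concat([df, extra], sort=True)
```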
""" return u_values.apply( lambda x: ( - cost_retro.loc[x.name[3], "cost_var"] - * 100 - * float(l) - * l_weight.loc[x.name[3]][0] - + cost_retro.loc[x.name[3], "cost_fix"] - ) - * x.A_element - / x.A_C_Ref - if x.name[3] != "Window" - else ( - window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions) + ( + cost_retro.loc[x.name[3], "cost_var"] + * 100 + * float(l) + * l_weight.loc[x.name[3]].iloc[0] + + cost_retro.loc[x.name[3], "cost_fix"] + ) * x.A_element / x.A_C_Ref - if x.value > window_limit(float(l), window_assumptions) - else 0 + if x.name[3] != "Window" + else ( + ( + ( + window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) + * x.A_element + ) + / x.A_C_Ref + ) + if x.value > window_limit(float(l), window_assumptions) + else 0 + ) ), axis=1, ) -def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): +def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): # noqa: E741 """ Calculate U-values after building retrofitting, depending on the old U-values (u_values). This is for simple insulation measuers, adding an @@ -641,12 +655,14 @@ def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): k: thermal conductivity """ return u_values.apply( - lambda x: k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) - if x.name[3] != "Window" - else ( - min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value > window_limit(float(l), window_assumptions) - else x.value + lambda x: ( + k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3] != "Window" + else ( + min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value > window_limit(float(l), window_assumptions) + else x.value + ) ), axis=1, ) @@ -713,6 +729,7 @@ def map_to_lstrength(l_strength, df): .swaplevel(axis=1) .dropna(axis=1) ) + return pd.concat([df.drop([2, 3], axis=1, level=1), l_strength_df], axis=1) @@ -738,13 +755,13 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) """ # (1) by transmission # calculate new U values of building elements due to additional insulation - for l in l_strength: - u_values["new_U_{}".format(l)] = calculate_new_u( + for l in l_strength: # noqa: E741 + u_values[f"new_U_{l}"] = calculate_new_u( u_values, l, l_weight, window_assumptions ) # surface area of building components [m^2] area_element = ( - data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]] + data_tabula[[f"A_{e}" for e in u_values.index.levels[3]]] .rename(columns=lambda x: x[2:]) .stack() .unstack(-2) @@ -756,7 +773,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) # heat transfer H_tr_e [W/m^2K] through building element # U_e * A_e / A_C_Ref - columns = ["value"] + ["new_U_{}".format(l) for l in l_strength] + columns = ["value"] + [f"new_U_{l}" for l in l_strength] heat_transfer = pd.concat( [u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1 ) @@ -793,6 +810,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) * data_tabula.A_envelope / data_tabula.A_C_Ref ) + heat_transfer_perm2 = pd.concat( [ heat_transfer_perm2, @@ -829,9 +847,9 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor) F_red_temp = map_to_lstrength(l_strength, F_red_temp) Q_ht = ( - heat_transfer_perm2.groupby(level=1, axis=1) + heat_transfer_perm2.T.groupby(level=1) .sum() - .mul(F_red_temp.droplevel(0, axis=1)) + .T.mul(F_red_temp.droplevel(0, axis=1)) 
.mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0) ) @@ -871,14 +889,11 @@ def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain): Calculates gain utilisation factor nu. """ # time constant of the building tau [h] = c_m [Wh/(m^2K)] * 1 /(H_tr_e+H_tb*H_ve) [m^2 K /W] - tau = c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum() + tau = c_m / heat_transfer_perm2.T.groupby(axis=1).sum().T alpha = alpha_H_0 + (tau / tau_H_0) # heat balance ratio gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0) - # gain utilisation factor - nu = (1 - gamma**alpha) / (1 - gamma ** (alpha + 1)) - - return nu + return (1 - gamma**alpha) / (1 - gamma ** (alpha + 1)) def calculate_space_heat_savings( @@ -947,7 +962,8 @@ def sample_dE_costs_area( .rename(index=rename_sectors, level=2) .reset_index() ) - .rename(columns={"country": "country_code"}) + # if uncommented, leads to the second `country_code` column + # .rename(columns={"country": "country_code"}) .set_index(["country_code", "subsector", "bage"]) ) @@ -960,13 +976,14 @@ def sample_dE_costs_area( ) # map missing countries - for ct in countries.difference(cost_dE.index.levels[0]): + for ct in set(countries).difference(cost_dE.index.levels[0]): averaged_data = ( cost_dE.reindex(index=map_for_missings[ct], level=0) - .mean(level=1) + .groupby(level=1) + .mean() .set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]])) ) - cost_dE = cost_dE.append(averaged_data) + cost_dE = pd.concat([cost_dE, averaged_data]) # weights costs after construction index if construction_index: @@ -983,24 +1000,23 @@ def sample_dE_costs_area( # drop not considered countries cost_dE = cost_dE.reindex(countries, level=0) # get share of residential and service floor area - sec_w = area_tot.value / area_tot.value.groupby(level=0).sum() + sec_w = area_tot.div(area_tot.groupby(level=0).transform("sum")) # get the total cost-energy-savings weight by sector area tot = ( - cost_dE.mul(sec_w, axis=0) - .groupby(level="country_code") + # sec_w has columns "estimated" and "value" + cost_dE.mul(sec_w.value, axis=0) + # for some reasons names of the levels were lost somewhere + # .groupby(level="country_code") + .groupby(level=0) .sum() - .set_index( - pd.MultiIndex.from_product( - [cost_dE.index.unique(level="country_code"), ["tot"]] - ) - ) + .set_index(pd.MultiIndex.from_product([cost_dE.index.unique(level=0), ["tot"]])) ) - cost_dE = cost_dE.append(tot).unstack().stack() + cost_dE = pd.concat([cost_dE, tot]).unstack().stack() - summed_area = pd.DataFrame(area_tot.groupby("country").sum()).set_index( - pd.MultiIndex.from_product([area_tot.index.unique(level="country"), ["tot"]]) + summed_area = pd.DataFrame(area_tot.groupby(level=0).sum()).set_index( + pd.MultiIndex.from_product([area_tot.index.unique(level=0), ["tot"]]) ) - area_tot = area_tot.append(summed_area).unstack().stack() + area_tot = pd.concat([area_tot, summed_area]).unstack().stack() cost_per_saving = cost_dE["cost"] / ( 1 - cost_dE["dE"] diff --git a/scripts/build_salt_cavern_potentials.py b/scripts/build_salt_cavern_potentials.py index 956ed431..ed039772 100644 --- a/scripts/build_salt_cavern_potentials.py +++ b/scripts/build_salt_cavern_potentials.py @@ -66,11 +66,7 @@ def salt_cavern_potential_by_region(caverns, regions): "capacity_per_area * share * area_caverns / 1000" ) # TWh - caverns_regions = ( - overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type") - ) - - return caverns_regions + return overlay.groupby(["name", 
"storage_type"]).e_nom.sum().unstack("storage_type") if __name__ == "__main__": diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 571a7282..715a5f79 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -119,7 +119,7 @@ def countries(naturalearth, country_list): fieldnames = ( df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3") ) - df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] + df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[:2] df = df.loc[ df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5)) @@ -158,7 +158,7 @@ def country_cover(country_shapes, eez_shapes=None): shapes = pd.concat([shapes, eez_shapes]) europe_shape = shapes.unary_union if isinstance(europe_shape, MultiPolygon): - europe_shape = max(europe_shape, key=attrgetter("area")) + europe_shape = max(europe_shape.geoms, key=attrgetter("area")) return Polygon(shell=europe_shape.exterior) @@ -174,8 +174,8 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp): pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(",")) ) .loc["THS"] - .applymap(lambda x: pd.to_numeric(x, errors="coerce")) - .fillna(method="bfill", axis=1) + .map(lambda x: pd.to_numeric(x, errors="coerce")) + .bfill(axis=1) )["2014"] gdp = pd.read_table(nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python") @@ -184,8 +184,8 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp): pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(",")) ) .loc["EUR_HAB"] - .applymap(lambda x: pd.to_numeric(x, errors="coerce")) - .fillna(method="bfill", axis=1) + .map(lambda x: pd.to_numeric(x, errors="coerce")) + .bfill(axis=1) )["2014"] cantons = pd.read_csv(ch_cantons) diff --git a/scripts/build_ship_raster.py b/scripts/build_ship_raster.py index 25bebcca..c8285180 100644 --- a/scripts/build_ship_raster.py +++ b/scripts/build_ship_raster.py @@ -42,7 +42,6 @@ Description """ import logging -import os import zipfile from pathlib import Path @@ -68,7 +67,7 @@ if __name__ == "__main__": fn = "shipdensity_global.tif" zip_f.extract(fn, resources) with rioxarray.open_rasterio(resources / fn) as ship_density: - ship_density = ship_density.drop(["band"]).sel( + ship_density = ship_density.drop_vars(["band"]).sel( x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys)) ) ship_density.rio.to_raster(snakemake.output[0]) diff --git a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index d285691a..ee6ed881 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -33,10 +33,7 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) I = cutout.indicatormatrix(clustered_regions) diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index 9db37c25..02fa4a71 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -31,13 +31,10 @@ if __name__ == "__main__": cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time) clustered_regions = ( - gpd.read_file(snakemake.input.regions_onshore) - .set_index("name") - .buffer(0) - .squeeze() + 
gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0) ) - I = cutout.indicatormatrix(clustered_regions) + I = cutout.indicatormatrix(clustered_regions) # noqa: E741 pop_layout = xr.open_dataarray(snakemake.input.pop_layout) diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py index c5bf4632..33c8faae 100644 --- a/scripts/build_transport_demand.py +++ b/scripts/build_transport_demand.py @@ -8,10 +8,14 @@ improvements due to drivetrain changes, time series for electric vehicle availability and demand-side management constraints. """ +import logging + import numpy as np import pandas as pd import xarray as xr -from _helpers import generate_periodic_profiles +from _helpers import configure_logging, generate_periodic_profiles + +logger = logging.getLogger(__name__) def build_nodal_transport_data(fn, pop_layout): @@ -81,14 +85,12 @@ def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data): - pop_weighted_energy_totals["electricity rail"] ) - transport = ( + return ( (transport_shape.multiply(energy_totals_transport) * 1e6 * nyears) .divide(efficiency_gain * ice_correction) .multiply(1 + dd_EV) ) - return transport - def transport_degree_factor( temperature, @@ -132,14 +134,18 @@ def bev_availability_profile(fn, snapshots, nodes, options): traffic.mean() - traffic.min() ) - avail_profile = generate_periodic_profiles( + if not avail[avail < 0].empty: + logger.warning( + "The BEV availability weekly profile has negative values which can " + "lead to infeasibility." + ) + + return generate_periodic_profiles( dt_index=snapshots, nodes=nodes, weekly_profile=avail.values, ) - return avail_profile - def bev_dsm_profile(snapshots, nodes, options): dsm_week = np.zeros((24 * 7,)) @@ -148,14 +154,12 @@ def bev_dsm_profile(snapshots, nodes, options): "bev_dsm_restriction_value" ] - dsm_profile = generate_periodic_profiles( + return generate_periodic_profiles( dt_index=snapshots, nodes=nodes, weekly_profile=dsm_week, ) - return dsm_profile - if __name__ == "__main__": if "snakemake" not in globals(): @@ -166,6 +170,7 @@ if __name__ == "__main__": simpl="", clusters=48, ) + configure_logging(snakemake) pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) diff --git a/scripts/cluster_gas_network.py b/scripts/cluster_gas_network.py index e7554dff..e709d772 100755 --- a/scripts/cluster_gas_network.py +++ b/scripts/cluster_gas_network.py @@ -8,14 +8,13 @@ Cluster gas transmission network to clustered model regions. 
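Before the gas-network changes: the transport script above turns 168-value weekly profiles (BEV availability, demand-side management windows) into node-level time series. Below is a simplified sketch of the tiling idea, with an assumed restriction hour and value; the real `generate_periodic_profiles` helper in `_helpers.py` additionally accounts for country time zones:

```python
import numpy as np
import pandas as pd


def periodic_profile(dt_index, nodes, weekly_profile):
    # Map each snapshot onto its hour of the week (Mon 00:00 -> 0, Sun 23:00 -> 167)
    # and broadcast the same weekly pattern to every node.
    week_hour = dt_index.dayofweek * 24 + dt_index.hour
    values = np.asarray(weekly_profile)[week_hour]
    return pd.DataFrame({node: values for node in nodes}, index=dt_index)


snapshots = pd.date_range("2013-01-01", periods=48, freq="h")
dsm_week = np.zeros(24 * 7)
dsm_week[np.arange(7) * 24 + 7] = 0.95  # assumed: 95% minimum state of charge at 07:00
profile = periodic_profile(snapshots, nodes=["DE0 0", "FR0 0"], weekly_profile=dsm_week)
```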
import logging -logger = logging.getLogger(__name__) - import geopandas as gpd import pandas as pd -from packaging.version import Version, parse from pypsa.geo import haversine_pts from shapely import wkt +logger = logging.getLogger(__name__) + def concat_gdf(gdf_list, crs="EPSG:4326"): """ @@ -41,12 +40,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): for i in [0, 1]: gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326") - kws = ( - dict(op="within") - if parse(gpd.__version__) < Version("0.10") - else dict(predicate="within") - ) - bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right + bus_mapping = gpd.sjoin( + gdf, bus_regions, how="left", predicate="within" + ).index_right bus_mapping = bus_mapping.groupby(bus_mapping.index).first() df[f"bus{i}"] = bus_mapping @@ -75,10 +71,10 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25): return df -def reindex_pipes(df): +def reindex_pipes(df, prefix="gas pipeline"): def make_index(x): connector = " <-> " if x.bidirectional else " -> " - return "gas pipeline " + x.bus0 + connector + x.bus1 + return prefix + " " + x.bus0 + connector + x.bus1 df.index = df.apply(make_index, axis=1) diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index b0ce4796..44b83e99 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -16,8 +16,7 @@ Relevant Settings clustering: cluster_network: aggregation_strategies: - - focus_weights: + focus_weights: solving: solver: @@ -123,17 +122,20 @@ Exemplary unsolved network clustered to 37 nodes: """ import logging +import os import warnings from functools import reduce import geopandas as gpd +import linopy import matplotlib.pyplot as plt import numpy as np import pandas as pd -import pyomo.environ as po import pypsa import seaborn as sns -from _helpers import configure_logging, set_scenario_config, update_p_nom_max +from _helpers import configure_logging, update_p_nom_max, set_scenario_config +from add_electricity import load_costs +from packaging.version import Version, parse from pypsa.clustering.spatial import ( busmap_by_greedy_modularity, busmap_by_hac, @@ -141,12 +143,10 @@ from pypsa.clustering.spatial import ( get_clustering_from_busmap, ) +PD_GE_2_2 = parse(pd.__version__) >= Version("2.2") + warnings.filterwarnings(action="ignore", category=UserWarning) - -from add_electricity import load_costs - idx = pd.IndexSlice - logger = logging.getLogger(__name__) @@ -218,7 +218,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None): return feature_data -def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): +def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"): """ Determine the number of clusters per country. """ @@ -237,7 +237,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): n_clusters >= len(N) and n_clusters <= N.sum() ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries." - if focus_weights is not None: + if isinstance(focus_weights, dict): total_focus = sum(list(focus_weights.values())) assert ( @@ -258,31 +258,22 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"): L.sum(), 1.0, rtol=1e-3 ), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." 
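For reference, the problem that the next hunk ports from Pyomo to linopy is the integer least-squares distribution

```latex
\min_{n_i \in \mathbb{Z}} \sum_i \left( n_i - L_i C \right)^2
\quad \text{s.t.} \quad \sum_i n_i = C, \qquad 1 \le n_i \le N_i
```

where C is the requested total number of clusters, L_i the per-country weight and N_i the number of buses in country i. Expanding the square gives \(\sum_i n_i^2 - 2C\sum_i L_i n_i + C^2\sum_i L_i^2\), whose last term does not depend on n; dropping that constant is why the new objective reads `(clusters * clusters - 2 * clusters * L * n_clusters).sum()`.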
- m = po.ConcreteModel() - - def n_bounds(model, *n_id): - return (1, N[n_id]) - - m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers) - m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters)) - m.objective = po.Objective( - expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index), - sense=po.minimize, + m = linopy.Model() + clusters = m.add_variables( + lower=1, upper=N, coords=[L.index], name="n", integer=True ) - - opt = po.SolverFactory(solver_name) - if not opt.has_capability("quadratic_objective"): - logger.warning( - f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`." + m.add_constraints(clusters.sum() == n_clusters, name="tot") + # leave out constant in objective (L * n_clusters) ** 2 + m.objective = (clusters * clusters - 2 * clusters * L * n_clusters).sum() + if solver_name == "gurobi": + logging.getLogger("gurobipy").propagate = False + elif solver_name != "scip": + logger.info( + f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `scip`." ) - opt = po.SolverFactory("ipopt") - - results = opt.solve(m) - assert ( - results["Solver"][0]["Status"] == "ok" - ), f"Solver returned non-optimally: {results}" - - return pd.Series(m.n.get_values(), index=L.index).round().astype(int) + solver_name = "scip" + m.solve(solver_name=solver_name) + return m.solution["n"].to_series().astype(int) def busmap_for_n_clusters( @@ -322,9 +313,9 @@ def busmap_for_n_clusters( neighbor_bus = n.lines.query( "bus0 == @disconnected_bus or bus1 == @disconnected_bus" ).iloc[0][["bus0", "bus1"]] - new_country = list( - set(n.buses.loc[neighbor_bus].country) - set([country]) - )[0] + new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[ + 0 + ] logger.info( f"overwriting country `{country}` of bus `{disconnected_bus}` " @@ -374,9 +365,11 @@ def busmap_for_n_clusters( f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}." ) + compat_kws = dict(include_groups=False) if PD_GE_2_2 else {} + return ( n.buses.groupby(["country", "sub_network"], group_keys=False) - .apply(busmap_for_country) + .apply(busmap_for_country, **compat_kws) .squeeze() .rename("busmap") ) @@ -389,7 +382,7 @@ def clustering_for_n_clusters( aggregate_carriers=None, line_length_factor=1.25, aggregation_strategies=dict(), - solver_name="cbc", + solver_name="scip", algorithm="hac", feature=None, extended_link_costs=0, @@ -470,6 +463,9 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) + # remove integer outputs for compatibility with PyPSA v0.26.0 + n.generators.drop("n_mod", axis=1, inplace=True, errors="ignore") + exclude_carriers = params.cluster_network["exclude_carriers"] aggregate_carriers = set(n.generators.carrier) - set(exclude_carriers) conventional_carriers = set(params.conventional_carriers) @@ -498,7 +494,9 @@ if __name__ == "__main__": gens.efficiency, bins=[0, low, high, 1], labels=labels ).astype(str) carriers += [f"{c} {label} efficiency" for label in labels] - n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency") + n.generators.update( + {"carrier": gens.carrier + " " + suffix + " efficiency"} + ) aggregate_carriers = carriers if n_clusters == len(n.buses): diff --git a/scripts/copy_config.py b/scripts/copy_config.py index d6908a62..d3947116 100644 --- a/scripts/copy_config.py +++ b/scripts/copy_config.py @@ -6,8 +6,6 @@ Copy used configuration files and important scripts for archiving. 
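One detail from the clustering script above: the `PD_GE_2_2` flag exists because pandas 2.2 deprecated passing the grouping columns through to `groupby(...).apply`. A self-contained illustration of the shim on toy data (not the model's busmap logic):

```python
import pandas as pd
from packaging.version import Version, parse

PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")

df = pd.DataFrame({"country": ["DE", "DE", "FR"], "x": [1.0, 2.0, 3.0]})

# Pass include_groups=False only where pandas knows the keyword, silencing the
# deprecation warning on >= 2.2 without breaking older versions.
compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
shares = df.groupby("country", group_keys=False).apply(
    lambda g: g["x"] / g["x"].sum(), **compat_kws
)
```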
""" -from pathlib import Path -from shutil import copy import yaml from _helpers import set_scenario_config diff --git a/scripts/determine_availability_matrix_MD_UA.py b/scripts/determine_availability_matrix_MD_UA.py new file mode 100644 index 00000000..efe9a712 --- /dev/null +++ b/scripts/determine_availability_matrix_MD_UA.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Create land elibility analysis for Ukraine and Moldova with different datasets. +""" + +import functools +import logging +import time + +import atlite +import fiona +import geopandas as gpd +import matplotlib.pyplot as plt +import numpy as np +from _helpers import configure_logging +from atlite.gis import shape_availability +from rasterio.plot import show + +logger = logging.getLogger(__name__) + + +def get_wdpa_layer_name(wdpa_fn, layer_substring): + """ + Get layername from file "wdpa_fn" whose name contains "layer_substring". + """ + l = fiona.listlayers(wdpa_fn) + return [_ for _ in l if layer_substring in _][0] + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "determine_availability_matrix_MD_UA", technology="solar" + ) + configure_logging(snakemake) + + nprocesses = None # snakemake.config["atlite"].get("nprocesses") + noprogress = not snakemake.config["atlite"].get("show_progress", True) + config = snakemake.config["renewable"][snakemake.wildcards.technology] + + cutout = atlite.Cutout(snakemake.input.cutout) + regions = ( + gpd.read_file(snakemake.input.regions).set_index("name").rename_axis("bus") + ) + buses = regions.index + + excluder = atlite.ExclusionContainer(crs=3035, res=100) + + corine = config.get("corine", {}) + if "grid_codes" in corine: + # Land cover codes to emulate CORINE results + if snakemake.wildcards.technology == "solar": + codes = [20, 30, 40, 50, 60, 90, 100] + elif snakemake.wildcards.technology == "onwind": + codes = [20, 30, 40, 60, 100] + elif snakemake.wildcards.technology == "offwind-ac": + codes = [80, 200] + elif snakemake.wildcards.technology == "offwind-dc": + codes = [80, 200] + else: + assert False, "technology not supported" + + excluder.add_raster( + snakemake.input.copernicus, codes=codes, invert=True, crs="EPSG:4326" + ) + if "distance" in corine and corine.get("distance", 0.0) > 0.0: + # Land cover codes to emulate CORINE results + if snakemake.wildcards.technology == "onwind": + codes = [50] + else: + assert False, "technology not supported" + + buffer = corine["distance"] + excluder.add_raster( + snakemake.input.copernicus, codes=codes, buffer=buffer, crs="EPSG:4326" + ) + + if config["natura"]: + wdpa_fn = ( + snakemake.input.wdpa_marine + if "offwind" in snakemake.wildcards.technology + else snakemake.input.wdpa + ) + layer = get_wdpa_layer_name(wdpa_fn, "polygons") + wdpa = gpd.read_file( + wdpa_fn, + bbox=regions.geometry, + layer=layer, + ).to_crs(3035) + if not wdpa.empty: + excluder.add_geometry(wdpa.geometry) + + layer = get_wdpa_layer_name(wdpa_fn, "points") + wdpa_pts = gpd.read_file( + wdpa_fn, + bbox=regions.geometry, + layer=layer, + ).to_crs(3035) + wdpa_pts = wdpa_pts[wdpa_pts["REP_AREA"] > 1] + wdpa_pts["buffer_radius"] = np.sqrt(wdpa_pts["REP_AREA"] / np.pi) * 1000 + wdpa_pts = wdpa_pts.set_geometry( + wdpa_pts["geometry"].buffer(wdpa_pts["buffer_radius"]) + ) + if not wdpa_pts.empty: + excluder.add_geometry(wdpa_pts.geometry) + + if "max_depth" in config: + # lambda 
not supported for atlite + multiprocessing + # use named function np.greater with partially frozen argument instead + # and exclude areas where: -max_depth > grid cell depth + func = functools.partial(np.greater, -config["max_depth"]) + excluder.add_raster(snakemake.input.gebco, codes=func, crs=4236, nodata=-1000) + + if "min_shore_distance" in config: + buffer = config["min_shore_distance"] + excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer) + + if "max_shore_distance" in config: + buffer = config["max_shore_distance"] + excluder.add_geometry( + snakemake.input.country_shapes, buffer=buffer, invert=True + ) + + if "ship_threshold" in config: + shipping_threshold = config["ship_threshold"] * 8760 * 6 + func = functools.partial(np.less, shipping_threshold) + excluder.add_raster( + snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True + ) + + kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) + if noprogress: + logger.info("Calculate landuse availabilities...") + start = time.time() + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + duration = time.time() - start + logger.info(f"Completed availability calculation ({duration:2.2f}s)") + else: + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + + regions_geometry = regions.to_crs(3035).geometry + band, transform = shape_availability(regions_geometry, excluder) + fig, ax = plt.subplots(figsize=(4, 8)) + gpd.GeoSeries(regions_geometry.unary_union).plot(ax=ax, color="none") + show(band, transform=transform, cmap="Greens", ax=ax) + plt.axis("off") + plt.savefig(snakemake.output.availability_map, bbox_inches="tight", dpi=500) + + # Limit results only to buses for UA and MD + buses = regions.loc[regions["country"].isin(["UA", "MD"])].index.values + availability = availability.sel(bus=buses) + + # Save and plot for verification + availability.to_netcdf(snakemake.output.availability_matrix) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index 98a6a6d7..76d8099c 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -8,9 +8,6 @@ capacity factors, curtailment, energy balances, prices and other metrics. 
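Before the make_summary changes: the NetCDF file written by the new script above is read back in build_renewable_profiles (see the `availability_matrix_MD_UA` branch earlier in this diff), where it overwrites the CORINE-based availability for the Ukrainian and Moldovan buses. A toy sketch of that coordinate-aligned overwrite, with made-up shapes and values:

```python
import numpy as np
import xarray as xr

coords = dict(bus=["DE0 0", "UA0 0"], y=[50.0, 50.25], x=[10.0, 10.25])
availability = xr.DataArray(
    np.full((2, 2, 2), 0.5), coords=coords, dims=("bus", "y", "x")
)

# Externally determined availability for the UA/MD subset of buses
override = xr.full_like(availability.sel(bus=["UA0 0"]), 0.8)

# Mirrors `availability.loc[availability_MDUA.coords] = availability_MDUA`:
# label-based indexing with the override's own coords replaces exactly the
# matching (bus, y, x) slice and leaves all other buses untouched.
availability.loc[override.coords] = override
```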
""" import logging - -logger = logging.getLogger(__name__) - import sys import numpy as np @@ -19,7 +16,7 @@ import pypsa from prepare_sector_network import prepare_costs idx = pd.IndexSlice - +logger = logging.getLogger(__name__) opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} @@ -33,10 +30,7 @@ def assign_locations(n): ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) for i in ifind.unique(): names = ifind.index[ifind == i] - if i == -1: - c.df.loc[names, "location"] = "" - else: - c.df.loc[names, "location"] = names.str[:i] + c.df.loc[names, "location"] = "" if i == -1 else names.str[:i] def calculate_nodal_cfs(n, label, nodal_cfs): @@ -397,7 +391,7 @@ def calculate_supply_energy(n, label, supply_energy): for c in n.iterate_components(n.branch_components): for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]: - items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)] + items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)] if len(items) == 0: continue @@ -449,6 +443,10 @@ def calculate_metrics(n, label, metrics): if "CO2Limit" in n.global_constraints.index: metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"] + if "co2_sequestration_limit" in n.global_constraints.index: + metrics.at["co2_storage_shadow", label] = n.global_constraints.at[ + "co2_sequestration_limit", "mu" + ] return metrics @@ -493,7 +491,7 @@ def calculate_weighted_prices(n, label, weighted_prices): "H2": ["Sabatier", "H2 Fuel Cell"], } - for carrier in link_loads: + for carrier, value in link_loads.items(): if carrier == "electricity": suffix = "" elif carrier[:5] == "space": @@ -508,22 +506,16 @@ def calculate_weighted_prices(n, label, weighted_prices): if carrier in ["H2", "gas"]: load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) - elif carrier[:5] == "space": - load = heat_demand_df[buses.str[:2]].rename( - columns=lambda i: str(i) + suffix - ) else: - load = n.loads_t.p_set[buses] + load = n.loads_t.p_set[buses.intersection(n.loads.index)] - for tech in link_loads[carrier]: + for tech in value: names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech] - if names.empty: - continue - - load += ( - n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum() - ) + if not names.empty: + load += ( + n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T + ) # Add H2 Store when charging # if carrier == "H2": @@ -562,14 +554,16 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.generators_t.p[gens] - .groupby(n.generators.loc[gens, "bus"], axis=1) + .T.groupby(n.generators.loc[gens, "bus"]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) - revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan ## Now do market value of links ## @@ -585,14 +579,17 @@ def calculate_market_values(n, label, market_values): dispatch = ( n.links_t["p" + i][links] - .groupby(n.links.loc[links, "bus" + i], axis=1) + .T.groupby(n.links.loc[links, "bus" + i]) .sum() - .reindex(columns=buses, fill_value=0.0) + .T.reindex(columns=buses, fill_value=0.0) ) revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum() + if total_dispatch := 
dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan return market_values @@ -650,11 +647,7 @@ def make_summaries(networks_dict): networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"] ) - df = {} - - for output in outputs: - df[output] = pd.DataFrame(columns=columns, dtype=float) - + df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs} for label, filename in networks_dict.items(): logger.info(f"Make summary for scenario {label}, using {filename}") diff --git a/scripts/make_summary_perfect.py b/scripts/make_summary_perfect.py new file mode 100644 index 00000000..064db454 --- /dev/null +++ b/scripts/make_summary_perfect.py @@ -0,0 +1,754 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Create summary CSV files for all scenario runs with perfect foresight including +costs, capacities, capacity factors, curtailment, energy balances, prices and +other metrics. +""" + + +import numpy as np +import pandas as pd +import pypsa +from make_summary import calculate_cfs # noqa: F401 +from make_summary import calculate_nodal_cfs # noqa: F401 +from make_summary import calculate_nodal_costs # noqa: F401 +from make_summary import assign_carriers, assign_locations +from prepare_sector_network import prepare_costs +from pypsa.descriptors import get_active_assets +from six import iteritems + +idx = pd.IndexSlice + +opt_name = {"Store": "e", "Line": "s", "Transformer": "s"} + + +def reindex_columns(df, cols): + investments = cols.levels[3] + if len(cols.names) != len(df.columns.levels): + df = pd.concat([df] * len(investments), axis=1) + df.columns = cols + df = df.reindex(cols, axis=1) + + return df + + +def calculate_costs(n, label, costs): + investments = n.investment_periods + cols = pd.MultiIndex.from_product( + [ + costs.columns.levels[0], + costs.columns.levels[1], + costs.columns.levels[2], + investments, + ], + names=costs.columns.names[:3] + ["year"], + ) + + costs = reindex_columns(costs, cols) + + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"] + active = pd.concat( + [ + get_active_assets(n, c.name, inv_p).rename(inv_p) + for inv_p in investments + ], + axis=1, + ).astype(int) + capital_costs = active.mul(capital_costs, axis=0) + discount = ( + n.investment_period_weightings["objective"] + / n.investment_period_weightings["years"] + ) + capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount) + + capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"]) + capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name]) + + costs = costs.reindex(capital_costs_grouped.index.union(costs.index)) + + costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values + + if c.name == "Link": + p = ( + c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0) + .groupby(level=0) + .sum() + ) + elif c.name == "Line": + continue + elif c.name == "StorageUnit": + p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0) + p_all[p_all < 0.0] = 0.0 + p = p_all.groupby(level=0).sum() + else: + p = ( + round(c.pnl.p, ndigits=2) + .multiply(n.snapshot_weightings.generators, axis=0) + .groupby(level=0) + .sum() + ) + + # correct sequestration cost + if c.name == "Store": + items = 
c.df.index[ + (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0) + ] + c.df.loc[items, "marginal_cost"] = -20.0 + + marginal_costs = p.mul(c.df.marginal_cost).T + # marginal_costs = active.mul(marginal_costs, axis=0) + marginal_costs_grouped = ( + marginal_costs.groupby(c.df.carrier).sum().mul(discount) + ) + + marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"]) + marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name]) + + costs = costs.reindex(marginal_costs_grouped.index.union(costs.index)) + + costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values + + # add back in all hydro + # costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum() + # costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum() + # costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum() + + return costs + + +def calculate_cumulative_cost(): + planning_horizons = snakemake.config["scenario"]["planning_horizons"] + + cumulative_cost = pd.DataFrame( + index=df["costs"].sum().index, + columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"), + ) + + # discount cost and express them in money value of planning_horizons[0] + for r in cumulative_cost.columns: + cumulative_cost[r] = [ + df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0])) + for index in cumulative_cost.index + ] + + # integrate cost throughout the transition path + for r in cumulative_cost.columns: + for cluster in cumulative_cost.index.get_level_values(level=0).unique(): + for lv in cumulative_cost.index.get_level_values(level=1).unique(): + for sector_opts in cumulative_cost.index.get_level_values( + level=2 + ).unique(): + cumulative_cost.loc[ + (cluster, lv, sector_opts, "cumulative cost"), r + ] = np.trapz( + cumulative_cost.loc[ + idx[cluster, lv, sector_opts, planning_horizons], r + ].values, + x=planning_horizons, + ) + + return cumulative_cost + + +def calculate_nodal_capacities(n, label, nodal_capacities): + # Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. 
fossil gas/oil) stuff + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + nodal_capacities_c = c.df.groupby(["location", "carrier"])[ + opt_name.get(c.name, "p") + "_nom_opt" + ].sum() + index = pd.MultiIndex.from_tuples( + [(c.list_name,) + t for t in nodal_capacities_c.index.to_list()] + ) + nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index)) + nodal_capacities.loc[index, label] = nodal_capacities_c.values + + return nodal_capacities + + +def calculate_capacities(n, label, capacities): + investments = n.investment_periods + cols = pd.MultiIndex.from_product( + [ + capacities.columns.levels[0], + capacities.columns.levels[1], + capacities.columns.levels[2], + investments, + ], + names=capacities.columns.names[:3] + ["year"], + ) + capacities = reindex_columns(capacities, cols) + + for c in n.iterate_components( + n.branch_components | n.controllable_one_port_components ^ {"Load"} + ): + active = pd.concat( + [ + get_active_assets(n, c.name, inv_p).rename(inv_p) + for inv_p in investments + ], + axis=1, + ).astype(int) + caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"] + caps = active.mul(caps, axis=0) + capacities_grouped = ( + caps.groupby(c.df.carrier).sum().drop("load", errors="ignore") + ) + capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name]) + + capacities = capacities.reindex( + capacities_grouped.index.union(capacities.index) + ) + + capacities.loc[capacities_grouped.index, label] = capacities_grouped.values + + return capacities + + +def calculate_curtailment(n, label, curtailment): + avail = ( + n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt) + .sum() + .groupby(n.generators.carrier) + .sum() + ) + used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() + + curtailment[label] = (((avail - used) / avail) * 100).round(3) + + return curtailment + + +def calculate_energy(n, label, energy): + investments = n.investment_periods + cols = pd.MultiIndex.from_product( + [ + energy.columns.levels[0], + energy.columns.levels[1], + energy.columns.levels[2], + investments, + ], + names=energy.columns.names[:3] + ["year"], + ) + energy = reindex_columns(energy, cols) + + for c in n.iterate_components(n.one_port_components | n.branch_components): + if c.name in n.one_port_components: + c_energies = ( + c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0) + .groupby(level=0) + .sum() + .multiply(c.df.sign) + .T.groupby(c.df.carrier) + .sum() + .T + ) + else: + c_energies = pd.DataFrame( + 0.0, columns=c.df.carrier.unique(), index=n.investment_periods + ) + for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]: + totals = ( + c.pnl["p" + port] + .multiply(n.snapshot_weightings.generators, axis=0) + .groupby(level=0) + .sum() + ) + # remove values where bus is missing (bug in nomopyomo) + no_bus = c.df.index[c.df["bus" + port] == ""] + totals[no_bus] = float( + n.component_attrs[c.name].loc["p" + port, "default"] + ) + c_energies -= totals.T.groupby(c.df.carrier).sum().T + + c_energies = pd.concat([c_energies.T], keys=[c.list_name]) + + energy = energy.reindex(c_energies.index.union(energy.index)) + + energy.loc[c_energies.index, label] = c_energies.values + + return energy + + +def calculate_supply(n, label, supply): + """ + Calculate the max dispatch of each component at the buses aggregated by + carrier. 
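(Aside: the curtailment metric defined a few hunks above is simply "available minus dispatched, relative to available". With made-up hourly values for a single generator:)

```python
import pandas as pd

p_max_pu = pd.Series([0.0, 0.4, 0.8, 0.6])  # per-unit availability
p = pd.Series([0.0, 40.0, 60.0, 60.0])      # dispatched power in MW
p_nom_opt = 100.0                           # optimised capacity in MW

avail = (p_max_pu * p_nom_opt).sum()  # 180 MWh available
used = p.sum()                        # 160 MWh dispatched
curtailment = (avail - used) / avail * 100  # ~11.1 %
```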
+    """
+
+    bus_carriers = n.buses.carrier.unique()
+
+    for i in bus_carriers:
+        bus_map = n.buses.carrier == i
+        bus_map.at[""] = False
+
+        for c in n.iterate_components(n.one_port_components):
+            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
+
+            if len(items) == 0:
+                continue
+
+            s = (
+                c.pnl.p[items]
+                .max()
+                .multiply(c.df.loc[items, "sign"])
+                .groupby(c.df.loc[items, "carrier"])
+                .sum()
+            )
+            s = pd.concat([s], keys=[c.list_name])
+            s = pd.concat([s], keys=[i])
+
+            supply = supply.reindex(s.index.union(supply.index))
+            supply.loc[s.index, label] = s
+
+        for c in n.iterate_components(n.branch_components):
+            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
+                items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
+
+                if len(items) == 0:
+                    continue
+
+                # lots of sign compensation for direction and to do maximums
+                s = (-1) ** (1 - int(end)) * (
+                    (-1) ** int(end) * c.pnl["p" + end][items]
+                ).max().groupby(c.df.loc[items, "carrier"]).sum()
+                s.index = s.index + end
+                s = pd.concat([s], keys=[c.list_name])
+                s = pd.concat([s], keys=[i])
+
+                supply = supply.reindex(s.index.union(supply.index))
+                supply.loc[s.index, label] = s
+
+    return supply
+
+
+def calculate_supply_energy(n, label, supply_energy):
+    """
+    Calculate the total energy supply/consumption of each component at the
+    buses aggregated by carrier.
+    """
+
+    investments = n.investment_periods
+    cols = pd.MultiIndex.from_product(
+        [
+            supply_energy.columns.levels[0],
+            supply_energy.columns.levels[1],
+            supply_energy.columns.levels[2],
+            investments,
+        ],
+        names=supply_energy.columns.names[:3] + ["year"],
+    )
+    supply_energy = reindex_columns(supply_energy, cols)
+
+    bus_carriers = n.buses.carrier.unique()
+
+    for i in bus_carriers:
+        bus_map = n.buses.carrier == i
+        bus_map.at[""] = False
+
+        for c in n.iterate_components(n.one_port_components):
+            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
+
+            if len(items) == 0:
+                continue
+
+            if c.name == "Generator":
+                weightings = n.snapshot_weightings.generators
+            else:
+                weightings = n.snapshot_weightings.stores
+
+            if i in ["oil", "co2", "H2"]:
+                if c.name == "Load":
+                    c.df.loc[items, "carrier"] = [
+                        load.split("-202")[0] for load in items
+                    ]
+                if i == "oil" and c.name == "Generator":
+                    c.df.loc[items, "carrier"] = "imported oil"
+            s = (
+                c.pnl.p[items]
+                .multiply(weightings, axis=0)
+                .groupby(level=0)
+                .sum()
+                .multiply(c.df.loc[items, "sign"])
+                .T.groupby(c.df.loc[items, "carrier"])
+                .sum()
+            )
+            s = pd.concat([s], keys=[c.list_name])
+            s = pd.concat([s], keys=[i])
+
+            supply_energy = supply_energy.reindex(
+                s.index.union(supply_energy.index, sort=False)
+            )
+            supply_energy.loc[s.index, label] = s.values
+
+        for c in n.iterate_components(n.branch_components):
+            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
+                items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]
+
+                if len(items) == 0:
+                    continue
+
+                s = (-1) * c.pnl["p" + end].reindex(items, axis=1).multiply(
+                    n.snapshot_weightings.objective, axis=0
+                ).groupby(level=0).sum().T.groupby(c.df.loc[items, "carrier"]).sum()
+                s.index = s.index + end
+                s = pd.concat([s], keys=[c.list_name])
+                s = pd.concat([s], keys=[i])
+
+                supply_energy = supply_energy.reindex(
+                    s.index.union(supply_energy.index, sort=False)
+                )
+
+                supply_energy.loc[s.index, label] = s.values
+
+    return supply_energy
+
+
+def calculate_metrics(n, label, metrics):
+    metrics = metrics.reindex(
+        pd.Index(
+            [
+                "line_volume",
+                "line_volume_limit",
+                "line_volume_AC",
"line_volume_DC", + "line_volume_shadow", + "co2_shadow", + ] + ).union(metrics.index) + ) + + metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[ + n.links.carrier == "DC" + ].sum() + metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum() + metrics.at["line_volume", label] = metrics.loc[ + ["line_volume_AC", "line_volume_DC"], label + ].sum() + + if hasattr(n, "line_volume_limit"): + metrics.at["line_volume_limit", label] = n.line_volume_limit + metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual + + if "CO2Limit" in n.global_constraints.index: + metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"] + + return metrics + + +def calculate_prices(n, label, prices): + prices = prices.reindex(prices.index.union(n.buses.carrier.unique())) + + # WARNING: this is time-averaged, see weighted_prices for load-weighted average + prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean() + + return prices + + +def calculate_weighted_prices(n, label, weighted_prices): + # Warning: doesn't include storage units as loads + + weighted_prices = weighted_prices.reindex( + pd.Index( + [ + "electricity", + "heat", + "space heat", + "urban heat", + "space urban heat", + "gas", + "H2", + ] + ) + ) + + link_loads = { + "electricity": [ + "heat pump", + "resistive heater", + "battery charger", + "H2 Electrolysis", + ], + "heat": ["water tanks charger"], + "urban heat": ["water tanks charger"], + "space heat": [], + "space urban heat": [], + "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"], + "H2": ["Sabatier", "H2 Fuel Cell"], + } + + for carrier, value in link_loads.items(): + if carrier == "electricity": + suffix = "" + elif carrier[:5] == "space": + suffix = carrier[5:] + else: + suffix = " " + carrier + + buses = n.buses.index[n.buses.index.str[5:] == suffix] + + if buses.empty: + continue + + load = ( + pd.DataFrame(index=n.snapshots, columns=buses, data=0.0) + if carrier in ["H2", "gas"] + else n.loads_t.p_set.reindex(buses, axis=1) + ) + for tech in value: + names = n.links.index[ + n.links.index.to_series().str[-len(tech) - 5 : -5] == tech + ] + + if names.empty: + continue + + load += n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T + + # Add H2 Store when charging + # if carrier == "H2": + # stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1) + # stores[stores > 0.] = 0. 
+ # load += -stores + + if total_load := load.sum().sum(): + weighted_prices.loc[carrier, label] = ( + load * n.buses_t.marginal_price[buses] + ).sum().sum() / total_load + else: + weighted_prices.loc[carrier, label] = np.nan + + if carrier[:5] == "space": + print(load * n.buses_t.marginal_price[buses]) + + return weighted_prices + + +def calculate_market_values(n, label, market_values): + # Warning: doesn't include storage units + + carrier = "AC" + + buses = n.buses.index[n.buses.carrier == carrier] + + ## First do market value of generators ## + + generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier] + + techs = n.generators.loc[generators, "carrier"].value_counts().index + + market_values = market_values.reindex(market_values.index.union(techs)) + + for tech in techs: + gens = generators[n.generators.loc[generators, "carrier"] == tech] + + dispatch = ( + n.generators_t.p[gens] + .T.groupby(n.generators.loc[gens, "bus"]) + .sum() + .T.reindex(columns=buses, fill_value=0.0) + ) + + revenue = dispatch * n.buses_t.marginal_price[buses] + + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan + + ## Now do market value of links ## + + for i in ["0", "1"]: + all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier] + + techs = n.links.loc[all_links, "carrier"].value_counts().index + + market_values = market_values.reindex(market_values.index.union(techs)) + + for tech in techs: + links = all_links[n.links.loc[all_links, "carrier"] == tech] + + dispatch = ( + n.links_t["p" + i][links] + .T.groupby(n.links.loc[links, "bus" + i]) + .sum() + .T.reindex(columns=buses, fill_value=0.0) + ) + + revenue = dispatch * n.buses_t.marginal_price[buses] + + if total_dispatch := dispatch.sum().sum(): + market_values.at[tech, label] = revenue.sum().sum() / total_dispatch + else: + market_values.at[tech, label] = np.nan + + return market_values + + +def calculate_price_statistics(n, label, price_statistics): + price_statistics = price_statistics.reindex( + price_statistics.index.union( + pd.Index(["zero_hours", "mean", "standard_deviation"]) + ) + ) + + buses = n.buses.index[n.buses.carrier == "AC"] + + threshold = 0.1 # higher than phoney marginal_cost of wind/solar + + df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots) + + df[n.buses_t.marginal_price[buses] < threshold] = 1.0 + + price_statistics.at["zero_hours", label] = df.sum().sum() / ( + df.shape[0] * df.shape[1] + ) + + price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean() + + price_statistics.at["standard_deviation", label] = ( + n.buses_t.marginal_price[buses].std().std() + ) + + return price_statistics + + +def calculate_co2_emissions(n, label, df): + carattr = "co2_emissions" + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + return + + weightings = n.snapshot_weightings.generators.mul( + n.investment_period_weightings["years"] + .reindex(n.snapshots) + .fillna(method="bfill") + .fillna(1.0), + axis=0, + ) + + # generators + gens = n.generators.query("carrier in @emissions.index") + if not gens.empty: + em_pu = gens.carrier.map(emissions) / gens.efficiency + em_pu = ( + weightings["generators"].to_frame("weightings") + @ em_pu.to_frame("weightings").T + ) + emitted = n.generators_t.p[gens.index].mul(em_pu) + + emitted_grouped = ( + emitted.groupby(level=0).sum().T.groupby(n.generators.carrier).sum() + ) + + df = 
df.reindex(emitted_grouped.index.union(df.index)) + + df.loc[emitted_grouped.index, label] = emitted_grouped.values + + if any(n.stores.carrier == "co2"): + co2_i = n.stores[n.stores.carrier == "co2"].index + df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0] + + return df + + +outputs = [ + "nodal_costs", + "nodal_capacities", + "nodal_cfs", + "cfs", + "costs", + "capacities", + "curtailment", + "energy", + "supply", + "supply_energy", + "prices", + "weighted_prices", + "price_statistics", + "market_values", + "metrics", + "co2_emissions", +] + + +def make_summaries(networks_dict): + columns = pd.MultiIndex.from_tuples( + networks_dict.keys(), names=["cluster", "lv", "opt"] + ) + df = {} + + for output in outputs: + df[output] = pd.DataFrame(columns=columns, dtype=float) + + for label, filename in iteritems(networks_dict): + print(label, filename) + try: + n = pypsa.Network(filename) + except OSError: + print(label, " not solved yet.") + continue + # del networks_dict[label] + + if not hasattr(n, "objective"): + print(label, " not solved correctly. Check log if infeasible or unbounded.") + continue + assign_carriers(n) + assign_locations(n) + + for output in outputs: + df[output] = globals()["calculate_" + output](n, label, df[output]) + + return df + + +def to_csv(df): + for key in df: + df[key] = df[key].apply(lambda x: pd.to_numeric(x)) + df[key].to_csv(snakemake.output[key]) + + +if __name__ == "__main__": + # Detect running outside of snakemake and mock snakemake for testing + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("make_summary_perfect") + + run = snakemake.config["run"]["name"] + if run != "": + run += "/" + + networks_dict = { + (clusters, lv, opts + sector_opts): "results/" + + run + + f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc" + for simpl in snakemake.config["scenario"]["simpl"] + for clusters in snakemake.config["scenario"]["clusters"] + for opts in snakemake.config["scenario"]["opts"] + for sector_opts in snakemake.config["scenario"]["sector_opts"] + for lv in snakemake.config["scenario"]["ll"] + } + + print(networks_dict) + + nyears = 1 + costs_db = prepare_costs( + snakemake.input.costs, + snakemake.config["costs"], + nyears, + ) + + df = make_summaries(networks_dict) + + df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum() + + to_csv(df) diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py new file mode 100644 index 00000000..e2953604 --- /dev/null +++ b/scripts/plot_gas_network.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised gas network, storage and selected other +infrastructure. 
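+
+Three pipeline layers are overlaid: capacities existing in 2020, the
+optimised available capacities, and the maximum utilised capacities; bus
+sizes show fossil gas supply, methanation and biogas supply per region.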
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def plot_ch4_map(n): + # if "gas pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + bus_size_factor = 8e7 + linewidth_factor = 1e4 + # MW below which not drawn + line_lower_threshold = 1e3 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) + fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) + fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) + # make a fake MultiIndex so that area is correct for legend + fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) + + methanation_i = n.links.query("carrier == 'Sabatier'").index + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) + + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) + + bus_sizes = pd.concat([fossil_gas, methanation, biogas]) + bus_sizes.sort_index(inplace=True) + + to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] + n.links.drop(to_remove, inplace=True) + + link_widths_rem = n.links.p_nom_opt / linewidth_factor + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + link_widths_orig = n.links.p_nom / linewidth_factor + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 + + max_usage = n.links_t.p0[n.links.index].abs().max(axis=0) + link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 + + tech_colors = snakemake.params.plotting["tech_colors"] + + pipe_colors = { + "gas pipeline": "#f08080", + "gas pipeline new": "#c46868", + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", + } + + link_color_used = n.links.carrier.map(pipe_colors) + + n.links.bus0 = n.links.bus0.str.replace(" gas", "") + n.links.bus1 = n.links.bus1.str.replace(" gas", "") + + bus_colors = { + "fossil gas": tech_colors["fossil gas"], + "methanation": tech_colors["methanation"], + "biogas": "seagreen", + } + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + n.plot( + bus_sizes=bus_sizes, + 
bus_colors=bus_colors, + link_colors=pipe_colors["gas pipeline (in 2020)"], + link_widths=link_widths_orig, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], + link_widths=link_widths_rem, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=link_color_used, + link_widths=link_widths_used, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + sizes = [100, 10] + labels = [f"{s} TWh" for s in sizes] + sizes = [s / bus_size_factor * 1e6 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.03), + labelspacing=0.8, + frameon=False, + handletextpad=1, + title="gas sources", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.25, 1.03), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title="gas pipeline", + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = list(pipe_colors.values()) + list(bus_colors.values()) + labels = list(pipe_colors.keys()) + list(bus_colors.keys()) + + # legend on the side + # legend_kw = dict( + # bbox_to_anchor=(1.47, 1.04), + # frameon=False, + # ) + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.24), + ncol=2, + frameon=False, + ) + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_gas_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_ch4_map(n) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py new file mode 100644 index 00000000..95741170 --- /dev/null +++ b/scripts/plot_hydrogen_network.py @@ -0,0 +1,269 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised hydrogen network, storage and selected other +infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def group_pipes(df, drop_direction=False): + """ + Group pipes which connect same buses and return overall capacity. 
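+
+    With ``drop_direction=True`` the two directions of a pipe are first
+    normalised (``bus0 < bus1``) so that antiparallel pipes are grouped and
+    summed together with parallel pipes from different investment periods.
+
+    Illustrative example (assuming ``pd`` is imported as in this module)::
+
+        >>> df = pd.DataFrame(
+        ...     {"bus0": ["DE0 H2", "FR0 H2"], "bus1": ["FR0 H2", "DE0 H2"],
+        ...      "p_nom_opt": [1.0, 2.0]}
+        ... )
+        >>> group_pipes(df, drop_direction=True)["p_nom_opt"].item()
+        3.0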
+ """ + if drop_direction: + positive_order = df.bus0 < df.bus1 + df_p = df[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + df_n = df[~positive_order].rename(columns=swap_buses) + df = pd.concat([df_p, df_n]) + + # there are pipes for each investment period rename to AC buses name for plotting + df.index = df.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + return df.groupby(level=0).agg( + {"p_nom_opt": "sum", "bus0": "first", "bus1": "first"} + ) + + +def plot_h2_map(n, regions): + # if "H2 pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + h2_storage = n.stores.query("carrier == 'H2'") + regions["H2"] = ( + h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) + .e_nom_opt.groupby(level=0) + .sum() + .div(1e6) + ) # TWh + regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) + + bus_size_factor = 1e5 + linewidth_factor = 7e3 + # MW below which not drawn + line_lower_threshold = 750 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + carriers = ["H2 Electrolysis", "H2 Fuel Cell"] + + elec = n.links[n.links.carrier.isin(carriers)].index + + bus_sizes = ( + n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() + / bus_size_factor + ) + + # make a fake MultiIndex so that area is correct for legend + bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) + # drop all links which are not H2 pipelines + n.links.drop( + n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) + + h2_new = n.links[n.links.carrier == "H2 pipeline"] + h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] + + if snakemake.params.foresight == "myopic": + # sum capacitiy for pipelines from different investment periods + h2_new = group_pipes(h2_new) + + if not h2_retro.empty: + h2_retro = ( + group_pipes(h2_retro, drop_direction=True) + .reindex(h2_new.index) + .fillna(0) + ) + + if not h2_retro.empty: + positive_order = h2_retro.bus0 < h2_retro.bus1 + h2_retro_p = h2_retro[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) + h2_retro = pd.concat([h2_retro_p, h2_retro_n]) + + h2_retro["index_orig"] = h2_retro.index + h2_retro.index = h2_retro.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + + retro_w_new_i = h2_retro.index.intersection(h2_new.index) + h2_retro_w_new = h2_retro.loc[retro_w_new_i] + + retro_wo_new_i = h2_retro.index.difference(h2_new.index) + h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] + h2_retro_wo_new.index = h2_retro_wo_new.index_orig + + to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] + h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() + + else: + h2_total = h2_new.p_nom_opt + + link_widths_total = h2_total / linewidth_factor + + n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) + n.links = n.links.groupby(level=0).first() + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) + link_widths_retro = retro / linewidth_factor + link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + n.links.bus0 = n.links.bus0.str.replace(" H2", "") + n.links.bus1 = 
n.links.bus1.str.replace(" H2", "") + + regions = regions.to_crs(proj.proj4_init) + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} + + n.plot( + geomap=True, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=color_h2_pipe, + link_widths=link_widths_total, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + geomap=True, + bus_sizes=0, + link_colors=color_retrofit, + link_widths=link_widths_retro, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + regions.plot( + ax=ax, + column="H2", + cmap="Blues", + linewidths=0, + legend=True, + vmax=6, + vmin=0, + legend_kwds={ + "label": "Hydrogen Storage [TWh]", + "shrink": 0.7, + "extend": "max", + }, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1), + labelspacing=0.8, + handletextpad=0, + frameon=False, + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [30, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.23, 1), + frameon=False, + labelspacing=0.8, + handletextpad=1, + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] + labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.13), + ncol=2, + frameon=False, + ) + + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) + + ax.set_facecolor("white") + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_hydrogen_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_h2_map(n, regions) diff --git a/scripts/plot_network.py b/scripts/plot_network.py deleted file mode 100644 index ae1d0e0a..00000000 --- a/scripts/plot_network.py +++ /dev/null @@ -1,953 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates plots for optimised network topologies, including electricity, gas and -hydrogen networks, and regional generation, storage and conversion capacities -built. - -This rule plots a map of the network with technology capacities at the -nodes. 
-""" - -import logging - -logger = logging.getLogger(__name__) - -import cartopy.crs as ccrs -import geopandas as gpd -import matplotlib.pyplot as plt -import pandas as pd -import pypsa -from make_summary import assign_carriers -from plot_summary import preferred_order, rename_techs -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - -plt.style.use(["ggplot", "matplotlibrc"]) - - -def rename_techs_tyndp(tech): - tech = rename_techs(tech) - if "heat pump" in tech or "resistive heater" in tech: - return "power-to-heat" - elif tech in ["H2 Electrolysis", "methanation", "helmeth", "H2 liquefaction"]: - return "power-to-gas" - elif tech == "H2": - return "H2 storage" - elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: - return "ammonia" - elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: - return "gas-to-power/heat" - # elif "solar" in tech: - # return "solar" - elif tech in ["Fischer-Tropsch", "methanolisation"]: - return "power-to-liquid" - elif "offshore wind" in tech: - return "offshore wind" - elif "CC" in tech or "sequestration" in tech: - return "CCS" - else: - return tech - - -def assign_location(n): - for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: - # these have already been assigned defaults - if i == -1: - continue - names = ifind.index[ifind == i] - c.df.loc[names, "location"] = names.str[:i] - - -def plot_map( - network, - components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, - transmission=False, - with_legend=True, -): - tech_colors = snakemake.params.plotting["tech_colors"] - - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - costs = pd.DataFrame(index=n.buses.index) - - for comp in components: - df_c = getattr(n, comp) - - if df_c.empty: - continue - - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - - costs_c = ( - (df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]) - .sum() - .unstack() - .fillna(0.0) - ) - costs = pd.concat([costs, costs_c], axis=1) - - logger.debug(f"{comp}, {costs}") - - costs = costs.groupby(costs.columns, axis=1).sum() - - costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - - new_columns = preferred_order.intersection(costs.columns).append( - costs.columns.difference(preferred_order) - ) - costs = costs[new_columns] - - for item in new_columns: - if item not in tech_colors: - logger.warning(f"{item} not in config/plotting/tech_colors") - - costs = costs.stack() # .sort_index() - - # hack because impossible to drop buses... 
- eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - logger.info(f"Dropping non-buses {to_drop.tolist()}") - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - threshold = 100e6 # 100 mEUR/a - carriers = costs.groupby(level=1).sum() - carriers = carriers.where(carriers > threshold).dropna() - carriers = list(carriers.index) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 4e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - if snakemake.wildcards["ll"] == "v1.0": - # should be zero - line_widths = n.lines.s_nom_opt - n.lines.s_nom - link_widths = n.links.p_nom_opt - n.links.p_nom - title = "added grid" - - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "current grid" - else: - line_widths = n.lines.s_nom_opt - n.lines.s_nom_min - link_widths = n.links.p_nom_opt - n.links.p_nom_min - title = "added grid" - - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()}) - fig.set_size_inches(7, 6) - - n.plot( - bus_sizes=costs / bus_size_factor, - bus_colors=tech_colors, - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - if with_legend: - colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] - labels = carriers + ["HVAC line", "HVDC link"] - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight") - - -def group_pipes(df, drop_direction=False): - """ - Group pipes which connect same buses and return overall capacity. 
- """ - if drop_direction: - positive_order = df.bus0 < df.bus1 - df_p = df[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - df_n = df[~positive_order].rename(columns=swap_buses) - df = pd.concat([df_p, df_n]) - - # there are pipes for each investment period rename to AC buses name for plotting - df.index = df.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - # group pipe lines connecting the same buses and rename them for plotting - pipe_capacity = df.groupby(level=0).agg( - {"p_nom_opt": sum, "bus0": "first", "bus1": "first"} - ) - - return pipe_capacity - - -def plot_h2_map(network, regions): - n = network.copy() - if "H2 pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - h2_storage = n.stores.query("carrier == 'H2'") - regions["H2"] = h2_storage.rename( - index=h2_storage.bus.map(n.buses.location) - ).e_nom_opt.div( - 1e6 - ) # TWh - regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) - - bus_size_factor = 1e5 - linewidth_factor = 7e3 - # MW below which not drawn - line_lower_threshold = 750 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - carriers = ["H2 Electrolysis", "H2 Fuel Cell"] - - elec = n.links[n.links.carrier.isin(carriers)].index - - bus_sizes = ( - n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() - / bus_size_factor - ) - - # make a fake MultiIndex so that area is correct for legend - bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) - # drop all links which are not H2 pipelines - n.links.drop( - n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True - ) - - h2_new = n.links[n.links.carrier == "H2 pipeline"] - h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] - - if snakemake.params.foresight == "myopic": - # sum capacitiy for pipelines from different investment periods - h2_new = group_pipes(h2_new) - - if not h2_retro.empty: - h2_retro = ( - group_pipes(h2_retro, drop_direction=True) - .reindex(h2_new.index) - .fillna(0) - ) - - if not h2_retro.empty: - positive_order = h2_retro.bus0 < h2_retro.bus1 - h2_retro_p = h2_retro[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) - h2_retro = pd.concat([h2_retro_p, h2_retro_n]) - - h2_retro["index_orig"] = h2_retro.index - h2_retro.index = h2_retro.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - - retro_w_new_i = h2_retro.index.intersection(h2_new.index) - h2_retro_w_new = h2_retro.loc[retro_w_new_i] - - retro_wo_new_i = h2_retro.index.difference(h2_new.index) - h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] - h2_retro_wo_new.index = h2_retro_wo_new.index_orig - - to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] - h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() - - else: - h2_total = h2_new.p_nom_opt - - link_widths_total = h2_total / linewidth_factor - - n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) - n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - retro = n.links.p_nom_opt.where( - n.links.carrier == "H2 pipeline retrofitted", other=0.0 - ) - link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt 
< line_lower_threshold] = 0.0 - - n.links.bus0 = n.links.bus0.str.replace(" H2", "") - n.links.bus1 = n.links.bus1.str.replace(" H2", "") - - proj = ccrs.EqualEarth() - regions = regions.to_crs(proj.proj4_init) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - color_h2_pipe = "#b3f3f4" - color_retrofit = "#499a9c" - - bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} - - n.plot( - geomap=True, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=color_h2_pipe, - link_widths=link_widths_total, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - geomap=True, - bus_sizes=0, - link_colors=color_retrofit, - link_widths=link_widths_retro, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - regions.plot( - ax=ax, - column="H2", - cmap="Blues", - linewidths=0, - legend=True, - vmax=6, - vmin=0, - legend_kwds={ - "label": "Hydrogen Storage [TWh]", - "shrink": 0.7, - "extend": "max", - }, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - sizes = [s / bus_size_factor * 1e3 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1), - labelspacing=0.8, - handletextpad=0, - frameon=False, - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [30, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.23, 1), - frameon=False, - labelspacing=0.8, - handletextpad=1, - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] - labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.13), - ncol=2, - frameon=False, - ) - - add_legend_patches(ax, colors, labels, legend_kw=legend_kw) - - ax.set_facecolor("white") - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" - ) - - -def plot_ch4_map(network): - n = network.copy() - - if "gas pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - bus_size_factor = 8e7 - linewidth_factor = 1e4 - # MW below which not drawn - line_lower_threshold = 1e3 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fossil_gas_i = n.generators[n.generators.carrier == "gas"].index - fossil_gas = ( - n.generators_t.p.loc[:, fossil_gas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.generators.loc[fossil_gas_i, "bus"]) - .sum() - / bus_size_factor - ) - fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) - fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) - # make a fake MultiIndex so that area is correct for legend - fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) - - methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index - methanation = ( - abs( - n.links_t.p1.loc[:, methanation_i].mul( - n.snapshot_weightings.generators, axis=0 - ) - ) - .sum() - .groupby(n.links.loc[methanation_i, "bus1"]) - .sum() - / bus_size_factor - ) - methanation = ( - methanation.groupby(methanation.index) - .sum() - .rename(index=lambda x: x.replace(" gas", "")) - ) - # make a fake MultiIndex so that area is 
correct for legend - methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - - biogas_i = n.stores[n.stores.carrier == "biogas"].index - biogas = ( - n.stores_t.p.loc[:, biogas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.stores.loc[biogas_i, "bus"]) - .sum() - / bus_size_factor - ) - biogas = ( - biogas.groupby(biogas.index) - .sum() - .rename(index=lambda x: x.replace(" biogas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) - - bus_sizes = pd.concat([fossil_gas, methanation, biogas]) - bus_sizes.sort_index(inplace=True) - - to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] - n.links.drop(to_remove, inplace=True) - - link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - - max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0.0 - - tech_colors = snakemake.params.plotting["tech_colors"] - - pipe_colors = { - "gas pipeline": "#f08080", - "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": "lightgrey", - "gas pipeline (available)": "#e8d1d1", - } - - link_color_used = n.links.carrier.map(pipe_colors) - - n.links.bus0 = n.links.bus0.str.replace(" gas", "") - n.links.bus1 = n.links.bus1.str.replace(" gas", "") - - bus_colors = { - "fossil gas": tech_colors["fossil gas"], - "methanation": tech_colors["methanation"], - "biogas": "seagreen", - } - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - n.plot( - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=pipe_colors["gas pipeline (in 2020)"], - link_widths=link_widths_orig, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=pipe_colors["gas pipeline (available)"], - link_widths=link_widths_rem, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=link_color_used, - link_widths=link_widths_used, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - sizes = [100, 10] - labels = [f"{s} TWh" for s in sizes] - sizes = [s / bus_size_factor * 1e6 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.03), - labelspacing=0.8, - frameon=False, - handletextpad=1, - title="gas sources", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.25, 1.03), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title="gas pipeline", - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = list(pipe_colors.values()) + list(bus_colors.values()) - labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - - # legend on the side - # legend_kw = dict( - # bbox_to_anchor=(1.47, 1.04), - # frameon=False, - # ) - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.24), - ncol=2, - 
frameon=False, - ) - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" - ) - - -def plot_map_without(network): - n = network.copy() - assign_location(n) - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": ccrs.EqualEarth()}) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 200.0 - line_upper_threshold = 1e4 - linewidth_factor = 3e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - # hack because impossible to drop buses... - if "EU gas" in n.buses.index: - eu_location = snakemake.params.plotting.get( - "eu_node_location", dict(x=-5.5, y=46) - ) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] - n.links.drop(to_drop, inplace=True) - - if snakemake.wildcards["ll"] == "v1.0": - line_widths = n.lines.s_nom - link_widths = n.links.p_nom - else: - line_widths = n.lines.s_nom_min - link_widths = n.links.p_nom_min - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - n.plot( - bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - handles = [] - labels = [] - - for s in (10, 5): - handles.append( - plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) - ) - labels.append(f"{s} GW") - l1_1 = ax.legend( - handles, - labels, - loc="upper left", - bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, - handletextpad=1.5, - title="Today's transmission", - ) - ax.add_artist(l1_1) - - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") - - -def plot_series(network, carrier="AC", name="test"): - n = network.copy() - assign_location(n) - assign_carriers(n) - - buses = n.buses.index[n.buses.carrier.str.contains(carrier)] - - supply = pd.DataFrame(index=n.snapshots) - for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name == "Link" else 2 - for i in range(n_port): - supply = pd.concat( - ( - supply, - (-1) - * c.pnl["p" + str(i)] - .loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]] - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - for c in n.iterate_components(n.one_port_components): - comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat( - ( - supply, - ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - - both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] - - positive_supply = supply[both] - negative_supply = supply[both] - - positive_supply[positive_supply < 0.0] = 0.0 - negative_supply[negative_supply > 0.0] = 0.0 - - supply[both] = positive_supply - - suffix = " charging" - - negative_supply.columns = negative_supply.columns + suffix - - supply = pd.concat((supply, negative_supply), axis=1) - - # 14-21.2 for flaute - # 19-26.1 for flaute - - start = "2013-02-19" - stop 
= "2013-02-26" - - threshold = 10e3 - - to_drop = supply.columns[(abs(supply) < threshold).all()] - - if len(to_drop) != 0: - logger.info(f"Dropping {to_drop.tolist()} from supply") - supply.drop(columns=to_drop, inplace=True) - - supply.index.name = None - - supply = supply / 1e3 - - supply.rename( - columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True - ) - supply.columns = supply.columns.str.replace("residential ", "") - supply.columns = supply.columns.str.replace("services ", "") - supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - - preferred_order = pd.Index( - [ - "electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage", - ] - ) - - new_columns = preferred_order.intersection(supply.columns).append( - supply.columns.difference(preferred_order) - ) - - supply = supply.groupby(supply.columns, axis=1).sum() - fig, ax = plt.subplots() - fig.set_size_inches((8, 5)) - - ( - supply.loc[start:stop, new_columns].plot( - ax=ax, - kind="area", - stacked=True, - linewidth=0.0, - color=[ - snakemake.params.plotting["tech_colors"][i.replace(suffix, "")] - for i in new_columns - ], - ) - ) - - handles, labels = ax.get_legend_handles_labels() - - handles.reverse() - labels.reverse() - - new_handles = [] - new_labels = [] - - for i, item in enumerate(labels): - if "charging" not in item: - new_handles.append(handles[i]) - new_labels.append(labels[i]) - - ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) - ax.set_xlim([start, stop]) - ax.set_ylim([-1300, 1900]) - ax.grid(True) - ax.set_ylabel("Power [GW]") - fig.tight_layout() - - fig.savefig( - "{}/{RDIR}maps/series-{}-{}-{}-{}-{}.pdf".format( - "results", - snakemake.params.RDIR, - snakemake.wildcards["ll"], - carrier, - start, - stop, - name, - ), - transparent=True, - ) - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake( - "plot_network", - simpl="", - opts="", - clusters="5", - ll="v1.5", - sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1", - planning_horizons="2030", - ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - n = pypsa.Network(snakemake.input.network) - - regions = gpd.read_file(snakemake.input.regions).set_index("name") - - map_opts = snakemake.params.plotting["map"] - - if map_opts["boundaries"] is None: - map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] - - plot_map( - n, - components=["generators", "links", "stores", "storage_units"], - bus_size_factor=2e10, - transmission=False, - ) - - plot_h2_map(n, regions) - plot_ch4_map(n) - plot_map_without(n) - - # plot_series(n, carrier="AC", name=suffix) - # plot_series(n, carrier="heat", name=suffix) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py new file mode 100644 index 00000000..0e13e497 --- /dev/null +++ b/scripts/plot_power_network.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and 
conversion capacities built. +""" + +import logging + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_summary import preferred_order, rename_techs +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def rename_techs_tyndp(tech): + tech = rename_techs(tech) + if "heat pump" in tech or "resistive heater" in tech: + return "power-to-heat" + elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: + return "power-to-gas" + elif tech == "H2": + return "H2 storage" + elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: + return "ammonia" + elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: + return "gas-to-power/heat" + # elif "solar" in tech: + # return "solar" + elif tech in ["Fischer-Tropsch", "methanolisation"]: + return "power-to-liquid" + elif "offshore wind" in tech: + return "offshore wind" + elif "CC" in tech or "sequestration" in tech: + return "CCS" + else: + return tech + + +def assign_location(n): + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) + for i in ifind.value_counts().index: + # these have already been assigned defaults + if i == -1: + continue + names = ifind.index[ifind == i] + c.df.loc[names, "location"] = names.str[:i] + + +def load_projection(plotting_params): + proj_kwargs = plotting_params.get("projection", dict(name="EqualEarth")) + proj_func = getattr(ccrs, proj_kwargs.pop("name")) + return proj_func(**proj_kwargs) + + +def plot_map( + n, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=2e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.params.plotting["tech_colors"] + + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + costs = pd.DataFrame(index=n.buses.index) + + for comp in components: + df_c = getattr(n, comp) + + if df_c.empty: + continue + + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" + + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) + costs = pd.concat([costs, costs_c], axis=1) + + logger.debug(f"{comp}, {costs}") + + costs = costs.T.groupby(costs.columns).sum().T + + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) + + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) + costs = costs[new_columns] + + for item in new_columns: + if item not in tech_colors: + logger.warning(f"{item} not in config/plotting/tech_colors") + + costs = costs.stack() # .sort_index() + + # hack because impossible to drop buses... 
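+    # the stand-alone "EU gas" bus has no meaningful coordinates, so it is
+    # parked at a fixed location (default x=-5.5, y=46, roughly the Bay of
+    # Biscay) rather than dropped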
+ eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + logger.info(f"Dropping non-buses {to_drop.tolist()}") + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + threshold = 100e6 # 100 mEUR/a + carriers = costs.groupby(level=1).sum() + carriers = carriers.where(carriers > threshold).dropna() + carriers = list(carriers.index) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 4e3 + ac_color = "rosybrown" + dc_color = "darkseagreen" + + title = "added grid" + + if snakemake.wildcards["ll"] == "v1.0": + # should be zero + line_widths = n.lines.s_nom_opt - n.lines.s_nom + link_widths = n.links.p_nom_opt - n.links.p_nom + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "current grid" + else: + line_widths = n.lines.s_nom_opt - n.lines.s_nom_min + link_widths = n.links.p_nom_opt - n.links.p_nom_min + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + title = "total grid" + + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) + + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + + n.plot( + bus_sizes=costs / bus_size_factor, + bus_colors=tech_colors, + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + if with_legend: + colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] + labels = carriers + ["HVAC line", "HVDC link"] + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + 
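+            # assumed test wildcards; per the {ll} wildcard convention,
+            # "v1.0" caps total line volume at 1.0x today's grid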
sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map(n) diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py new file mode 100644 index 00000000..8217ac2e --- /dev/null +++ b/scripts/plot_power_network_clustered.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Plot clustered electricity transmission network. +""" + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pypsa +from matplotlib.lines import Line2D +from plot_power_network import load_projection +from pypsa.plot import add_legend_lines + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_clustered", + clusters=128, + configfiles=["../../config/config.test.yaml"], + ) + + lw_factor = 2e3 + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") + + proj = load_projection(snakemake.params.plotting) + + fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={"projection": proj}) + regions.to_crs(proj.proj4_init).plot( + ax=ax, facecolor="none", edgecolor="lightgray", linewidth=0.75 + ) + n.plot( + ax=ax, + margin=0.06, + line_widths=n.lines.s_nom / lw_factor, + link_colors=n.links.p_nom.apply( + lambda x: "darkseagreen" if x > 0 else "skyblue" + ), + link_widths=2.0, + ) + + sizes = [10, 20] + labels = [f"HVAC ({s} GW)" for s in sizes] + scale = 1e3 / lw_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc=[0.25, 0.9], + frameon=False, + labelspacing=0.5, + handletextpad=1, + fontsize=13, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="rosybrown"), legend_kw=legend_kw + ) + + handles = [ + Line2D([0], [0], color="darkseagreen", lw=2), + Line2D([0], [0], color="skyblue", lw=2), + ] + plt.legend( + handles, + ["HVDC existing", "HVDC planned"], + frameon=False, + loc=[0.0, 0.9], + fontsize=13, + ) + + plt.savefig(snakemake.output.map, bbox_inches="tight") diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py new file mode 100644 index 00000000..ff576d33 --- /dev/null +++ b/scripts/plot_power_network_perfect.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built for the perfect foresight scenario. 
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection, rename_techs_tyndp +from plot_summary import preferred_order +from pypsa.plot import add_legend_circles, add_legend_lines + +logger = logging.getLogger(__name__) + + +def plot_map_perfect( + n, + components=["Link", "Store", "StorageUnit", "Generator"], + bus_size_factor=2e10, +): + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + # investment periods + investments = n.snapshots.levels[0] + + costs = {} + for comp in components: + df_c = n.df(comp) + if df_c.empty: + continue + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" + + active = pd.concat( + [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], + axis=1, + ).astype(int) + capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost + capital_cost_t = ( + (active.mul(capital_cost, axis=0)) + .groupby([n.df(comp).location, n.df(comp).nice_group]) + .sum() + ) + + capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") + + costs[comp] = capital_cost_t + + costs = pd.concat(costs).groupby(level=[1, 2]).sum() + costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) + + new_columns = preferred_order.intersection(costs.index.levels[1]).append( + costs.index.levels[1].difference(preferred_order) + ) + costs = costs.reindex(new_columns, level=1) + + for item in new_columns: + if item not in snakemake.config["plotting"]["tech_colors"]: + print( + "Warning!", + item, + "not in config/plotting/tech_colors, assign random color", + ) + snakemake.config["plotting"]["tech_colors"] = "pink" + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + print("dropping non-buses", to_drop) + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 2e3 + ac_color = "gray" + dc_color = "m" + + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "Today's transmission" + + line_widths[line_widths < line_lower_threshold] = 0.0 + link_widths[link_widths < line_lower_threshold] = 0.0 + + line_widths[line_widths > line_upper_threshold] = line_upper_threshold + link_widths[link_widths > line_upper_threshold] = line_upper_threshold + + for year in costs.columns: + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + fig.suptitle(year) + + n.plot( + bus_sizes=costs[year] / bus_size_factor, + bus_colors=snakemake.config["plotting"]["tech_colors"], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + 
frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + fig.savefig(snakemake.output[f"map_{year}"], bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_perfect", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map_perfect(n) diff --git a/scripts/plot_statistics.py b/scripts/plot_statistics.py index 11293c08..1f936c93 100644 --- a/scripts/plot_statistics.py +++ b/scripts/plot_statistics.py @@ -34,8 +34,6 @@ if __name__ == "__main__": lambda s: s != "", "lightgrey" ) - # %% - def rename_index(ds): specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})") generic = ds.index.get_level_values("carrier") diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 072c7128..cfb32441 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -8,17 +8,14 @@ Creates plots from summary CSV files. 
import logging -logger = logging.getLogger(__name__) - import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt -import numpy as np import pandas as pd - -plt.style.use("ggplot") - from prepare_sector_network import co2_emissions_year +logger = logging.getLogger(__name__) +plt.style.use("ggplot") + # consolidate and rename def rename_techs(label): @@ -49,6 +46,10 @@ def rename_techs(label): # "H2 Fuel Cell": "hydrogen storage", # "H2 pipeline": "hydrogen storage", "battery": "battery storage", + "H2 for industry": "H2 for industry", + "land transport fuel cell": "land transport fuel cell", + "land transport oil": "land transport oil", + "oil shipping": "shipping oil", # "CC": "CC" } @@ -117,7 +118,6 @@ preferred_order = pd.Index( "gas boiler", "gas", "natural gas", - "helmeth", "methanation", "ammonia", "hydrogen storage", @@ -151,17 +151,17 @@ def plot_costs(): df = df.drop(to_drop) - logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year") + logger.info(f"Total system cost of {round(df.sum().iloc[0])} EUR billion per year") new_index = preferred_order.intersection(df.index).append( df.index.difference(preferred_order) ) - new_columns = df.sum().sort_values().index + # new_columns = df.sum().sort_values().index fig, ax = plt.subplots(figsize=(12, 8)) - df.loc[new_index, new_columns].T.plot( + df.loc[new_index].T.plot( kind="bar", ax=ax, stacked=True, @@ -211,19 +211,24 @@ def plot_energy(): df = df.drop(to_drop) - logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") + logger.info(f"Total energy of {round(df.sum().iloc[0])} TWh/a") + + if df.empty: + fig, ax = plt.subplots(figsize=(12, 8)) + fig.savefig(snakemake.output.energy, bbox_inches="tight") + return new_index = preferred_order.intersection(df.index).append( df.index.difference(preferred_order) ) - new_columns = df.columns.sort_values() + # new_columns = df.columns.sort_values() fig, ax = plt.subplots(figsize=(12, 8)) - logger.debug(df.loc[new_index, new_columns]) + logger.debug(df.loc[new_index]) - df.loc[new_index, new_columns].T.plot( + df.loc[new_index].T.plot( kind="bar", ax=ax, stacked=True, @@ -267,8 +272,6 @@ def plot_balances(): i for i in balances_df.index.levels[0] if i not in co2_carriers ] - fig, ax = plt.subplots(figsize=(12, 8)) - for k, v in balances.items(): df = balances_df.loc[v] df = df.groupby(df.index.get_level_values(2)).sum() @@ -278,9 +281,14 @@ def plot_balances(): # remove trailing link ports df.index = [ - i[:-1] - if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"])) - else i + ( + i[:-1] + if ( + (i not in ["co2", "NH3", "H2"]) + and (i[-1:] in ["0", "1", "2", "3", "4"]) + ) + else i + ) for i in df.index ] @@ -290,11 +298,7 @@ def plot_balances(): df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10 ] - if v[0] in co2_carriers: - units = "MtCO2/a" - else: - units = "TWh/a" - + units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a" logger.debug( f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}" ) @@ -302,7 +306,9 @@ def plot_balances(): df = df.drop(to_drop) - logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}") + logger.debug( + f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}" + ) if df.empty: continue @@ -313,6 +319,8 @@ def plot_balances(): new_columns = df.columns.sort_values() + fig, ax = plt.subplots(figsize=(12, 8)) + df.loc[new_index, new_columns].T.plot( kind="bar", ax=ax, @@ -345,8 +353,6 @@ def 
plot_balances(): fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight") - plt.cla() - def historical_emissions(countries): """ @@ -354,8 +360,7 @@ def historical_emissions(countries): """ # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 # downloaded 201228 (modified by EEA last on 201221) - fn = "data/eea/UNFCCC_v23.csv" - df = pd.read_csv(fn, encoding="latin-1") + df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False) df.loc[df["Year"] == "1985-1987", "Year"] = 1986 df["Year"] = df["Year"].astype(int) df = df.set_index( @@ -379,18 +384,21 @@ def historical_emissions(countries): e["waste management"] = "5 - Waste management" e["other"] = "6 - Other Sector" e["indirect"] = "ind_CO2 - Indirect CO2" - e["total wL"] = "Total (with LULUCF)" - e["total woL"] = "Total (without LULUCF)" + e["other LULUCF"] = "4.H - Other LULUCF" pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"] if "GB" in countries: countries.remove("GB") countries.append("UK") - # remove countries which are not included in eea historical emission dataset - countries_to_remove = {"AL", "BA", "ME", "MK", "RS"} - countries = list(set(countries) - countries_to_remove) - year = np.arange(1990, 2018).tolist() + year = df.index.levels[0][df.index.levels[0] >= 1990] + + missing = pd.Index(countries).difference(df.index.levels[2]) + if not missing.empty: + logger.warning( + f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}" + ) + countries = pd.Index(df.index.levels[2]).intersection(countries) idx = pd.IndexSlice co2_totals = ( @@ -447,24 +455,17 @@ def plot_carbon_budget_distribution(input_eurostat): sns.set() sns.set_style("ticks") - plt.style.use("seaborn-ticks") plt.rcParams["xtick.direction"] = "in" plt.rcParams["ytick.direction"] = "in" plt.rcParams["xtick.labelsize"] = 20 plt.rcParams["ytick.labelsize"] = 20 - plt.figure(figsize=(10, 7)) - gs1 = gridspec.GridSpec(1, 1) - ax1 = plt.subplot(gs1[0, 0]) - ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22) - ax1.set_ylim([0, 5]) - ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1]) - - path_cb = "results/" + snakemake.params.RDIR + "csvs/" - countries = snakemake.params.countries emissions_scope = snakemake.params.emissions_scope report_year = snakemake.params.eurostat_report_year input_co2 = snakemake.input.co2 + + # historic emissions + countries = snakemake.params.countries e_1990 = co2_emissions_year( countries, input_eurostat, @@ -474,15 +475,37 @@ def plot_carbon_budget_distribution(input_eurostat): input_co2, year=1990, ) - CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0) - - ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None) - emissions = historical_emissions(countries) + # add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0 + emissions.loc[2019] = 2.971372 + emissions.loc[2020] = 2.691958 + emissions.loc[2021] = 2.869355 + + if snakemake.config["foresight"] == "myopic": + path_cb = "results/" + snakemake.params.RDIR + "/csvs/" + co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[ + ["cb"] + ] + co2_cap *= e_1990 + else: + supply_energy = pd.read_csv( + snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3] + ) + co2_cap = ( + supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9 + ) + co2_cap.rename(index=lambda 
x: int(x), inplace=True) + + plt.figure(figsize=(10, 7)) + gs1 = gridspec.GridSpec(1, 1) + ax1 = plt.subplot(gs1[0, 0]) + ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22) + # ax1.set_ylim([0, 5]) + ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1]) ax1.plot(emissions, color="black", linewidth=3, label=None) - # plot committed and uder-discussion targets + # plot committed and under-discussion targets # (notice that historical emissions include all countries in the # network, but targets refer to EU) ax1.plot( @@ -499,7 +522,7 @@ def plot_carbon_budget_distribution(input_eurostat): [0.45 * emissions[1990]], marker="*", markersize=12, - markerfacecolor="white", + markerfacecolor="black", markeredgecolor="black", ) @@ -523,21 +546,7 @@ def plot_carbon_budget_distribution(input_eurostat): ax1.plot( [2050], - [0.01 * emissions[1990]], - marker="*", - markersize=12, - markerfacecolor="white", - linewidth=0, - markeredgecolor="black", - label="EU under-discussion target", - zorder=10, - clip_on=False, - ) - - ax1.plot( - [2050], - [0.125 * emissions[1990]], - "ro", + [0.0 * emissions[1990]], marker="*", markersize=12, markerfacecolor="black", @@ -545,12 +554,16 @@ def plot_carbon_budget_distribution(input_eurostat): label="EU committed target", ) + for col in co2_cap.columns: + ax1.plot(co2_cap[col], linewidth=3, label=col) + ax1.legend( fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True ) - path_cb_plot = "results/" + snakemake.params.RDIR + "graphs/" - plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300) + plt.grid(axis="y") + path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf" + plt.savefig(path, bbox_inches="tight") if __name__ == "__main__": @@ -571,6 +584,5 @@ if __name__ == "__main__": for sector_opts in snakemake.params.sector_opts: opts = sector_opts.split("-") - for o in opts: - if "cb" in o: - plot_carbon_budget_distribution(snakemake.input.eurostat) + if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect": + plot_carbon_budget_distribution(snakemake.input.eurostat) diff --git a/scripts/plot_validation_cross_border_flows.py b/scripts/plot_validation_cross_border_flows.py index 8b063d8c..37c09666 100644 --- a/scripts/plot_validation_cross_border_flows.py +++ b/scripts/plot_validation_cross_border_flows.py @@ -84,13 +84,9 @@ def cross_border_time_series(countries, data): df_neg.plot.area( ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1] ) - if (axis % 2) == 0: - title = "Historic" - else: - title = "Optimized" - + title = "Historic" if (axis % 2) == 0 else "Optimized" ax[axis].set_title( - title + " Import / Export for " + cc.convert(country, to="name_short") + f"{title} Import / Export for " + cc.convert(country, to="name_short") ) # Custom legend elements @@ -137,16 +133,12 @@ def cross_border_bar(countries, data): df_country = sort_one_country(country, df) df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0) - if (order % 2) == 0: - title = "Historic" - else: - title = "Optimized" - + title = "Historic" if (order % 2) == 0 else "Optimized" df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename( - {0: title + " " + cc.convert(country, to="name_short")} + {0: f"{title} " + cc.convert(country, to="name_short")} ) df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename( - {0: title + " " + cc.convert(country, to="name_short")} + {0: f"{title} " + cc.convert(country, to="name_short")} ) df_positive = pd.concat([df_positive_new, 
df_positive]) diff --git a/scripts/plot_validation_electricity_prices.py b/scripts/plot_validation_electricity_prices.py index c229e382..5bb42bfb 100644 --- a/scripts/plot_validation_electricity_prices.py +++ b/scripts/plot_validation_electricity_prices.py @@ -9,7 +9,6 @@ import pandas as pd import pypsa import seaborn as sns from _helpers import configure_logging, set_scenario_config -from pypsa.statistics import get_bus_and_carrier sns.set_theme("paper", style="whitegrid") diff --git a/scripts/plot_validation_electricity_production.py b/scripts/plot_validation_electricity_production.py index 3e81faff..89ab9e6b 100644 --- a/scripts/plot_validation_electricity_production.py +++ b/scripts/plot_validation_electricity_production.py @@ -46,6 +46,12 @@ if __name__ == "__main__": header=[0, 1], parse_dates=True, ) + subset_technologies = ["Geothermal", "Nuclear", "Biomass", "Lignite", "Oil", "Coal"] + lowercase_technologies = [ + technology.lower() if technology in subset_technologies else technology + for technology in historic.columns.levels[1] + ] + historic.columns = historic.columns.set_levels(lowercase_technologies, level=1) colors = n.carriers.set_index("nice_name").color.where( lambda s: s != "", "lightgrey" diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index a7f1ddf3..85c20813 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -58,12 +58,11 @@ Description """ import logging -import re import numpy as np import pandas as pd import pypsa -from _helpers import configure_logging, set_scenario_config +from _helpers import configure_logging, find_opt, get_opt, set_scenario_config from add_electricity import load_costs, update_transmission_costs from pypsa.descriptors import expand_series @@ -195,7 +194,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"): logger.info(f"Aggregating time series to {segments} segments.") try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." 
"Install via 'pip install tsam'" ) @@ -270,11 +269,10 @@ def set_line_nom_max( hvdc = n.links.index[n.links.carrier == "DC"] n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext - n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) - n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) + n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set) + n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set) -# %% if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -298,42 +296,42 @@ if __name__ == "__main__": set_line_s_max_pu(n, snakemake.params.lines["s_max_pu"]) - for o in opts: - m = re.match(r"^\d+h$", o, re.IGNORECASE) - if m is not None: - n = average_every_nhours(n, m.group(0)) - break + # temporal averaging + nhours_config = snakemake.params.snapshots.get("resolution", False) + nhours_wildcard = get_opt(opts, r"^\d+h$") + nhours = nhours_wildcard or nhours_config + if nhours: + n = average_every_nhours(n, nhours) - for o in opts: - m = re.match(r"^\d+seg$", o, re.IGNORECASE) - if m is not None: - solver_name = snakemake.config["solving"]["solver"]["name"] - n = apply_time_segmentation(n, m.group(0)[:-3], solver_name) - break + # segments with package tsam + time_seg_config = snakemake.params.snapshots.get("segmentation", False) + time_seg_wildcard = get_opt(opts, r"^\d+seg$") + time_seg = time_seg_wildcard or time_seg_config + if time_seg: + solver_name = snakemake.config["solving"]["solver"]["name"] + n = apply_time_segmentation(n, time_seg.replace("seg", ""), solver_name) - for o in opts: - if "Co2L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - co2limit = float(m[0]) * snakemake.params.co2base - add_co2limit(n, co2limit, Nyears) - logger.info("Setting CO2 limit according to wildcard value.") - else: - add_co2limit(n, snakemake.params.co2limit, Nyears) - logger.info("Setting CO2 limit according to config value.") - break + Co2L_config = snakemake.params.co2limit_enable + Co2L_wildcard, co2limit_wildcard = find_opt(opts, "Co2L") + if Co2L_wildcard or Co2L_config: + if co2limit_wildcard is not None: + co2limit = co2limit_wildcard * snakemake.params.co2base + add_co2limit(n, co2limit, Nyears) + logger.info("Setting CO2 limit according to wildcard value.") + else: + add_co2limit(n, snakemake.params.co2limit, Nyears) + logger.info("Setting CO2 limit according to config value.") - for o in opts: - if "CH4L" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - limit = float(m[0]) * 1e6 - add_gaslimit(n, limit, Nyears) - logger.info("Setting gas usage limit according to wildcard value.") - else: - add_gaslimit(n, snakemake.params.gaslimit, Nyears) - logger.info("Setting gas usage limit according to config value.") - break + CH4L_config = snakemake.params.gaslimit_enable + CH4L_wildcard, gaslimit_wildcard = find_opt(opts, "CH4L") + if CH4L_wildcard or CH4L_config: + if gaslimit_wildcard is not None: + gaslimit = gaslimit_wildcard * 1e6 + add_gaslimit(n, gaslimit, Nyears) + logger.info("Setting gas usage limit according to wildcard value.") + else: + add_gaslimit(n, snakemake.params.gaslimit, Nyears) + logger.info("Setting gas usage limit according to config value.") for o in opts: if "+" not in o: @@ -354,21 +352,26 @@ if __name__ == "__main__": sel = c.df.carrier.str.contains(carrier) c.df.loc[sel, attr] *= factor - for o in opts: - if "Ept" in o: - logger.info( - "Setting time dependent emission prices according spot market price" + emission_prices = 
snakemake.params.costs["emission_prices"] + Ept_config = emission_prices.get("co2_monthly_prices", False) + Ept_wildcard = "Ept" in opts + Ep_config = emission_prices.get("enable", False) + Ep_wildcard, co2_wildcard = find_opt(opts, "Ep") + + if Ept_wildcard or Ept_config: + logger.info( + "Setting time dependent emission prices according to spot market price" + ) + add_dynamic_emission_prices(n) + elif Ep_wildcard or Ep_config: + if co2_wildcard is not None: + logger.info("Setting CO2 prices according to wildcard value.") + add_emission_prices(n, dict(co2=co2_wildcard)) + else: + logger.info("Setting CO2 prices according to config value.") + add_emission_prices( + n, dict(co2=snakemake.params.costs["emission_prices"]["co2"]) ) - add_dynamic_emission_prices(n) - elif "Ep" in o: - m = re.findall("[0-9]*\.?[0-9]+$", o) - if len(m) > 0: - logger.info("Setting emission prices according to wildcard value.") - add_emission_prices(n, dict(co2=float(m[0]))) - else: - logger.info("Setting emission prices according to config value.") - add_emission_prices(n, snakemake.params.costs["emission_prices"]) - break ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:] set_transmission_limit(n, ll_type, factor, costs, Nyears) @@ -381,10 +384,12 @@ if __name__ == "__main__": p_nom_max_ext=snakemake.params.links.get("max_extension", np.inf), ) - if "ATK" in opts: - enforce_autarky(n) - elif "ATKc" in opts: - enforce_autarky(n, only_crossborder=True) + autarky_config = snakemake.params.autarky + if "ATK" in opts or autarky_config.get("enable", False): + only_crossborder = False + if "ATKc" in opts or autarky_config.get("by_country", False): + only_crossborder = True + enforce_autarky(n, only_crossborder=only_crossborder) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_perfect_foresight.py b/scripts/prepare_perfect_foresight.py new file mode 100644 index 00000000..cf013577 --- /dev/null +++ b/scripts/prepare_perfect_foresight.py @@ -0,0 +1,557 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Concatenates PyPSA networks of single investment periods into one network. +""" + +import logging +import re + +import numpy as np +import pandas as pd +import pypsa +from _helpers import update_config_with_sector_opts +from add_existing_baseyear import add_build_year_to_new_assets +from pypsa.descriptors import expand_series +from pypsa.io import import_components_from_dataframe +from six import iterkeys + +logger = logging.getLogger(__name__) + + +# helper functions --------------------------------------------------- +def get_missing(df, n, c): + """ + Get those assets of df for component c which are still missing in network n. + + Input: + df: pandas DataFrame, static values of pypsa components + n : pypsa Network to which new assets should be added + c : string, pypsa component.list_name (e.g. "generators") + Return: + pd.DataFrame with static values of missing assets + """ + df_final = getattr(n, c) + missing_i = df.index.difference(df_final.index) + return df.loc[missing_i] + + +def get_social_discount(t, r=0.01): + """ + Calculate the social discount factor for a given time t and social + discount rate r [per unit]. + """ + return 1 / (1 + r) ** t + + +def get_investment_weighting(time_weighting, r=0.01): + """ + Define cost weighting.
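The social discount factor defined just above is 1 / (1 + r) ** t; with the default rate r = 0.01, a cost incurred ten years into the horizon enters the objective at roughly 90.5% of its nominal value. A quick, self-contained check:

def get_social_discount(t, r=0.01):
    # discount factor for year t at social discount rate r
    return 1 / (1 + r) ** t

print(round(get_social_discount(10), 4))  # 0.9053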
+ + Returns cost weightings depending on the time_weighting + (pd.Series) and the social discount rate r + """ + end = time_weighting.cumsum() + start = time_weighting.cumsum().shift().fillna(0) + return pd.concat([start, end], axis=1).apply( + lambda x: sum( + get_social_discount(t, r) for t in range(int(x.iloc[0]), int(x.iloc[1])) + ), + axis=1, + ) + + +def add_year_to_constraints(n, baseyear): + """ + Add investment period to global constraints and rename index. + + Parameters + ---------- + n : pypsa.Network + baseyear : int + year in which optimized assets are built + """ + + for c in n.iterate_components(["GlobalConstraint"]): + c.df["investment_period"] = baseyear + c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True) + + +def hvdc_transport_model(n): + """ + Convert AC lines to DC links for multi-decade optimisation with line + expansion. + + Losses of DC links are assumed to be 3% per 1000 km. + """ + + logger.info("Convert AC lines to DC links to perform multi-decade optimisation.") + + n.madd( + "Link", + n.lines.index, + bus0=n.lines.bus0, + bus1=n.lines.bus1, + p_nom_extendable=True, + p_nom=n.lines.s_nom, + p_nom_min=n.lines.s_nom, + p_min_pu=-1, + efficiency=1 - 0.03 * n.lines.length / 1000, + marginal_cost=0, + carrier="DC", + length=n.lines.length, + capital_cost=n.lines.capital_cost, + ) + + # Remove AC lines + logger.info("Removing AC lines") + lines_rm = n.lines.index + n.mremove("Line", lines_rm) + + # Set efficiency of all DC links to include losses depending on length + n.links.loc[n.links.carrier == "DC", "efficiency"] = ( + 1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000 + ) + + +def adjust_electricity_grid(n, year, years): + """ + Add carrier to lines. Replace AC lines with DC links in case of line + expansion. Add lifetime to DC links in case of line expansion. + + Parameters + ---------- + n : pypsa.Network + year : int + year in which optimized assets are built + years: list + investment periods + """ + n.lines["carrier"] = "AC" + links_i = n.links[n.links.carrier == "DC"].index + if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any(): + hvdc_transport_model(n) + links_i = n.links[n.links.carrier == "DC"].index + n.links.loc[links_i, "lifetime"] = 100 + if year != years[0]: + n.links.loc[links_i, "p_nom_min"] = 0 + n.links.loc[links_i, "p_nom"] = 0 + + +# -------------------------------------------------------------------- +def concat_networks(years): + """ + Concatenate the given pypsa networks and add the build_year.
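get_investment_weighting above turns the period lengths into objective weights by summing the social discount factors over each period's years. For three 10-year periods and r = 0.01 (illustrative values), the second period spans years 10 to 19 of the horizon:

import pandas as pd

time_weighting = pd.Series([10, 10, 10], index=[2030, 2040, 2050])  # years per period
end = time_weighting.cumsum()  # 10, 20, 30
start = end.shift().fillna(0)  # 0, 10, 20
# objective weight of the 2040 period = sum of discount factors over years 10..19
print(round(sum(1 / 1.01**t for t in range(10, 20)), 2))  # 8.66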
+ + Return: + n : pypsa.Network for the whole planning horizon + """ + + # input paths of sector coupling networks + network_paths = [snakemake.input.brownfield_network] + [ + snakemake.input[f"network_{year}"] for year in years[1:] + ] + # final concatenated network + n = pypsa.Network() + + # iterate over single year networks and concatenate them into the perfect foresight network + for i, network_path in enumerate(network_paths): + year = years[i] + network = pypsa.Network(network_path) + adjust_electricity_grid(network, year, years) + add_build_year_to_new_assets(network, year) + + # static ---------------------------------- + for component in network.iterate_components( + [ + "Bus", + "Carrier", + "Generator", + "Link", + "Store", + "Load", + "Line", + "StorageUnit", + ] + ): + df_year = component.df.copy() + missing = get_missing(df_year, n, component.list_name) + + import_components_from_dataframe(n, missing, component.name) + + # time variant -------------------------------------------------- + network_sns = pd.MultiIndex.from_product([[year], network.snapshots]) + snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns) + n.set_snapshots(snapshots) + + for component in network.iterate_components(): + pnl = getattr(n, component.list_name + "_t") + for k in iterkeys(component.pnl): + pnl_year = component.pnl[k].copy().reindex(snapshots, level=1) + if pnl_year.empty and (not (component.name == "Load" and k == "p_set")): + continue + if component.name == "Load": + static_load = network.loads.loc[network.loads.p_set != 0] + static_load_t = expand_series(static_load.p_set, network_sns).T + pnl_year = pd.concat( + [pnl_year.reindex(network_sns), static_load_t], axis=1 + ) + columns = (pnl[k].columns.union(pnl_year.columns)).unique() + pnl[k] = pnl[k].reindex(columns=columns) + pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year + + else: + # For components that aren't new, we just extend + # time-varying data from the previous investment + # period. + if i > 0: + pnl[k].loc[(year,)] = pnl[k].loc[(years[i - 1],)].values + + # Now, add time-varying data for new components. + cols = pnl_year.columns.difference(pnl[k].columns) + pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1) + + n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values + + # (3) global constraints + for component in network.iterate_components(["GlobalConstraint"]): + add_year_to_constraints(network, year) + import_components_from_dataframe(n, component.df, component.name) + + # set investment periods + n.investment_periods = n.snapshots.levels[0] + # weighting of the investment periods -> assume the last period has the same weighting as the one before + time_w = n.investment_periods.to_series().diff().shift(-1).ffill() + n.investment_period_weightings["years"] = time_w + # set objective weightings + objective_w = get_investment_weighting( + n.investment_period_weightings["years"], social_discountrate + ) + n.investment_period_weightings["objective"] = objective_w + # all former static loads are now time-dependent -> set static = 0 + n.loads["p_set"] = 0 + n.loads_t.p_set.fillna(0, inplace=True) + + return n + + +def adjust_stores(n): + """ + Make sure that stores still behave cyclically over one year and not over + the whole modelling horizon.
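In concat_networks above, the snapshots of each single-year network are lifted into a (period, timestep) MultiIndex before being merged into the perfect-foresight network. A minimal sketch of that bookkeeping, with an assumed period and a three-hour toy index:

import pandas as pd

snapshots = pd.date_range("2030-01-01", periods=3, freq="h")  # illustrative
network_sns = pd.MultiIndex.from_product([[2030], snapshots])
print(network_sns[0])  # (2030, Timestamp('2030-01-01 00:00:00'))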
+ """ + # cyclic constraint + cyclic_i = n.stores[n.stores.e_cyclic].index + n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True + n.stores.loc[cyclic_i, "e_cyclic"] = False + # non-cyclic store assumptions + non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"] + co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index + n.stores.loc[co2_i, "e_cyclic_per_period"] = False + n.stores.loc[co2_i, "e_cyclic"] = False + # e_initial at beginning of each investment period + e_initial_store = ["solid biomass", "biogas"] + co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index + n.stores.loc[co2_i, "e_initial_per_period"] = True + # n.stores.loc[co2_i, "e_initial"] *= 10 + # n.stores.loc[co2_i, "e_nom"] *= 10 + e_initial_store = ["co2 stored"] + co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index + n.stores.loc[co2_i, "e_initial_per_period"] = True + + return n + + +def set_phase_out(n, carrier, ct, phase_out_year): + """ + Set planned phase-outs for a given carrier, country (ct) and planned year + of phase-out (phase_out_year). + """ + df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)] + # assets which are going to be phased out before the end of their lifetime + assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index + build_year = n.links.loc[assets_i, "build_year"] + # adjust lifetime + n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float) + + +def set_all_phase_outs(n): + # TODO move this to a csv or to the config + planned = [ + (["nuclear"], "DE", 2022), + (["nuclear"], "BE", 2025), + (["nuclear"], "ES", 2027), + (["coal", "lignite"], "DE", 2030), + (["coal", "lignite"], "ES", 2027), + (["coal", "lignite"], "FR", 2022), + (["coal", "lignite"], "GB", 2024), + (["coal", "lignite"], "IT", 2025), + (["coal", "lignite"], "DK", 2030), + (["coal", "lignite"], "FI", 2030), + (["coal", "lignite"], "HU", 2030), + (["coal", "lignite"], "SK", 2030), + (["coal", "lignite"], "GR", 2030), + (["coal", "lignite"], "IE", 2030), + (["coal", "lignite"], "NL", 2030), + (["coal", "lignite"], "RS", 2030), + ] + for carrier, ct, phase_out_year in planned: + set_phase_out(n, carrier, ct, phase_out_year) + # remove assets which are already phased out + remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index + n.mremove("Link", remove_i) + + +def set_carbon_constraints(n, opts): + """ + Add global constraints for carbon emissions.
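The lifetime adjustment in set_phase_out above boils down to: whenever build_year + lifetime reaches past the phase-out year, the lifetime is cut so the asset retires exactly in that year. With assumed round numbers:

# a coal link built in 2010 with 40 years of lifetime would retire in 2050;
# a 2030 phase-out cuts its lifetime to 2030 - 2010 = 20 years (illustrative)
build_year, lifetime, phase_out_year = 2010, 40.0, 2030
if build_year + lifetime > phase_out_year:
    lifetime = float(phase_out_year - build_year)
print(lifetime)  # 20.0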
+ """ + budget = None + for o in opts: + # other budgets + m = re.match(r"^\d+p\d$", o, re.IGNORECASE) + if m is not None: + budget = snakemake.config["co2_budget"][m.group(0)] * 1e9 + if budget is not None: + logger.info(f"Add carbon budget of {budget}") + n.add( + "GlobalConstraint", + "Budget", + type="Co2Budget", + carrier_attribute="co2_emissions", + sense="<=", + constant=budget, + investment_period=n.investment_periods[-1], + ) + + # drop other CO2 limits + drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index + n.mremove("GlobalConstraint", drop_i) + + n.add( + "GlobalConstraint", + "carbon_neutral", + type="co2_limit", + carrier_attribute="co2_emissions", + sense="<=", + constant=0, + investment_period=n.investment_periods[-1], + ) + + # set minimum CO2 emission constraint to avoid an overly fast reduction + if "co2min" in opts: + emissions_1990 = 4.53693 + emissions_2019 = 3.344096 + target_2030 = 0.45 * emissions_1990 + annual_reduction = (emissions_2019 - target_2030) / 11 + first_year = n.snapshots.levels[0][0] + time_weightings = n.investment_period_weightings.loc[first_year, "years"] + co2min = emissions_2019 - ((first_year - 2019) * annual_reduction) + logger.info(f"Add minimum emissions for {first_year} of {co2min} Gt CO2/a") + n.add( + "GlobalConstraint", + f"Co2Min-{first_year}", + type="Co2min", + carrier_attribute="co2_emissions", + sense=">=", + investment_period=first_year, + constant=co2min * 1e9 * time_weightings, + ) + + return n + + +def adjust_lvlimit(n): + """ + Convert the global constraints of the single investment periods to one + uniform constraint if all attributes are the same. + """ + c = "GlobalConstraint" + cols = ["carrier_attribute", "sense", "constant", "type"] + glc_type = "transmission_volume_expansion_limit" + if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all(): + glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]] + glc.index = pd.Index(["lv_limit"]) + remove_i = n.df(c)[n.df(c).type == glc_type].index + n.mremove(c, remove_i) + import_components_from_dataframe(n, glc, c) + + return n + + +def adjust_CO2_glc(n): + c = "GlobalConstraint" + glc_name = "CO2Limit" + glc_type = "primary_energy" + mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type) + n.df(c).loc[mask, "type"] = "co2_limit" + + return n + + +def add_H2_boilers(n): + """ + Gas boilers can be retrofitted to run with H2. + + Add H2 boilers for heating for all existing gas boilers. + """ + c = "Link" + logger.info("Add H2 boilers.") + # existing gas boilers + mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable + gas_i = n.links[mask].index + df = n.links.loc[gas_i] + # adjust bus 0 + df["bus0"] = df.bus1.map(n.buses.location) + " H2" + # rename carrier and index + df["carrier"] = df.carrier.apply( + lambda x: x.replace("gas boiler", "retrofitted H2 boiler") + ) + df.rename( + index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True + ) + # TODO: costs for retrofitting + df["capital_cost"] = 100 + # set existing capacity to zero + df["p_nom"] = 0 + df["p_nom_extendable"] = True + # add H2 boilers to network + import_components_from_dataframe(n, df, c) + + +def apply_time_segmentation_perfect( + n, segments, solver_name="cbc", overwrite_time_dependent=True +): + """ + Aggregate time series to segments with different lengths.
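The co2min branch in set_carbon_constraints above interpolates linearly between 2019 emissions and a 2030 target of 45% below 1990. Spelled out for an assumed first investment period in 2025 (values in Gt CO2, as in the constants above):

emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990                     # ~2.042
annual_reduction = (emissions_2019 - target_2030) / 11  # ~0.118 per year
first_year = 2025                                       # assumed
co2min = emissions_2019 - (first_year - 2019) * annual_reduction
print(round(co2min, 3))  # 2.634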
+ + Input: + n: pypsa Network + segments: (int) number of segments in which the typical period should be + subdivided + solver_name: (str) name of solver + overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network + with typical time series created by tsam + """ + try: + import tsam.timeseriesaggregation as tsam + except ImportError: + raise ModuleNotFoundError( + "Optional dependency 'tsam' not found. " "Install via 'pip install tsam'." + ) + + # get all time-dependent data + columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"]) + raw = pd.DataFrame(index=n.snapshots, columns=columns) + for c in n.iterate_components(): + for attr, pnl in c.pnl.items(): + # exclude e_min_pu which is used for SOC of EVs in the morning + if not pnl.empty and attr != "e_min_pu": + df = pnl.copy() + df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns]) + raw = pd.concat([raw, df], axis=1) + raw = raw.dropna(axis=1) + sn_weightings = {} + + for year in raw.index.levels[0]: + logger.info(f"Find representative snapshots for {year}.") + raw_t = raw.loc[year] + # normalise all time-dependent data + annual_max = raw_t.max().replace(0, 1) + raw_t = raw_t.div(annual_max, level=0) + # get representative segments + agg = tsam.TimeSeriesAggregation( + raw_t, + hoursPerPeriod=len(raw_t), + noTypicalPeriods=1, + noSegments=int(segments), + segmentation=True, + solver=solver_name, + ) + segmented = agg.createTypicalPeriods() + + weightings = segmented.index.get_level_values("Segment Duration") + offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) + timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets] + snapshots = pd.DatetimeIndex(timesteps) + sn_weightings[year] = pd.Series( + weightings, index=snapshots, name="weightings", dtype="float64" + ) + + sn_weightings = pd.concat(sn_weightings) + n.set_snapshots(sn_weightings.index) + n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0) + + return n + + +def set_temporal_aggregation_SEG(n, opts, solver_name): + """ + Aggregate network temporally with tsam.
""" + for o in opts: + # segments with package tsam + m = re.match(r"^(\d+)seg$", o, re.IGNORECASE) + if m is not None: + segments = int(m[1]) + logger.info(f"Use temporal segmentation with {segments} segments") + n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name) + break + return n + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "prepare_perfect_foresight", + simpl="", + opts="", + clusters="37", + ll="v1.5", + sector_opts="1p7-4380H-T-H-B-I-A-dist1", + ) + + update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) + # parameters ----------------------------------------------------------- + years = snakemake.config["scenario"]["planning_horizons"] + opts = snakemake.wildcards.sector_opts.split("-") + social_discountrate = snakemake.config["costs"]["social_discountrate"] + for o in opts: + if "sdr" in o: + social_discountrate = float(o.replace("sdr", "")) / 100 + + logger.info( + f"Concatenating networks of investment periods {years} with a social discount rate of {social_discountrate * 100}%" + ) + + # concatenate pre-networks of the planning horizon into a single network ------------ + n = concat_networks(years) + + # temporal aggregation + opts = snakemake.wildcards.sector_opts.split("-") + solver_name = snakemake.config["solving"]["solver"]["name"] + n = set_temporal_aggregation_SEG(n, opts, solver_name) + + # adjust global constraints lv limit if the same for all years + n = adjust_lvlimit(n) + # adjust global constraints CO2 limit + n = adjust_CO2_glc(n) + # adjust stores to multi period investment + n = adjust_stores(n) + + # set phase outs + set_all_phase_outs(n) + + # add H2 boilers + add_H2_boilers(n) + + # set carbon constraints + opts = snakemake.wildcards.sector_opts.split("-") + n = set_carbon_constraints(n, opts) + + # export network + n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py old mode 100644 new mode 100755 index e1cc0462..06aea9ec --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -11,19 +11,15 @@ import logging import os import re from itertools import product +from types import SimpleNamespace import networkx as nx import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import ( - configure_logging, - generate_periodic_profiles, - set_scenario_config, - update_config_with_sector_opts, -) -from add_electricity import calculate_annuity, sanitize_carriers +from _helpers import update_config_with_sector_opts, set_scenario_config +from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation @@ -31,16 +27,8 @@ from pypsa.geo import haversine_pts from pypsa.io import import_components_from_dataframe from scipy.stats import beta -logger = logging.getLogger(__name__) - -from types import SimpleNamespace - spatial = SimpleNamespace() - -from packaging.version import Version, parse - -pd_version = parse(pd.__version__) -agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {} +logger = logging.getLogger(__name__) def define_spatial(nodes, options): @@ -100,12 +88,17 @@ def define_spatial(nodes, options): spatial.gas.industry = nodes + " gas for industry"
spatial.gas.industry_cc = nodes + " gas for industry CC" spatial.gas.biogas_to_gas = nodes + " biogas to gas" + spatial.gas.biogas_to_gas_cc = nodes + " biogas to gas CC" else: spatial.gas.nodes = ["EU gas"] spatial.gas.locations = ["EU"] spatial.gas.biogas = ["EU biogas"] spatial.gas.industry = ["gas for industry"] spatial.gas.biogas_to_gas = ["EU biogas to gas"] + if options.get("biomass_spatial", options["biomass_transport"]): + spatial.gas.biogas_to_gas_cc = nodes + " biogas to gas CC" + else: + spatial.gas.biogas_to_gas_cc = ["EU biogas to gas CC"] if options.get("co2_spatial", options["co2network"]): spatial.gas.industry_cc = nodes + " gas for industry CC" else: @@ -132,15 +125,43 @@ def define_spatial(nodes, options): spatial.h2.locations = nodes # methanol + + # beware: unlike other carriers, uses locations rather than locations+carriername + # this avoids the separation between nodes and locations + spatial.methanol = SimpleNamespace() + spatial.methanol.nodes = ["EU methanol"] spatial.methanol.locations = ["EU"] + if options["regional_methanol_demand"]: + spatial.methanol.demand_locations = nodes + spatial.methanol.shipping = nodes + " shipping methanol" + else: + spatial.methanol.demand_locations = ["EU"] + spatial.methanol.shipping = ["EU shipping methanol"] + # oil spatial.oil = SimpleNamespace() + spatial.oil.nodes = ["EU oil"] spatial.oil.locations = ["EU"] + if options["regional_oil_demand"]: + spatial.oil.demand_locations = nodes + spatial.oil.naphtha = nodes + " naphtha for industry" + spatial.oil.kerosene = nodes + " kerosene for aviation" + spatial.oil.shipping = nodes + " shipping oil" + spatial.oil.agriculture_machinery = nodes + " agriculture machinery oil" + spatial.oil.land_transport = nodes + " land transport oil" + else: + spatial.oil.demand_locations = ["EU"] + spatial.oil.naphtha = ["EU naphtha for industry"] + spatial.oil.kerosene = ["EU kerosene for aviation"] + spatial.oil.shipping = ["EU shipping oil"] + spatial.oil.agriculture_machinery = ["EU agriculture machinery oil"] + spatial.oil.land_transport = ["EU land transport oil"] + # uranium spatial.uranium = SimpleNamespace() spatial.uranium.nodes = ["EU uranium"] @@ -159,8 +180,6 @@ def define_spatial(nodes, options): return spatial -from types import SimpleNamespace - spatial = SimpleNamespace() @@ -189,10 +208,7 @@ def get(item, investment_year=None): """ Check whether item depends on investment year. """ - if isinstance(item, dict): - return item[investment_year] - else: - return item + return item[investment_year] if isinstance(item, dict) else item def co2_emissions_year( @@ -225,7 +241,7 @@ # TODO: move to own rule with sector-opts wildcard? -def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year, input_co2): +def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year): """ Distribute carbon budget following beta or exponential transition path.
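Several of the options used throughout this file may be given either as a plain scalar or as a dictionary keyed by investment year; get() above dispatches between the two forms. A self-contained illustration (the option value is made up):

def get(item, investment_year=None):
    # dictionary values are resolved per investment year, scalars pass through
    return item[investment_year] if isinstance(item, dict) else item

share = {2030: 0.25, 2040: 0.6, 2050: 1.0}  # illustrative option value
print(get(share, 2040))  # 0.6
print(get(0.5, 2040))    # 0.5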
""" @@ -264,6 +280,8 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year, inp ) planning_horizons = snakemake.params.planning_horizons + if not isinstance(planning_horizons, list): + planning_horizons = [planning_horizons] t_0 = planning_horizons[0] if "be" in o: @@ -400,6 +418,11 @@ def update_wind_solar_costs(n, costs): tech = "offwind-" + connection profile = snakemake.input["profile_offwind_" + connection] with xr.open_dataset(profile) as ds: + + # if-statement for compatibility with old profiles + if "year" in ds.indexes: + ds = ds.sel(year=ds.year.min(), drop=True) + underwater_fraction = ds["underwater_fraction"].to_pandas() connection_cost = ( snakemake.params.length_factor @@ -418,11 +441,9 @@ def update_wind_solar_costs(n, costs): # e.g. clusters == 37m means that VRE generators are left # at clustering of simplified network, but that they are # connected to 37-node network - if snakemake.wildcards.clusters[-1:] == "m": - genmap = busmap_s - else: - genmap = clustermaps - + genmap = ( + busmap_s if snakemake.wildcards.clusters[-1:] == "m" else clustermaps + ) connection_cost = (connection_cost * weight).groupby( genmap ).sum() / weight.groupby(genmap).sum() @@ -435,13 +456,13 @@ def update_wind_solar_costs(n, costs): logger.info( "Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format( - connection_cost[0].min(), connection_cost[0].max(), tech + connection_cost.min(), connection_cost.max(), tech ) ) - n.generators.loc[ - n.generators.carrier == tech, "capital_cost" - ] = capital_cost.rename(index=lambda node: node + " " + tech) + n.generators.loc[n.generators.carrier == tech, "capital_cost"] = ( + capital_cost.rename(index=lambda node: node + " " + tech) + ) def add_carrier_buses(n, carrier, nodes=None): @@ -462,10 +483,11 @@ def add_carrier_buses(n, carrier, nodes=None): n.add("Carrier", carrier) unit = "MWh_LHV" if carrier == "gas" else "MWh_th" + # preliminary value for non-gas carriers to avoid zeros + capital_cost = costs.at["gas storage", "fixed"] if carrier == "gas" else 0.02 n.madd("Bus", nodes, location=location, carrier=carrier, unit=unit) - # capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M n.madd( "Store", nodes + " Store", @@ -473,8 +495,7 @@ def add_carrier_buses(n, carrier, nodes=None): e_nom_extendable=True, e_cyclic=True, carrier=carrier, - capital_cost=0.2 - * costs.at[carrier, "discount rate"], # preliminary value to avoid zeros + capital_cost=capital_cost, ) n.madd( @@ -510,8 +531,7 @@ def remove_non_electric_buses(n): """ Remove buses from pypsa-eur with carriers which are not AC buses. """ - to_drop = list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()) - if to_drop: + if to_drop := list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()): logger.info(f"Drop buses from PyPSA-Eur with carrier: {to_drop}") n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])] @@ -528,7 +548,18 @@ def patch_electricity_network(n): n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True) -def add_co2_tracking(n, options): +def add_eu_bus(n, x=-5.5, y=46): + """ + Add EU bus to the network. + + This cosmetic bus serves as a reference point for the location of + the EU buses in the plots and summaries. 
+ """ + n.add("Bus", "EU", location="EU", x=x, y=y, carrier="none") + n.add("Carrier", "none") + + +def add_co2_tracking(n, costs, options): # minus sign because opposite to how fossil fuels used: # CH4 burning puts CH4 down, atmosphere up n.add("Carrier", "co2", co2_emissions=-1.0) @@ -546,7 +577,7 @@ def add_co2_tracking(n, options): bus="co2 atmosphere", ) - # this tracks CO2 stored, e.g. underground + # add CO2 tanks n.madd( "Bus", spatial.co2.nodes, @@ -555,6 +586,39 @@ def add_co2_tracking(n, options): unit="t_co2", ) + n.madd( + "Store", + spatial.co2.nodes, + e_nom_extendable=True, + capital_cost=costs.at["CO2 storage tank", "fixed"], + carrier="co2 stored", + e_cyclic=True, + bus=spatial.co2.nodes, + ) + n.add("Carrier", "co2 stored") + + # this tracks CO2 sequestered, e.g. underground + sequestration_buses = pd.Index(spatial.co2.nodes).str.replace( + " stored", " sequestered" + ) + n.madd( + "Bus", + sequestration_buses, + location=spatial.co2.locations, + carrier="co2 sequestered", + unit="t_co2", + ) + + n.madd( + "Link", + sequestration_buses, + bus0=spatial.co2.nodes, + bus1=sequestration_buses, + carrier="co2 sequestered", + efficiency=1.0, + p_nom_extendable=True, + ) + if options["regional_co2_sequestration_potential"]["enable"]: upper_limit = ( options["regional_co2_sequestration_potential"]["max_size"] * 1e3 @@ -570,21 +634,22 @@ def add_co2_tracking(n, options): .mul(1e6) / annualiser ) # t - e_nom_max = e_nom_max.rename(index=lambda x: x + " co2 stored") + e_nom_max = e_nom_max.rename(index=lambda x: x + " co2 sequestered") else: e_nom_max = np.inf n.madd( "Store", - spatial.co2.nodes, + sequestration_buses, e_nom_extendable=True, e_nom_max=e_nom_max, capital_cost=options["co2_sequestration_cost"], - carrier="co2 stored", - bus=spatial.co2.nodes, + bus=sequestration_buses, + lifetime=options["co2_sequestration_lifetime"], + carrier="co2 sequestered", ) - n.add("Carrier", "co2 stored") + n.add("Carrier", "co2 sequestered") if options["co2_vent"]: n.madd( @@ -613,6 +678,8 @@ def add_co2_network(n, costs): * co2_links.length ) capital_cost = cost_onshore + cost_submarine + cost_factor = snakemake.config["sector"]["co2_network_cost_factor"] + capital_cost *= cost_factor n.madd( "Link", @@ -656,27 +723,27 @@ def add_dac(n, costs): heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)] locations = n.buses.location[heat_buses] - efficiency2 = -( + electricity_input = ( costs.at["direct air capture", "electricity-input"] + costs.at["direct air capture", "compression-electricity-input"] - ) - efficiency3 = -( + ) # MWh_el / tCO2 + heat_input = ( costs.at["direct air capture", "heat-input"] - costs.at["direct air capture", "compression-heat-output"] - ) + ) # MWh_th / tCO2 n.madd( "Link", heat_buses.str.replace(" heat", " DAC"), - bus0="co2 atmosphere", - bus1=spatial.co2.df.loc[locations, "nodes"].values, - bus2=locations.values, - bus3=heat_buses, + bus0=locations.values, + bus1=heat_buses, + bus2="co2 atmosphere", + bus3=spatial.co2.df.loc[locations, "nodes"].values, carrier="DAC", - capital_cost=costs.at["direct air capture", "fixed"], - efficiency=1.0, - efficiency2=efficiency2, - efficiency3=efficiency3, + capital_cost=costs.at["direct air capture", "fixed"] / electricity_input, + efficiency=-heat_input / electricity_input, + efficiency2=-1 / electricity_input, + efficiency3=1 / electricity_input, p_nom_extendable=True, lifetime=costs.at["direct air capture", "lifetime"], ) @@ -701,6 +768,7 @@ def add_co2limit(n, nyears=1.0, limit=0.0): "CO2Limit", 
carrier_attribute="co2_emissions", sense="<=", + type="co2_atmosphere", constant=co2_limit, ) @@ -815,14 +883,13 @@ def add_ammonia(n, costs): bus2=nodes + " H2", p_nom_extendable=True, carrier="Haber-Bosch", - efficiency=1 - / ( - cf_industry["MWh_elec_per_tNH3_electrolysis"] - / cf_industry["MWh_NH3_per_tNH3"] - ), # output: MW_NH3 per MW_elec - efficiency2=-cf_industry["MWh_H2_per_tNH3_electrolysis"] - / cf_industry["MWh_elec_per_tNH3_electrolysis"], # input: MW_H2 per MW_elec - capital_cost=costs.at["Haber-Bosch", "fixed"], + efficiency=1 / costs.at["Haber-Bosch", "electricity-input"], + efficiency2=-costs.at["Haber-Bosch", "hydrogen-input"] + / costs.at["Haber-Bosch", "electricity-input"], + capital_cost=costs.at["Haber-Bosch", "fixed"] + / costs.at["Haber-Bosch", "electricity-input"], + marginal_cost=costs.at["Haber-Bosch", "VOM"] + / costs.at["Haber-Bosch", "electricity-input"], lifetime=costs.at["Haber-Bosch", "lifetime"], ) @@ -854,47 +921,6 @@ def add_ammonia(n, costs): ) -def add_wave(n, wave_cost_factor): - # TODO: handle in Snakefile - wave_fn = "data/WindWaveWEC_GLTB.xlsx" - - # in kW - capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) - - # in EUR/MW - annuity_factor = calculate_annuity(25, 0.07) + 0.03 - costs = ( - 1e6 - * wave_cost_factor - * annuity_factor - * pd.Series({"Attenuator": 2.5, "F2HB": 2, "MultiPA": 1.5}) - ) - - sheets = pd.read_excel( - wave_fn, - sheet_name=["FirthForth", "Hebrides"], - usecols=["Attenuator", "F2HB", "MultiPA"], - index_col=0, - skiprows=[0], - parse_dates=True, - ) - - wave = pd.concat( - [sheets[l].divide(capacity, axis=1) for l in locations], keys=locations, axis=1 - ) - - for wave_type in costs.index: - n.add( - "Generator", - "Hebrides " + wave_type, - bus="GB4 0", # TODO this location is hardcoded - p_nom_extendable=True, - carrier="wave", - capital_cost=costs[wave_type], - p_max_pu=wave["Hebrides", wave_type], - ) - - def insert_electricity_distribution_grid(n, costs): # TODO pop_layout? # TODO options? @@ -992,6 +1018,7 @@ def insert_electricity_distribution_grid(n, costs): "Store", nodes + " home battery", bus=nodes + " home battery", + location=nodes, e_cyclic=True, e_nom_extendable=True, carrier="home battery", @@ -1033,7 +1060,7 @@ def insert_gas_distribution_costs(n, costs): f"Inserting gas distribution grid with investment cost factor of {f_costs}" ) - capital_cost = costs.loc["electricity distribution grid"]["fixed"] * f_costs + capital_cost = costs.at["electricity distribution grid", "fixed"] * f_costs # gas boilers gas_b = n.links.index[ @@ -1110,6 +1137,7 @@ def add_storage_and_grids(n, costs): efficiency=costs.at["OCGT", "efficiency"], capital_cost=costs.at["OCGT", "fixed"] * costs.at["OCGT", "efficiency"], # NB: fixed cost is per MWel + marginal_cost=costs.at["OCGT", "VOM"], lifetime=costs.at["OCGT", "lifetime"], ) @@ -1170,7 +1198,7 @@ def add_storage_and_grids(n, costs): if options["gas_network"]: logger.info( - "Add natural gas infrastructure, incl. LNG terminals, production and entry-points." + "Add natural gas infrastructure, incl. LNG terminals, production, storage and entry-points." 
) if options["H2_retrofit"]: @@ -1215,10 +1243,25 @@ def add_storage_and_grids(n, costs): remove_i = n.generators[gas_i & internal_i].index n.generators.drop(remove_i, inplace=True) - p_nom = gas_input_nodes.sum(axis=1).rename(lambda x: x + " gas") + input_types = ["lng", "pipeline", "production"] + p_nom = gas_input_nodes[input_types].sum(axis=1).rename(lambda x: x + " gas") n.generators.loc[gas_i, "p_nom_extendable"] = False n.generators.loc[gas_i, "p_nom"] = p_nom + # add existing gas storage capacity + gas_i = n.stores.carrier == "gas" + e_nom = ( + gas_input_nodes["storage"] + .rename(lambda x: x + " gas Store") + .reindex(n.stores.index) + .fillna(0.0) + * 1e3 + ) # MWh_LHV + e_nom.clip( + upper=e_nom.quantile(0.98), inplace=True + ) # limit extremely large storage + n.stores.loc[gas_i, "e_nom_min"] = e_nom + # add candidates for new gas pipelines to achieve full connectivity G = nx.Graph() @@ -1236,11 +1279,9 @@ def add_storage_and_grids(n, costs): # apply k_edge_augmentation weighted by length of complement edges k_edge = options.get("gas_network_connectivity_upgrade", 3) - augmentation = list( + if augmentation := list( k_edge_augmentation(G, k_edge, avail=complement_edges.values) - ) - - if augmentation: + ): new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"]) new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1) @@ -1355,6 +1396,7 @@ def add_storage_and_grids(n, costs): bus2=spatial.co2.nodes, p_nom_extendable=True, carrier="Sabatier", + p_min_pu=options.get("min_part_load_methanation", 0), efficiency=costs.at["methanation", "efficiency"], efficiency2=-costs.at["methanation", "efficiency"] * costs.at["gas", "CO2 intensity"], @@ -1363,23 +1405,6 @@ def add_storage_and_grids(n, costs): lifetime=costs.at["methanation", "lifetime"], ) - if options["helmeth"]: - n.madd( - "Link", - spatial.nodes, - suffix=" helmeth", - bus0=nodes, - bus1=spatial.gas.nodes, - bus2=spatial.co2.nodes, - carrier="helmeth", - p_nom_extendable=True, - efficiency=costs.at["helmeth", "efficiency"], - efficiency2=-costs.at["helmeth", "efficiency"] - * costs.at["gas", "CO2 intensity"], - capital_cost=costs.at["helmeth", "fixed"], - lifetime=costs.at["helmeth", "lifetime"], - ) - if options.get("coal_cc"): n.madd( "Link", @@ -1404,7 +1429,7 @@ def add_storage_and_grids(n, costs): lifetime=costs.at["coal", "lifetime"], ) - if options["SMR"]: + if options["SMR_cc"]: n.madd( "Link", spatial.nodes, @@ -1422,6 +1447,7 @@ def add_storage_and_grids(n, costs): lifetime=costs.at["SMR CC", "lifetime"], ) + if options["SMR"]: n.madd( "Link", nodes + " SMR", @@ -1441,7 +1467,6 @@ def add_land_transport(n, costs): # TODO options? 
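The e_nom clipping in add_storage_and_grids just above keeps one or two extreme storage sites from dominating the minimum gas storage capacity: everything beyond the 98th percentile is pulled down to it. For example, with made-up capacities:

import pandas as pd

e_nom = pd.Series([100.0] * 49 + [10000.0])  # MWh_LHV, one extreme site (illustrative)
capped = e_nom.clip(upper=e_nom.quantile(0.98))
print(capped.max())  # 298.0 -> the outlier is cut to the 98th percentile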
logger.info("Add land transport") - nhours = n.snapshot_weightings.generators.sum() transport = pd.read_csv( snakemake.input.transport_demand, index_col=0, parse_dates=True @@ -1478,8 +1503,8 @@ def add_land_transport(n, costs): n.madd( "Bus", nodes, - location=nodes, suffix=" EV battery", + location=nodes, carrier="Li ion", unit="MWh_el", ) @@ -1567,78 +1592,69 @@ def add_land_transport(n, costs): ) if ice_share > 0: - if "oil" not in n.buses.carrier.unique(): - n.madd( - "Bus", - spatial.oil.nodes, - location=spatial.oil.locations, - carrier="oil", - unit="MWh_LHV", - ) + add_carrier_buses(n, "oil") ice_efficiency = options["transport_internal_combustion_efficiency"] - n.madd( - "Load", - nodes, - suffix=" land transport oil", - bus=spatial.oil.nodes, - carrier="land transport oil", - p_set=ice_share / ice_efficiency * transport[nodes], - ) - - co2 = ( + p_set_land_transport_oil = ( ice_share / ice_efficiency - * transport[nodes].sum().sum() - / nhours - * costs.at["oil", "CO2 intensity"] + * transport[nodes].rename(columns=lambda x: x + " land transport oil") ) - n.add( + if not options["regional_oil_demand"]: + p_set_land_transport_oil = p_set_land_transport_oil.sum(axis=1).to_frame( + name="EU land transport oil" + ) + + n.madd( + "Bus", + spatial.oil.land_transport, + location=spatial.oil.demand_locations, + carrier="land transport oil", + unit="land transport", + ) + + n.madd( "Load", - "land transport oil emissions", - bus="co2 atmosphere", - carrier="land transport oil emissions", - p_set=-co2, + spatial.oil.land_transport, + bus=spatial.oil.land_transport, + carrier="land transport oil", + p_set=p_set_land_transport_oil, + ) + + n.madd( + "Link", + spatial.oil.land_transport, + bus0=spatial.oil.nodes, + bus1=spatial.oil.land_transport, + bus2="co2 atmosphere", + carrier="land transport oil", + efficiency2=costs.at["oil", "CO2 intensity"], + p_nom_extendable=True, ) def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential", "services"] uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) + name = f"{sector} {use}" - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile - - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + electric_heat_supply[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} 
{use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) @@ -1648,7 +1664,7 @@ def build_heat_demand(n): electric_nodes = n.loads.index[n.loads.carrier == "electricity"] n.loads_t.p_set[electric_nodes] = ( n.loads_t.p_set[electric_nodes] - - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes] + - electric_heat_supply.T.groupby(level=1).sum().T[electric_nodes] ) return heat_demand @@ -1661,7 +1677,11 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + overdim_factor = options["overdimension_individual_heating"] + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1701,26 +1721,44 @@ def add_heat(n, costs): for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) + if name == "urban central" and options.get("central_heat_vent"): + n.madd( + "Generator", + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, + carrier=name + " heat vent", + p_nom_extendable=True, + p_max_pu=0, + p_min_pu=-1, + unit="MWh_th", + ) + ## Add heat load for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1729,15 +1767,17 @@ def add_heat(n, costs): if sector in name: heat_load = ( heat_demand[[sector + " water", sector + " space"]] - .groupby(level=1, axis=1) - .sum()[nodes[name]] + .T.groupby(level=1) + .sum() + .T[nodes] .multiply(factor) ) if name == "urban central": heat_load = ( - heat_demand.groupby(level=1, axis=1) - .sum()[nodes[name]] + heat_demand.T.groupby(level=1) + .sum() + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1745,54 +1785,56 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) ## Add heat pumps - heat_pump_type = "air" if "urban" in name else "ground" + heat_pump_types = ["air"] if "urban" in name else ["ground", "air"] - costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" - efficiency = ( - cop[heat_pump_type][nodes[name]] - if options["time_dep_hp_cop"] - else costs.at[costs_name, "efficiency"] - ) + for heat_pump_type in heat_pump_types: + costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" + efficiency = ( + cop[heat_pump_type][nodes] + if options["time_dep_hp_cop"] + else costs.at[costs_name, "efficiency"] + ) - n.madd( - "Link", - nodes[name], - suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - 
bus1=nodes[name] + f" {name} heat", - carrier=f"{name} {heat_pump_type} heat pump", - efficiency=efficiency, - capital_cost=costs.at[costs_name, "efficiency"] - * costs.at[costs_name, "fixed"], - p_nom_extendable=True, - lifetime=costs.at[costs_name, "lifetime"], - ) + n.madd( + "Link", + nodes, + suffix=f" {name} {heat_pump_type} heat pump", + bus0=nodes, + bus1=nodes + f" {name} heat", + carrier=f"{name} {heat_pump_type} heat pump", + efficiency=efficiency, + capital_cost=costs.at[costs_name, "efficiency"] + * costs.at[costs_name, "fixed"] + * overdim_factor, + p_nom_extendable=True, + lifetime=costs.at[costs_name, "lifetime"], + ) if options["tes"]: n.add("Carrier", name + " water tanks") n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1800,29 +1842,20 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, ) - if isinstance(options["tes_tau"], dict): - tes_time_constant_days = options["tes_tau"][name_type] - else: - logger.warning( - "Deprecated: a future version will require you to specify 'tes_tau' ", - "for 'decentral' and 'central' separately.", - ) - tes_time_constant_days = ( - options["tes_tau"] if name_type == "decentral" else 180.0 - ) + tes_time_constant_days = options["tes_tau"][name_type] n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1831,34 +1864,39 @@ def add_heat(n, costs): lifetime=costs.at[name_type + " water tank storage", "lifetime"], ) - if options["boilers"]: + if options["resistive_heaters"]: key = f"{name_type} resistive heater" n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], - capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, p_nom_extendable=True, lifetime=costs.at[key, "lifetime"], ) + if options["boilers"]: key = f"{name_type} gas boiler" n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], efficiency2=costs.at["gas", "CO2 intensity"], - 
capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], + capital_cost=costs.at[key, "efficiency"] + * costs.at[key, "fixed"] + * overdim_factor, lifetime=costs.at[key, "lifetime"], ) @@ -1867,13 +1905,14 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, - capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + capital_cost=costs.at[name_type + " solar thermal", "fixed"] + * overdim_factor, + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1881,10 +1920,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1900,12 +1939,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1937,11 +1976,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -1965,7 +2004,7 @@ def add_heat(n, costs): # demand 'dE' [per unit of original heat demand] for each country and # different retrofitting strengths [additional insulation thickness in m] retro_data = pd.read_csv( - snakemake.input.retro_cost_energy, + snakemake.input.retro_cost, index_col=[0, 1], skipinitialspace=True, header=[0, 1], @@ -1983,7 +2022,7 @@ def add_heat(n, costs): ) w_space["tot"] = ( heat_demand_r["services space"] + heat_demand_r["residential space"] - ) / heat_demand_r.groupby(level=[1], axis=1).sum() + ) / heat_demand_r.T.groupby(level=[1]).sum().T for name in n.loads[ n.loads.carrier.isin([x + " heat" for x in heat_systems]) @@ -1992,11 +2031,21 @@ def add_heat(n, costs): ct = pop_layout.loc[node, "ct"] # weighting 'f' depending on the size of the population at the node - f = urban_fraction[node] if "urban" in name else (1 - urban_fraction[node]) + if "urban central" in name: + f = dist_fraction[node] + elif "urban decentral" in name: + f = urban_fraction[node] - dist_fraction[node] + else: + f = 1 - urban_fraction[node] if f == 0: continue # get sector name ("residential"/"services"/or both "tot" for urban central) - 
sec = [x if x in name else "tot" for x in sectors][0] + if "urban central" in name: + sec = "tot" + if "residential" in name: + sec = "residential" + if "services" in name: + sec = "services" # get floor area at node and region (urban/rural) in m^2 floor_area_node = ( @@ -2009,7 +2058,11 @@ def add_heat(n, costs): space_heat_demand = demand * w_space[sec][node] # normed time profile of space heat demand 'space_pu' (values between 0-1), # p_max_pu/p_min_pu of retrofitting generators - space_pu = (space_heat_demand / space_heat_demand.max()).to_frame(name=node) + space_pu = ( + (space_heat_demand / space_heat_demand.max()) + .to_frame(name=node) + .fillna(0) + ) # minimum heat demand 'dE' after retrofitting in units of original heat demand (values between 0-1) dE = retro_data.loc[(ct, sec), ("dE")] @@ -2021,6 +2074,9 @@ def add_heat(n, costs): * floor_area_node / ((1 - dE) * space_heat_demand.max()) ) + if space_heat_demand.max() == 0: + capital_cost = capital_cost.apply(lambda b: 0 if b == np.inf else b) + # number of possible retrofitting measures 'strengths' (set in list at config.yaml 'l_strength') # given in additional insulation thickness [m] # for each measure, a retrofitting generator is added at the node @@ -2033,14 +2089,15 @@ def add_heat(n, costs): strengths = strengths.drop(s) # reindex normed time profile of space heat demand back to hourly resolution - space_pu = space_pu.reindex(index=heat_demand.index).fillna(method="ffill") + space_pu = space_pu.reindex(index=heat_demand.index).ffill() # add for each retrofitting strength a generator with heat generation profile following the profile of the heat demand for strength in strengths: + node_name = " ".join(name.split(" ")[2::]) n.madd( "Generator", [node], - suffix=" retrofitting " + strength + " " + name[6::], + suffix=" retrofitting " + strength + " " + node_name, bus=name, carrier="retrofitting", p_nom_extendable=True, @@ -2054,50 +2111,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress -
logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2164,12 +2177,41 @@ def add_biomass(n, costs): bus1=spatial.gas.nodes, bus2="co2 atmosphere", carrier="biogas to gas", - capital_cost=costs.loc["biogas upgrading", "fixed"], - marginal_cost=costs.loc["biogas upgrading", "VOM"], + capital_cost=costs.at["biogas", "fixed"] + + costs.at["biogas upgrading", "fixed"], + marginal_cost=costs.at["biogas upgrading", "VOM"], + efficiency=costs.at["biogas", "efficiency"], efficiency2=-costs.at["gas", "CO2 intensity"], p_nom_extendable=True, ) + if options.get("biogas_upgrading_cc"): + # For costs, assume the CO2 from upgrading is pure, as in amine scrubbing, i.e. upgrading with and without CC is + # equivalent. Add the cost of biomass CHP capture because biogas is often small-scale and decentralised, and hence + # further from e.g. a CO2 grid or buyers; this is a proxy for the added cost of e.g. a raw biogas pipeline to a central upgrading facility + n.madd( + "Link", + spatial.gas.biogas_to_gas_cc, + bus0=spatial.gas.biogas, + bus1=spatial.gas.nodes, + bus2=spatial.co2.nodes, + bus3="co2 atmosphere", + carrier="biogas to gas CC", + capital_cost=costs.at["biogas CC", "fixed"] + + costs.at["biogas upgrading", "fixed"] + + costs.at["biomass CHP capture", "fixed"] + * costs.at["biogas CC", "CO2 stored"], + marginal_cost=costs.at["biogas CC", "VOM"] + + costs.at["biogas upgrading", "VOM"], + efficiency=costs.at["biogas CC", "efficiency"], + efficiency2=costs.at["biogas CC", "CO2 stored"] + * costs.at["biogas CC", "capture rate"], + efficiency3=-costs.at["gas", "CO2 intensity"] + - costs.at["biogas CC", "CO2 stored"] + * costs.at["biogas CC", "capture rate"], + p_nom_extendable=True, + ) + if options["biomass_transport"]: # add biomass transport transport_costs = pd.read_csv( @@ -2219,6 +2261,14 @@ def add_biomass(n, costs): marginal_cost=costs.at["solid biomass", "fuel"] + bus_transport_costs * average_distance, ) + n.add( + "GlobalConstraint", + "biomass limit", + carrier_attribute="solid biomass", + sense="<=", + constant=biomass_potentials["solid biomass"].sum(), + type="operational_limit", + ) # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] @@ -2278,7 +2328,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2287,14 +2337,16 @@ def add_biomass(n, costs): n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] - * costs.at["biomass boiler", "fixed"], + * costs.at["biomass boiler", "fixed"] + * options["overdimension_individual_heating"], + marginal_cost=costs.at["biomass boiler", "pelletizing cost"], lifetime=costs.at["biomass boiler", "lifetime"], ) @@ -2314,7 +2366,7 @@ def add_biomass(n, costs): + costs.at["BtL", "CO2 stored"], p_nom_extendable=True, 
capital_cost=costs.at["BtL", "fixed"], - marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.at["BtL", "VOM"], ) # TODO: Update with energy penalty @@ -2335,7 +2387,7 @@ def add_biomass(n, costs): p_nom_extendable=True, capital_cost=costs.at["BtL", "fixed"] + costs.at["biomass CHP capture", "fixed"] * costs.at["BtL", "CO2 stored"], - marginal_cost=costs.at["BtL", "efficiency"] * costs.loc["BtL", "VOM"], + marginal_cost=costs.at["BtL", "efficiency"] * costs.at["BtL", "VOM"], ) # BioSNG from solid biomass @@ -2354,7 +2406,7 @@ def add_biomass(n, costs): + costs.at["BioSNG", "CO2 stored"], p_nom_extendable=True, capital_cost=costs.at["BioSNG", "fixed"], - marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.at["BioSNG", "VOM"], ) # TODO: Update with energy penalty for CC @@ -2378,7 +2430,7 @@ def add_biomass(n, costs): capital_cost=costs.at["BioSNG", "fixed"] + costs.at["biomass CHP capture", "fixed"] * costs.at["BioSNG", "CO2 stored"], - marginal_cost=costs.at["BioSNG", "efficiency"] * costs.loc["BioSNG", "VOM"], + marginal_cost=costs.at["BioSNG", "efficiency"] * costs.at["BioSNG", "VOM"], ) @@ -2430,9 +2482,14 @@ def add_industry(n, costs): efficiency=1.0, ) + if len(spatial.biomass.industry_cc) <= 1 and len(spatial.co2.nodes) > 1: + link_names = nodes + " " + spatial.biomass.industry_cc + else: + link_names = spatial.biomass.industry_cc + n.madd( "Link", - spatial.biomass.industry_cc, + link_names, bus0=spatial.biomass.nodes, bus1=spatial.biomass.industry, bus2="co2 atmosphere", @@ -2611,6 +2668,8 @@ def add_industry(n, costs): p_min_pu=options.get("min_part_load_methanolisation", 0), capital_cost=costs.at["methanolisation", "fixed"] * options["MWh_MeOH_per_MWh_H2"], # EUR/MW_H2/a + marginal_cost=options["MWh_MeOH_per_MWh_H2"] + * costs.at["methanolisation", "VOM"], lifetime=costs.at["methanolisation", "lifetime"], efficiency=options["MWh_MeOH_per_MWh_H2"], efficiency2=-options["MWh_MeOH_per_MWh_H2"] / options["MWh_MeOH_per_MWh_e"], @@ -2620,48 +2679,44 @@ def add_industry(n, costs): efficiency = ( options["shipping_oil_efficiency"] / options["shipping_methanol_efficiency"] ) - p_set_methanol = shipping_methanol_share * p_set.sum() * efficiency + + p_set_methanol = ( + shipping_methanol_share + * p_set.rename(lambda x: x + " shipping methanol") + * efficiency + ) + + if not options["regional_methanol_demand"]: + p_set_methanol = p_set_methanol.sum() + + n.madd( + "Bus", + spatial.methanol.shipping, + location=spatial.methanol.demand_locations, + carrier="shipping methanol", + unit="MWh_LHV", + ) n.madd( "Load", - spatial.methanol.nodes, - suffix=" shipping methanol", - bus=spatial.methanol.nodes, + spatial.methanol.shipping, + bus=spatial.methanol.shipping, carrier="shipping methanol", p_set=p_set_methanol, ) - # CO2 intensity methanol based on stoichiometric calculation with 22.7 GJ/t methanol (32 g/mol), CO2 (44 g/mol), 277.78 MWh/TJ = 0.218 t/MWh - co2 = p_set_methanol / options["MWh_MeOH_per_tCO2"] - - n.add( - "Load", - "shipping methanol emissions", - bus="co2 atmosphere", - carrier="shipping methanol emissions", - p_set=-co2, - ) - - if shipping_oil_share: - p_set_oil = shipping_oil_share * p_set.sum() - n.madd( - "Load", - spatial.oil.nodes, - suffix=" shipping oil", - bus=spatial.oil.nodes, - carrier="shipping oil", - p_set=p_set_oil, - ) - - co2 = p_set_oil * costs.at["oil", "CO2 intensity"] - - n.add( - "Load", - "shipping oil 
emissions", - bus="co2 atmosphere", - carrier="shipping oil emissions", - p_set=-co2, + "Link", + spatial.methanol.shipping, + bus0=spatial.methanol.nodes, + bus1=spatial.methanol.shipping, + bus2="co2 atmosphere", + carrier="shipping methanol", + p_nom_extendable=True, + efficiency2=1 + / options[ + "MWh_MeOH_per_tCO2" + ], # CO2 intensity methanol based on stoichiometric calculation with 22.7 GJ/t methanol (32 g/mol), CO2 (44 g/mol), 277.78 MWh/TJ = 0.218 t/MWh ) if "oil" not in n.buses.carrier.unique(): @@ -2677,7 +2732,8 @@ def add_industry(n, costs): # could correct to e.g. 0.001 EUR/kWh * annuity and O&M n.madd( "Store", - [oil_bus + " Store" for oil_bus in spatial.oil.nodes], + spatial.oil.nodes, + suffix=" Store", bus=spatial.oil.nodes, e_nom_extendable=True, e_cyclic=True, @@ -2694,8 +2750,41 @@ def add_industry(n, costs): marginal_cost=costs.at["oil", "fuel"], ) + if shipping_oil_share: + p_set_oil = shipping_oil_share * p_set.rename(lambda x: x + " shipping oil") + + if not options["regional_oil_demand"]: + p_set_oil = p_set_oil.sum() + + n.madd( + "Bus", + spatial.oil.shipping, + location=spatial.oil.demand_locations, + carrier="shipping oil", + unit="MWh_LHV", + ) + + n.madd( + "Load", + spatial.oil.shipping, + bus=spatial.oil.shipping, + carrier="shipping oil", + p_set=p_set_oil, + ) + + n.madd( + "Link", + spatial.oil.shipping, + bus0=spatial.oil.nodes, + bus1=spatial.oil.shipping, + bus2="co2 atmosphere", + carrier="shipping oil", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], + ) + if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2705,16 +2794,17 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] - * costs.at["decentral oil boiler", "fixed"], + * costs.at["decentral oil boiler", "fixed"] + * options["overdimension_individual_heating"], lifetime=costs.at["decentral oil boiler", "lifetime"], ) @@ -2728,6 +2818,8 @@ def add_industry(n, costs): efficiency=costs.at["Fischer-Tropsch", "efficiency"], capital_cost=costs.at["Fischer-Tropsch", "fixed"] * costs.at["Fischer-Tropsch", "efficiency"], # EUR/MW_H2/a + marginal_cost=costs.at["Fischer-Tropsch", "efficiency"] + * costs.at["Fischer-Tropsch", "VOM"], efficiency2=-costs.at["oil", "CO2 intensity"] * costs.at["Fischer-Tropsch", "efficiency"], p_nom_extendable=True, @@ -2735,53 +2827,101 @@ def add_industry(n, costs): lifetime=costs.at["Fischer-Tropsch", "lifetime"], ) + # naphtha demand_factor = options.get("HVC_demand_factor", 1) - p_set = demand_factor * industrial_demand.loc[nodes, "naphtha"].sum() / nhours if demand_factor != 1: logger.warning(f"Changing HVC demand by {demand_factor*100-100:+.2f}%.") - n.madd( - "Load", - ["naphtha for industry"], - bus=spatial.oil.nodes, - carrier="naphtha for industry", - p_set=p_set, - ) - - demand_factor = options.get("aviation_demand_factor", 1) - all_aviation = ["total international aviation", "total domestic aviation"] - p_set = ( + p_set_plastics = ( demand_factor - * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() - * 1e6 + * 
industrial_demand.loc[nodes, "naphtha"].rename( + lambda x: x + " naphtha for industry" + ) / nhours ) + + if not options["regional_oil_demand"]: + p_set_plastics = p_set_plastics.sum() + + n.madd( + "Bus", + spatial.oil.naphtha, + location=spatial.oil.demand_locations, + carrier="naphtha for industry", + unit="MWh_LHV", + ) + + n.madd( + "Load", + spatial.oil.naphtha, + bus=spatial.oil.naphtha, + carrier="naphtha for industry", + p_set=p_set_plastics, + ) + + # some of the CO2 from naphtha is a process emission from the steam cracker + # the rest of the CO2 is released to the atmosphere, either in waste-to-energy plants or through decay + process_co2_per_naphtha = ( + industrial_demand.loc[nodes, "process emission from feedstock"].sum() + / industrial_demand.loc[nodes, "naphtha"].sum() + ) + emitted_co2_per_naphtha = costs.at["oil", "CO2 intensity"] - process_co2_per_naphtha + + n.madd( + "Link", + spatial.oil.naphtha, + bus0=spatial.oil.nodes, + bus1=spatial.oil.naphtha, + bus2="co2 atmosphere", + bus3=spatial.co2.process_emissions, + carrier="naphtha for industry", + p_nom_extendable=True, + efficiency2=emitted_co2_per_naphtha, + efficiency3=process_co2_per_naphtha, + ) + + # aviation + demand_factor = options.get("aviation_demand_factor", 1) if demand_factor != 1: logger.warning(f"Changing aviation demand by {demand_factor*100-100:+.2f}%.") + all_aviation = ["total international aviation", "total domestic aviation"] + + p_set = ( + demand_factor + * pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1) + * 1e6 + / nhours + ).rename(lambda x: x + " kerosene for aviation") + + if not options["regional_oil_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.oil.kerosene, + location=spatial.oil.demand_locations, + carrier="kerosene for aviation", + unit="MWh_LHV", + ) + n.madd( "Load", - ["kerosene for aviation"], - bus=spatial.oil.nodes, + spatial.oil.kerosene, + bus=spatial.oil.kerosene, carrier="kerosene for aviation", p_set=p_set, ) - # NB: CO2 gets released again to atmosphere when plastics decay or kerosene is burned - # except for the process emissions when naphtha is used for petrochemicals, which can be captured with other industry process emissions - # tco2 per hour - co2_release = ["naphtha for industry", "kerosene for aviation"] - co2 = ( - n.loads.loc[co2_release, "p_set"].sum() * costs.at["oil", "CO2 intensity"] - - industrial_demand.loc[nodes, "process emission from feedstock"].sum() / nhours - ) - - n.add( - "Load", - "oil emissions", - bus="co2 atmosphere", - carrier="oil emissions", - p_set=-co2, + n.madd( + "Link", + spatial.oil.kerosene, + bus0=spatial.oil.nodes, + bus1=spatial.oil.kerosene, + bus2="co2 atmosphere", + carrier="kerosene for aviation", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], ) # TODO simplify bus expression @@ -2790,9 +2930,11 @@ def add_industry(n, costs): nodes, suffix=" low-temperature heat for industry", bus=[ - node + " urban central heat" - if node + " urban central heat" in n.buses.index - else node + " services urban decentral heat" + ( + node + " urban central heat" + if node + " urban central heat" in n.buses.index + else node + " services urban decentral heat" + ) for node in nodes ], carrier="low-temperature heat for industry", @@ -2832,19 +2974,16 @@ def add_industry(n, costs): unit="t_co2", ) - sel = ["process emission", "process emission from feedstock"] if options["co2_spatial"] or options["co2network"]: p_set = ( - -industrial_demand.loc[nodes, sel] - .sum(axis=1) - .rename(index=lambda x: x + " process emissions") + 
-industrial_demand.loc[nodes, "process emission"].rename( + index=lambda x: x + " process emissions" + ) / nhours ) else: - p_set = -industrial_demand.loc[nodes, sel].sum(axis=1).sum() / nhours + p_set = -industrial_demand.loc[nodes, "process emission"].sum() / nhours - # this should be process emissions fossil+feedstock - # then need load on atmosphere for feedstock emissions that are currently going to atmosphere via Link Fischer-Tropsch demand n.madd( "Load", spatial.co2.process_emissions, @@ -2898,19 +3037,49 @@ def add_industry(n, costs): p_set=p_set, ) + primary_steel = get( + snakemake.config["industry"]["St_primary_fraction"], investment_year + ) + dri_steel = get(snakemake.config["industry"]["DRI_fraction"], investment_year) + bof_steel = primary_steel - dri_steel + + if bof_steel > 0: + add_carrier_buses(n, "coal") + + mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance + p_set = ( + industrial_demand["coal"].sum() + + mwh_coal_per_mwh_coke * industrial_demand["coke"].sum() + ) / nhours + + n.madd( + "Load", + spatial.coal.nodes, + suffix=" for industry", + bus=spatial.coal.nodes, + carrier="coal for industry", + p_set=p_set, + ) + def add_waste_heat(n): # TODO options? logger.info("Add possibility to use industrial waste heat in district heating") + cf_industry = snakemake.params.industry # AC buses with district heating urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty: urban_central = urban_central.str[: -len(" urban central heat")] + link_carriers = n.links.carrier.unique() + # TODO what is the 0.95 and should it be a config option? - if options["use_fischer_tropsch_waste_heat"]: + if ( + options["use_fischer_tropsch_waste_heat"] + and "Fischer-Tropsch" in link_carriers + ): n.links.loc[urban_central + " Fischer-Tropsch", "bus3"] = ( urban_central + " urban central heat" ) @@ -2918,8 +3087,48 @@ def add_waste_heat(n): 0.95 - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency"] ) + if options["use_methanation_waste_heat"] and "Sabatier" in link_carriers: + n.links.loc[urban_central + " Sabatier", "bus3"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " Sabatier", "efficiency3"] = ( + 0.95 - n.links.loc[urban_central + " Sabatier", "efficiency"] + ) + + # DEA quotes 15% of total input (11% of which are high-value heat) + if options["use_haber_bosch_waste_heat"] and "Haber-Bosch" in link_carriers: + n.links.loc[urban_central + " Haber-Bosch", "bus3"] = ( + urban_central + " urban central heat" + ) + total_energy_input = ( + cf_industry["MWh_H2_per_tNH3_electrolysis"] + + cf_industry["MWh_elec_per_tNH3_electrolysis"] + ) / cf_industry["MWh_NH3_per_tNH3"] + electricity_input = ( + cf_industry["MWh_elec_per_tNH3_electrolysis"] + / cf_industry["MWh_NH3_per_tNH3"] + ) + n.links.loc[urban_central + " Haber-Bosch", "efficiency3"] = ( + 0.15 * total_energy_input / electricity_input + ) + + if ( + options["use_methanolisation_waste_heat"] + and "methanolisation" in link_carriers + ): + n.links.loc[urban_central + " methanolisation", "bus4"] = ( + urban_central + " urban central heat" + ) + n.links.loc[urban_central + " methanolisation", "efficiency4"] = ( + costs.at["methanolisation", "heat-output"] + / costs.at["methanolisation", "hydrogen-input"] + ) + # TODO integrate usable waste heat efficiency into technology-data from DEA - if options.get("use_electrolysis_waste_heat", False): + if ( + options.get("use_electrolysis_waste_heat", False) + and "H2 Electrolysis" in link_carriers + ): 
n.links.loc[urban_central + " H2 Electrolysis", "bus2"] = ( urban_central + " urban central heat" ) @@ -2927,7 +3136,7 @@ def add_waste_heat(n): 0.84 - n.links.loc[urban_central + " H2 Electrolysis", "efficiency"] ) - if options["use_fuel_cell_waste_heat"]: + if options["use_fuel_cell_waste_heat"] and "H2 Fuel Cell" in link_carriers: n.links.loc[urban_central + " H2 Fuel Cell", "bus2"] = ( urban_central + " urban central heat" ) @@ -2981,9 +3190,9 @@ def add_agriculture(n, costs): f"Total agriculture machinery shares sum up to {total_share:.2%}, corresponding to increased or decreased demand assumptions." ) - machinery_nodal_energy = pop_weighted_energy_totals.loc[ - nodes, "total agriculture machinery" - ] + machinery_nodal_energy = ( + pop_weighted_energy_totals.loc[nodes, "total agriculture machinery"] * 1e6 + ) if electric_share > 0: efficiency_gain = ( @@ -2997,36 +3206,44 @@ def add_agriculture(n, costs): suffix=" agriculture machinery electric", bus=nodes, carrier="agriculture machinery electric", - p_set=electric_share - / efficiency_gain - * machinery_nodal_energy - * 1e6 - / nhours, + p_set=electric_share / efficiency_gain * machinery_nodal_energy / nhours, ) if oil_share > 0: + p_set = ( + oil_share + * machinery_nodal_energy.rename(lambda x: x + " agriculture machinery oil") + / nhours + ) + + if not options["regional_oil_demand"]: + p_set = p_set.sum() + + n.madd( + "Bus", + spatial.oil.agriculture_machinery, + location=spatial.oil.demand_locations, + carrier="agriculture machinery oil", + unit="MWh_LHV", + ) + n.madd( "Load", - ["agriculture machinery oil"], - bus=spatial.oil.nodes, + spatial.oil.agriculture_machinery, + bus=spatial.oil.agriculture_machinery, carrier="agriculture machinery oil", - p_set=oil_share * machinery_nodal_energy.sum() * 1e6 / nhours, + p_set=p_set, ) - co2 = ( - oil_share - * machinery_nodal_energy.sum() - * 1e6 - / nhours - * costs.at["oil", "CO2 intensity"] - ) - - n.add( - "Load", - "agriculture machinery oil emissions", - bus="co2 atmosphere", - carrier="agriculture machinery oil emissions", - p_set=-co2, + n.madd( + "Link", + spatial.oil.agriculture_machinery, + bus0=spatial.oil.nodes, + bus1=spatial.oil.agriculture_machinery, + bus2="co2 atmosphere", + carrier="agriculture machinery oil", + p_nom_extendable=True, + efficiency2=costs.at["oil", "CO2 intensity"], ) @@ -3049,7 +3266,8 @@ def remove_h2_network(n): def maybe_adjust_costs_and_potentials(n, opts): for o in opts: - if "+" not in o: + flags = ["+e", "+p", "+m", "+c"] + if all(flag not in o for flag in flags): continue oo = o.split("+") carrier_list = np.hstack( @@ -3063,7 +3281,12 @@ def maybe_adjust_costs_and_potentials(n, opts): suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) if oo[0].startswith(tuple(suptechs)): carrier = oo[0] - attr_lookup = {"p": "p_nom_max", "e": "e_nom_max", "c": "capital_cost"} + attr_lookup = { + "p": "p_nom_max", + "e": "e_nom_max", + "c": "capital_cost", + "m": "marginal_cost", + } attr = attr_lookup[oo[1][0]] factor = float(oo[1][1:]) # beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan @@ -3095,24 +3318,24 @@ def limit_individual_line_extension(n, maxext): aggregate_dict = { - "p_nom": "sum", - "s_nom": "sum", + "p_nom": pd.Series.sum, + "s_nom": pd.Series.sum, "v_nom": "max", "v_mag_pu_max": "min", "v_mag_pu_min": "max", - "p_nom_max": "sum", - "s_nom_max": "sum", - "p_nom_min": "sum", - "s_nom_min": "sum", + "p_nom_max": pd.Series.sum, + "s_nom_max": pd.Series.sum, + "p_nom_min": pd.Series.sum, + "s_nom_min": pd.Series.sum, 
"v_ang_min": "max", "v_ang_max": "min", "terrain_factor": "mean", "num_parallel": "sum", "p_set": "sum", "e_initial": "sum", - "e_nom": "sum", - "e_nom_max": "sum", - "e_nom_min": "sum", + "e_nom": pd.Series.sum, + "e_nom_max": pd.Series.sum, + "e_nom_min": pd.Series.sum, "state_of_charge_initial": "sum", "state_of_charge_set": "sum", "inflow": "sum", @@ -3169,18 +3392,16 @@ def cluster_heat_buses(n): # cluster heat nodes # static dataframe agg = define_clustering(df.columns, aggregate_dict) - df = df.groupby(level=0).agg(agg, **agg_group_kwargs) + df = df.groupby(level=0).agg(agg, numeric_only=False) # time-varying data pnl = c.pnl agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict) for k in pnl.keys(): - pnl[k].rename( - columns=lambda x: x.replace("residential ", "").replace( - "services ", "" - ), - inplace=True, - ) - pnl[k] = pnl[k].groupby(level=0, axis=1).agg(agg[k], **agg_group_kwargs) + + def renamer(s): + return s.replace("residential ", "").replace("services ", "") + + pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], numeric_only=False).T # remove unclustered assets of service/residential to_drop = c.df.index.difference(df.index) @@ -3206,7 +3427,7 @@ def apply_time_segmentation( """ try: import tsam.timeseriesaggregation as tsam - except: + except ImportError: raise ModuleNotFoundError( "Optional dependency 'tsam' not found." "Install via 'pip install tsam'" ) @@ -3244,6 +3465,7 @@ def apply_time_segmentation( sn_weightings = pd.Series( weightings, index=snapshots, name="weightings", dtype="float64" ) + logger.info(f"Distribution of snapshot durations:\n{weightings.value_counts()}") n.set_snapshots(sn_weightings.index) n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0) @@ -3285,6 +3507,56 @@ def set_temporal_aggregation(n, opts, solver_name): return n +def lossy_bidirectional_links(n, carrier, efficiencies={}): + "Split bidirectional links into two unidirectional links to include transmission losses." + + carrier_i = n.links.query("carrier == @carrier").index + + if ( + not any((v != 1.0) or (v >= 0) for v in efficiencies.values()) + or carrier_i.empty + ): + return + + efficiency_static = efficiencies.get("efficiency_static", 1) + efficiency_per_1000km = efficiencies.get("efficiency_per_1000km", 1) + compression_per_1000km = efficiencies.get("compression_per_1000km", 0) + + logger.info( + f"Specified losses for {carrier} transmission " + f"(static: {efficiency_static}, per 1000km: {efficiency_per_1000km}, compression per 1000km: {compression_per_1000km}). " + "Splitting bidirectional links." 
+ ) + + n.links.loc[carrier_i, "p_min_pu"] = 0 + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static + * efficiency_per_1000km ** (n.links.loc[carrier_i, "length"] / 1e3) + ) + rev_links = ( + n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) + ) + rev_links["length_original"] = rev_links["length"] + rev_links["capital_cost"] = 0 + rev_links["length"] = 0 + rev_links["reversed"] = True + rev_links.index = rev_links.index.map(lambda x: x + "-reversed") + + n.links = pd.concat([n.links, rev_links], sort=False) + n.links["reversed"] = n.links["reversed"].fillna(False) + n.links["length_original"] = n.links["length_original"].fillna(n.links.length) + + # do compression losses after concatenation to take electricity consumption at bus0 in either direction + carrier_i = n.links.query("carrier == @carrier").index + if compression_per_1000km > 0: + n.links.loc[carrier_i, "bus2"] = n.links.loc[carrier_i, "bus0"].map( + n.buses.location + ) # electricity + n.links.loc[carrier_i, "efficiency2"] = ( + -compression_per_1000km * n.links.loc[carrier_i, "length_original"] / 1e3 + ) + + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -3294,9 +3566,9 @@ if __name__ == "__main__": configfiles="test/config.overnight.yaml", simpl="", opts="", - clusters="5", - ll="v1.5", - sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1", + clusters="37", + ll="v1.0", + sector_opts="CO2L0-24H-T-H-B-I-A-dist1", planning_horizons="2030", ) @@ -3331,14 +3603,16 @@ if __name__ == "__main__": spatial = define_spatial(pop_layout.index, options) - if snakemake.params.foresight == "myopic": + if snakemake.params.foresight in ["myopic", "perfect"]: add_lifetime_wind_solar(n, costs) conventional = snakemake.params.conventional_carriers for carrier in conventional: add_carrier_buses(n, carrier) - add_co2_tracking(n, options) + add_eu_bus(n) + + add_co2_tracking(n, costs, options) add_generation(n, costs) @@ -3346,12 +3620,6 @@ if __name__ == "__main__": # TODO merge with opts cost adjustment below for o in opts: - if o[:4] == "wave": - wave_cost_factor = float(o[4:].replace("p", ".").replace("m", "-")) - logger.info( - f"Including wave generators with cost factor of {wave_cost_factor}" - ) - add_wave(n, wave_cost_factor) if o[:4] == "dist": options["electricity_distribution_grid"] = True options["electricity_distribution_grid_cost_factor"] = float( @@ -3363,6 +3631,15 @@ if __name__ == "__main__": if "nodistrict" in opts: options["district_heating"]["progress"] = 0.0 + if "nowasteheat" in opts: + logger.info("Disabling waste heat.") + options["use_fischer_tropsch_waste_heat"] = False + options["use_methanolisation_waste_heat"] = False + options["use_haber_bosch_waste_heat"] = False + options["use_methanation_waste_heat"] = False + options["use_fuel_cell_waste_heat"] = False + options["use_electrolysis_waste_heat"] = False + if "T" in opts: add_land_transport(n, costs) @@ -3378,7 +3655,7 @@ if __name__ == "__main__": if "I" in opts: add_industry(n, costs) - if "I" in opts and "H" in opts: + if "H" in opts: add_waste_heat(n) if "A" in opts: # requires H and I @@ -3408,7 +3685,7 @@ if __name__ == "__main__": if "cb" not in o: continue limit_type = "carbon budget" - fn = "results/" + snakemake.params.RDIR + "csvs/carbon_budget_distribution.csv" + fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv" if not os.path.exists(fn): emissions_scope = snakemake.params.emissions_scope report_year = snakemake.params.eurostat_report_year 
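As a quick orientation for the new transmission-loss handling, the following minimal sketch (not part of the diff) reproduces the arithmetic of `lossy_bidirectional_links` above; all parameter values are illustrative assumptions, not configured defaults:

# Sketch only: efficiency scaling of a split bidirectional link,
# assuming a 500 km link and made-up loss parameters.
length_km = 500.0
efficiency_static = 1.0        # assumed static transport efficiency
efficiency_per_1000km = 0.98   # assumed multiplicative efficiency per 1000 km
compression_per_1000km = 0.01  # assumed electricity demand per 1000 km

# both the forward link and its "-reversed" copy receive this efficiency
efficiency = efficiency_static * efficiency_per_1000km ** (length_km / 1e3)
# compression electricity is drawn at bus2 via a negative efficiency2
efficiency2 = -compression_per_1000km * length_km / 1e3
print(f"{efficiency:.4f} {efficiency2:.4f}")  # 0.9899 -0.0050

Note that the reversed copies created above carry capital_cost=0 and length=0, so investment costs are counted only once; the forward and reversed capacities are then synchronised by `add_lossy_bidirectional_link_constraints` in scripts/solve_network.py further below.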
@@ -3452,7 +3729,19 @@ if __name__ == "__main__": if options["electricity_grid_connection"]: add_electricity_grid_connection(n, costs) - first_year_myopic = (snakemake.params.foresight == "myopic") and ( + for k, v in options["transmission_efficiency"].items(): + lossy_bidirectional_links(n, k, v) + + # Workaround: Remove lines with conflicting (and unrealistic) properties + # cf. https://github.com/PyPSA/pypsa-eur/issues/444 + if snakemake.config["solving"]["options"]["transmission_losses"]: + idx = n.lines.query("num_parallel == 0").index + logger.info( + f"Removing {len(idx)} line(s) with properties conflicting with transmission losses functionality." + ) + n.mremove("Line", idx) + + first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and ( snakemake.params.planning_horizons[0] == investment_year ) @@ -3462,5 +3751,6 @@ if __name__ == "__main__": n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) sanitize_carriers(n, snakemake.config) + sanitize_locations(n) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index cb3bdc11..25894063 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -36,7 +36,7 @@ import logging import tarfile from pathlib import Path -from _helpers import configure_logging, progress_retrieve, set_scenario_config +from _helpers import configure_logging, progress_retrieve, validate_checksum logger = logging.getLogger(__name__) @@ -65,6 +65,8 @@ if __name__ == "__main__": disable_progress = snakemake.config["run"].get("disable_progressbar", False) progress_retrieve(url, tarball_fn, disable=disable_progress) + validate_checksum(tarball_fn, url) + logger.info("Extracting databundle.") tarfile.open(tarball_fn).extractall(to_fn) diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index 42b726db..d984b9fe 100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -11,7 +11,7 @@ import logging import zipfile from pathlib import Path -from _helpers import progress_retrieve +from _helpers import progress_retrieve, validate_checksum logger = logging.getLogger(__name__) @@ -35,6 +35,8 @@ if __name__ == "__main__": disable_progress = snakemake.config["run"].get("disable_progressbar", False) progress_retrieve(url, zip_fn, disable=disable_progress) + validate_checksum(zip_fn, url) + logger.info("Extracting databundle.") zipfile.ZipFile(zip_fn).extractall(to_fn) diff --git a/scripts/retrieve_irena.py b/scripts/retrieve_irena.py new file mode 100644 index 00000000..7b123475 --- /dev/null +++ b/scripts/retrieve_irena.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Thomas Gilon (Climact) +# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +This rule downloads the existing capacities from `IRENASTAT `_ and extracts them into the ``data/existing_capacities`` sub-directory. + +**Relevant Settings** + +.. code:: yaml + + enable: + retrieve_irena: + +.. 
seealso:: + Documentation of the configuration file ``config.yaml`` at + :ref:`enable_cf` + +**Outputs** + +- ``data/existing_capacities``: existing capacities for offwind, onwind and solar + +""" + +import logging + +import pandas as pd +from _helpers import configure_logging + +logger = logging.getLogger(__name__) + +REGIONS = [ + "Albania", + "Austria", + "Belgium", + "Bosnia and Herzegovina", + "Bulgaria", + "Croatia", + "Czechia", + "Denmark", + "Estonia", + "Finland", + "France", + "Germany", + "Greece", + "Hungary", + "Ireland", + "Italy", + "Latvia", + "Lithuania", + "Luxembourg", + "Montenegro", + # "Netherlands", + "Netherlands (Kingdom of the)", + "North Macedonia", + "Norway", + "Poland", + "Portugal", + "Romania", + "Serbia", + "Slovakia", + "Slovenia", + "Spain", + "Sweden", + "Switzerland", + # "United Kingdom", + "United Kingdom of Great Britain and Northern Ireland (the)", +] + +REGIONS_DICT = { + "Bosnia and Herzegovina": "Bosnia Herzg", + "Netherlands (Kingdom of the)": "Netherlands", + "United Kingdom of Great Britain and Northern Ireland (the)": "UK", +} + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_irena") + configure_logging(snakemake) + + irena_raw = pd.read_csv( + "https://pxweb.irena.org:443/sq/99e64b12-fe03-4a7b-92ea-a22cc3713b92", + skiprows=2, + index_col=[0, 1, 3], + encoding="latin-1", + ) + + var = "Installed electricity capacity (MW)" + irena = irena_raw[var].unstack(level=2).reset_index(level=1).replace(0, "") + + irena = irena[irena.index.isin(REGIONS)] + irena.rename(index=REGIONS_DICT, inplace=True) + + df_offwind = irena[irena.Technology.str.contains("Offshore")].drop( + columns=["Technology"] + ) + df_onwind = irena[irena.Technology.str.contains("Onshore")].drop( + columns=["Technology"] + ) + df_pv = irena[irena.Technology.str.contains("Solar")].drop(columns=["Technology"]) + + df_offwind.to_csv(snakemake.output["offwind"]) + df_onwind.to_csv(snakemake.output["onwind"]) + df_pv.to_csv(snakemake.output["solar"]) diff --git a/scripts/retrieve_monthly_fuel_prices.py b/scripts/retrieve_monthly_fuel_prices.py index 887014cc..4a1a7830 100644 --- a/scripts/retrieve_monthly_fuel_prices.py +++ b/scripts/retrieve_monthly_fuel_prices.py @@ -7,13 +7,12 @@ Retrieve monthly fuel prices from Destatis. """ import logging - -logger = logging.getLogger(__name__) - from pathlib import Path from _helpers import configure_logging, progress_retrieve, set_scenario_config +logger = logging.getLogger(__name__) + if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake diff --git a/scripts/retrieve_sector_databundle.py b/scripts/retrieve_sector_databundle.py index 1beed478..defc806c 100644 --- a/scripts/retrieve_sector_databundle.py +++ b/scripts/retrieve_sector_databundle.py @@ -7,13 +7,17 @@ Retrieve and extract data bundle for sector-coupled studies. 
""" import logging - -logger = logging.getLogger(__name__) - import tarfile from pathlib import Path -from _helpers import configure_logging, progress_retrieve, set_scenario_config +from _helpers import ( + configure_logging, + progress_retrieve, + validate_checksum, + set_scenario_config, +) + +logger = logging.getLogger(__name__) if __name__ == "__main__": if "snakemake" not in globals(): @@ -35,6 +39,8 @@ if __name__ == "__main__": disable_progress = snakemake.config["run"].get("disable_progressbar", False) progress_retrieve(url, tarball_fn, disable=disable_progress) + validate_checksum(tarball_fn, url) + logger.info("Extracting databundle.") tarfile.open(tarball_fn).extractall(to_fn) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index 440145ff..042cfc3a 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -86,7 +86,7 @@ The rule :mod:`simplify_network` does up to four things: """ import logging -from functools import partial, reduce +from functools import reduce import numpy as np import pandas as pd @@ -152,22 +152,20 @@ def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_fact if n.links.empty: return {} - connection_costs_per_link = {} - - for tech in renewable_carriers: - if tech.startswith("offwind"): - connection_costs_per_link[tech] = ( - n.links.length - * length_factor - * ( - n.links.underwater_fraction - * costs.at[tech + "-connection-submarine", "capital_cost"] - + (1.0 - n.links.underwater_fraction) - * costs.at[tech + "-connection-underground", "capital_cost"] - ) + return { + tech: ( + n.links.length + * length_factor + * ( + n.links.underwater_fraction + * costs.at[tech + "-connection-submarine", "capital_cost"] + + (1.0 - n.links.underwater_fraction) + * costs.at[tech + "-connection-underground", "capital_cost"] ) - - return connection_costs_per_link + ) + for tech in renewable_carriers + if tech.startswith("offwind") + } def _compute_connection_costs_to_bus( @@ -470,9 +468,9 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index ) - dist[ - buses_i - ] = np.inf # bus in buses_i should not be assigned to different bus in buses_i + dist[buses_i] = ( + np.inf + ) # bus in buses_i should not be assigned to different bus in buses_i for c in n.buses.country.unique(): incountry_b = n.buses.country == c @@ -539,6 +537,9 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) Nyears = n.snapshot_weightings.objective.sum() / 8760 + # remove integer outputs for compatibility with PyPSA v0.26.0 + n.generators.drop("n_mod", axis=1, inplace=True, errors="ignore") + n, trafo_map = simplify_network_to_380(n) technology_costs = load_costs( diff --git a/scripts/solve_network.py b/scripts/solve_network.py index bf860054..5ca0ec82 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -26,22 +26,28 @@ Additionally, some extra constraints specified in :mod:`solve_network` are added the workflow for all scenarios in the configuration file (``scenario:``) based on the rule :mod:`solve_network`. 
""" +import importlib import logging +import os import re +import sys import numpy as np import pandas as pd import pypsa import xarray as xr +from _benchmark import memory_logger from _helpers import ( configure_logging, - set_scenario_config, + get_opt, update_config_with_sector_opts, + set_scenario_config, ) +from pypsa.descriptors import get_activity_mask +from pypsa.descriptors import get_switchable_as_dense as get_as_dense logger = logging.getLogger(__name__) pypsa.pf.logger.setLevel(logging.WARNING) -from pypsa.descriptors import get_switchable_as_dense as get_as_dense def add_land_use_constraint(n, planning_horizons, config): @@ -51,6 +57,69 @@ def add_land_use_constraint(n, planning_horizons, config): _add_land_use_constraint(n) +def add_land_use_constraint_perfect(n): + """ + Add global constraints for tech capacity limit. + """ + logger.info("Add land-use constraint for perfect foresight") + + def compress_series(s): + def process_group(group): + if group.nunique() == 1: + return pd.Series(group.iloc[0], index=[None]) + else: + return group + + return s.groupby(level=[0, 1]).apply(process_group) + + def new_index_name(t): + # Convert all elements to string and filter out None values + parts = [str(x) for x in t if x is not None] + # Join with space, but use a dash for the last item if not None + return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "") + + def check_p_min_p_max(p_nom_max): + p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min + p_nom_min = p_nom_min.reindex(p_nom_max.index) + check = ( + p_nom_min.groupby(level=[0, 1]).sum() + > p_nom_max.groupby(level=[0, 1]).min() + ) + if check.sum(): + logger.warning( + f"summed p_min_pu values at node larger than technical potential {check[check].index}" + ) + + grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year] + ext_i = n.generators.p_nom_extendable + # get technical limit per node and investment period + p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max + # drop carriers without tech limit + p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])] + # carrier + carriers = p_nom_max.index.get_level_values(0).unique() + gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index + n.generators.loc[gen_i, "p_nom_min"] = 0 + # check minimum capacities + check_p_min_p_max(p_nom_max) + # drop multi entries in case p_nom_max stays constant in different periods + # p_nom_max = compress_series(p_nom_max) + # adjust name to fit syntax of nominal constraint per bus + df = p_nom_max.reset_index() + df["name"] = df.apply( + lambda row: f"nom_max_{row['carrier']}" + + (f"_{row['build_year']}" if row["build_year"] is not None else ""), + axis=1, + ) + + for name in df.name.unique(): + df_carrier = df[df.name == name] + bus = df_carrier.bus + n.buses.loc[bus, name] = df_carrier.p_nom_max.values + + return n + + def _add_land_use_constraint(n): # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' @@ -86,19 +155,13 @@ def _add_land_use_constraint(n): def _add_land_use_constraint_m(n, planning_horizons, config): # if generators clustering is lower than network clustering, land_use accounting is at generators clusters - planning_horizons = param["planning_horizons"] grouping_years = config["existing_capacities"]["grouping_years"] current_horizon = snakemake.wildcards.planning_horizons for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]: existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"] 
ind = list( - set( - [ - i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] - for i in existing.index - ] - ) + {i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index} ) previous_years = [ @@ -120,13 +183,10 @@ def _add_land_use_constraint_m(n, planning_horizons, config): n.generators.p_nom_max.clip(lower=0, inplace=True) -def add_co2_sequestration_limit(n, limit=200): +def add_co2_sequestration_limit(n, config, limit=200): """ Add a global constraint on the amount of Mt CO2 that can be sequestered. """ - n.carriers.loc["co2 stored", "co2_absorptions"] = -1 - n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0) - limit = limit * 1e6 for o in opts: if "seq" not in o: @@ -134,16 +194,146 @@ def add_co2_sequestration_limit(n, limit=200): limit = float(o[o.find("seq") + 3 :]) * 1e6 break - n.add( + if not n.investment_periods.empty: + periods = n.investment_periods + names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods]) + else: + periods = [np.nan] + names = pd.Index(["co2_sequestration_limit"]) + + n.madd( "GlobalConstraint", - "co2_sequestration_limit", + names, sense=">=", constant=-limit, type="operational_limit", carrier_attribute="co2 sequestered", + investment_period=periods, ) +def add_carbon_constraint(n, snapshots): + glcs = n.global_constraints.query('type == "co2_atmosphere"') + if glcs.empty: + return + for name, glc in glcs.iterrows(): + carattr = glc.carrier_attribute + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + continue + + # stores + n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query("carrier in @emissions.index and not e_cyclic") + if not stores.empty: + last = n.snapshot_weightings.reset_index().groupby("period").last() + last_i = last.set_index([last.index, last.timestep]).index + final_e = n.model["Store-e"].loc[last_i, stores.index] + time_valid = int(glc.loc["investment_period"]) + time_i = pd.IndexSlice[time_valid, :] + lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :] + + rhs = glc.constant + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") + + +def add_carbon_budget_constraint(n, snapshots): + glcs = n.global_constraints.query('type == "Co2Budget"') + if glcs.empty: + return + for name, glc in glcs.iterrows(): + carattr = glc.carrier_attribute + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + continue + + # stores + n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query("carrier in @emissions.index and not e_cyclic") + if not stores.empty: + last = n.snapshot_weightings.reset_index().groupby("period").last() + last_i = last.set_index([last.index, last.timestep]).index + final_e = n.model["Store-e"].loc[last_i, stores.index] + time_valid = int(glc.loc["investment_period"]) + time_i = pd.IndexSlice[time_valid, :] + weighting = n.investment_period_weightings.loc[time_valid, "years"] + lhs = final_e.loc[time_i, :] * weighting + + rhs = glc.constant + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") + + +def add_max_growth(n, config): + """ + Add maximum growth rates for different carriers. 
+ """ + + opts = snakemake.params["sector"]["limit_max_growth"] + # take maximum yearly difference between investment periods since historic growth is per year + factor = n.investment_period_weightings.years.max() * opts["factor"] + for carrier in opts["max_growth"].keys(): + max_per_period = opts["max_growth"][carrier] * factor + logger.info( + f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW." + ) + n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3 + + for carrier in opts["max_relative_growth"].keys(): + max_r_per_period = opts["max_relative_growth"][carrier] + logger.info( + f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}." + ) + n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period + + return n + + +def add_retrofit_gas_boiler_constraint(n, snapshots): + """ + Allow retrofitting of existing gas boilers to H2 boilers. + """ + c = "Link" + logger.info("Add constraint for retrofitting gas boilers to H2 boilers.") + # existing gas boilers + mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable + gas_i = n.links[mask].index + mask = n.links.carrier.str.contains("retrofitted H2 boiler") + h2_i = n.links[mask].index + + n.links.loc[gas_i, "p_nom_extendable"] = True + p_nom = n.links.loc[gas_i, "p_nom"] + n.links.loc[gas_i, "p_nom"] = 0 + + # heat profile + cols = n.loads_t.p_set.columns[ + n.loads_t.p_set.columns.str.contains("heat") + & ~n.loads_t.p_set.columns.str.contains("industry") + & ~n.loads_t.p_set.columns.str.contains("agriculture") + ] + profile = n.loads_t.p_set[cols].div( + n.loads_t.p_set[cols].groupby(level=0).max(), level=0 + ) + # to deal if max value is zero + profile.fillna(0, inplace=True) + profile.rename(columns=n.loads.bus.to_dict(), inplace=True) + profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"]) + profile.columns = gas_i + + rhs = profile.mul(p_nom) + + dispatch = n.model["Link-p"] + active = get_activity_mask(n, c, snapshots, gas_i) + rhs = rhs[active] + p_gas = dispatch.sel(Link=gas_i) + p_h2 = dispatch.sel(Link=h2_i) + + lhs = p_gas + p_h2 + + n.model.add_constraints(lhs == rhs, name="gas_retrofit") + + def prepare_network( n, solve_opts=None, @@ -156,17 +346,18 @@ def prepare_network( for df in ( n.generators_t.p_max_pu, n.generators_t.p_min_pu, + n.links_t.p_max_pu, + n.links_t.p_min_pu, n.storage_units_t.inflow, ): df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True) - load_shedding = solve_opts.get("load_shedding") - if load_shedding: + if load_shedding := solve_opts.get("load_shedding"): # intersect between macroeconomic and surveybased willingness to pay # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full # TODO: retrieve color and nice name from config n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding") - buses_i = n.buses.query("carrier == 'AC'").index + buses_i = n.buses.index if not np.isscalar(load_shedding): # TODO: do not scale via sign attribute (use Eur/MWh instead of Eur/kWh) load_shedding = 1e2 # Eur/kWh @@ -204,9 +395,14 @@ def prepare_network( if foresight == "myopic": add_land_use_constraint(n, planning_horizons, config) - if n.stores.carrier.eq("co2 stored").any(): + if foresight == "perfect": + n = add_land_use_constraint_perfect(n) + if snakemake.params["sector"]["limit_max_growth"]["enable"]: + n = add_max_growth(n, config) + + if n.stores.carrier.eq("co2 sequestered").any(): limit = co2_sequestration_potential - add_co2_sequestration_limit(n, limit=limit) + 
add_co2_sequestration_limit(n, config, limit=limit) return n @@ -227,7 +423,7 @@ def add_CCL_constraints(n, config): Example ------- scenario: - opts: [Co2L-CCL-24H] + opts: [Co2L-CCL-24h] electricity: agg_p_nom_limits: data/agg_p_nom_minmax.csv """ @@ -272,7 +468,7 @@ def add_EQ_constraints(n, o, scaling=1e-1): Example ------- scenario: - opts: [Co2L-EQ0.7-24H] + opts: [Co2L-EQ0.7-24h] Require each country or node to on average produce a minimal share of its total electricity consumption itself. Example: EQ0.7c demands each country @@ -336,7 +532,7 @@ def add_BAU_constraints(n, config): Example ------- scenario: - opts: [Co2L-BAU-24H] + opts: [Co2L-BAU-24h] electricity: BAU_mincapacities: solar: 0 @@ -373,7 +569,7 @@ def add_SAFE_constraints(n, config): config.yaml requires to specify opts: scenario: - opts: [Co2L-SAFE-24H] + opts: [Co2L-SAFE-24h] electricity: SAFE_reservemargin: 0.1 Which sets a reserve margin of 10% above the peak demand. @@ -381,7 +577,7 @@ def add_SAFE_constraints(n, config): peakdemand = n.loads_t.p_set.sum(axis=1).max() margin = 1.0 + config["electricity"]["SAFE_reservemargin"] reserve_margin = peakdemand * margin - conventional_carriers = config["electricity"]["conventional_carriers"] + conventional_carriers = config["electricity"]["conventional_carriers"] # noqa: F841 ext_gens_i = n.generators.query( "carrier in @conventional_carriers & p_nom_extendable" ).index @@ -498,6 +694,37 @@ def add_battery_constraints(n): n.model.add_constraints(lhs == 0, name="Link-charger_ratio") +def add_lossy_bidirectional_link_constraints(n): + if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns: + return + + n.links["reversed"] = n.links.reversed.fillna(0).astype(bool) + carriers = n.links.loc[n.links.reversed, "carrier"].unique() # noqa: F841 + + forward_i = n.links.query( + "carrier in @carriers and ~reversed and p_nom_extendable" + ).index + + def get_backward_i(forward_i): + return pd.Index( + [ + ( + re.sub(r"-(\d{4})$", r"-reversed-\1", s) + if re.search(r"-\d{4}$", s) + else s + "-reversed" + ) + for s in forward_i + ] + ) + + backward_i = get_backward_i(forward_i) + + lhs = n.model["Link-p_nom"].loc[backward_i] + rhs = n.model["Link-p_nom"].loc[forward_i] + + n.model.add_constraints(lhs == rhs, name="Link-bidirectional_sync") + + def add_chp_constraints(n): electric = ( n.links.index.str.contains("urban central") @@ -556,9 +783,13 @@ def add_pipe_retrofit_constraint(n): """ Add constraint for retrofitting existing CH4 pipelines to H2 pipelines. 
""" - gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index + if "reversed" not in n.links.columns: + n.links["reversed"] = False + gas_pipes_i = n.links.query( + "carrier == 'gas pipeline' and p_nom_extendable and ~reversed" + ).index h2_retrofitted_i = n.links.query( - "carrier == 'H2 pipeline retrofitted' and p_nom_extendable" + "carrier == 'H2 pipeline retrofitted' and p_nom_extendable and ~reversed" ).index if h2_retrofitted_i.empty or gas_pipes_i.empty: @@ -573,6 +804,29 @@ def add_pipe_retrofit_constraint(n): n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit") +def add_co2_atmosphere_constraint(n, snapshots): + glcs = n.global_constraints[n.global_constraints.type == "co2_atmosphere"] + + if glcs.empty: + return + for name, glc in glcs.iterrows(): + carattr = glc.carrier_attribute + emissions = n.carriers.query(f"{carattr} != 0")[carattr] + + if emissions.empty: + continue + + # stores + n.stores["carrier"] = n.stores.bus.map(n.buses.carrier) + stores = n.stores.query("carrier in @emissions.index and not e_cyclic") + if not stores.empty: + last_i = snapshots[-1] + lhs = n.model["Store-e"].loc[last_i, stores.index] + rhs = glc.constant + + n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}") + + def extra_functionality(n, snapshots): """ Collects supplementary constraints which will be passed to @@ -584,26 +838,55 @@ def extra_functionality(n, snapshots): """ opts = n.opts config = n.config - if "BAU" in opts and n.generators.p_nom_extendable.any(): + constraints = config["solving"].get("constraints", {}) + if ( + "BAU" in opts or constraints.get("BAU", False) + ) and n.generators.p_nom_extendable.any(): add_BAU_constraints(n, config) - if "SAFE" in opts and n.generators.p_nom_extendable.any(): + if ( + "SAFE" in opts or constraints.get("SAFE", False) + ) and n.generators.p_nom_extendable.any(): add_SAFE_constraints(n, config) - if "CCL" in opts and n.generators.p_nom_extendable.any(): + if ( + "CCL" in opts or constraints.get("CCL", False) + ) and n.generators.p_nom_extendable.any(): add_CCL_constraints(n, config) + reserve = config["electricity"].get("operational_reserve", {}) if reserve.get("activate"): add_operational_reserve_margin(n, snapshots, config) - for o in opts: - if "EQ" in o: - add_EQ_constraints(n, o) + + EQ_config = constraints.get("EQ", False) + EQ_wildcard = get_opt(opts, r"^EQ+[0-9]*\.?[0-9]+(c|)") + EQ_o = EQ_wildcard or EQ_config + if EQ_o: + add_EQ_constraints(n, EQ_o.replace("EQ", "")) + add_battery_constraints(n) + add_lossy_bidirectional_link_constraints(n) add_pipe_retrofit_constraint(n) + if n._multi_invest: + add_carbon_constraint(n, snapshots) + add_carbon_budget_constraint(n, snapshots) + add_retrofit_gas_boiler_constraint(n, snapshots) + else: + add_co2_atmosphere_constraint(n, snapshots) + + if snakemake.params.custom_extra_functionality: + source_path = snakemake.params.custom_extra_functionality + assert os.path.exists(source_path), f"{source_path} does not exist" + sys.path.append(os.path.dirname(source_path)) + module_name = os.path.splitext(os.path.basename(source_path))[0] + module = importlib.import_module(module_name) + custom_extra_functionality = getattr(module, module_name) + custom_extra_functionality(n, snapshots, snakemake) def solve_network(n, config, solving, opts="", **kwargs): set_of_options = solving["solver"]["options"] cf_solving = solving["options"] + kwargs["multi_investment_periods"] = config["foresight"] == "perfect" kwargs["solver_options"] = ( 
solving["solver_options"][set_of_options] if set_of_options else {} ) @@ -614,6 +897,10 @@ def solve_network(n, config, solving, opts="", **kwargs): "linearized_unit_commitment", False ) kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False) + kwargs["io_api"] = cf_solving.get("io_api", None) + + if kwargs["solver_name"] == "gurobi": + logging.getLogger("gurobipy").setLevel(logging.CRITICAL) rolling_horizon = cf_solving.pop("rolling_horizon", False) skip_iterations = cf_solving.pop("skip_iterations", False) @@ -645,6 +932,9 @@ def solve_network(n, config, solving, opts="", **kwargs): f"Solving status '{status}' with termination condition '{condition}'" ) if "infeasible" in condition: + labels = n.model.compute_infeasibilities() + logger.info(f"Labels:\n{labels}") + n.model.print_infeasibilities() raise RuntimeError("Solving status 'infeasible'") return n @@ -655,13 +945,14 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "solve_network", + "solve_sector_network", + configfiles="../config/test/config.perfect.yaml", simpl="", - opts="Ept", + opts="", clusters="37", ll="v1.0", - sector_opts="", - planning_horizons="2020", + sector_opts="CO2L0-1H-T-H-B-I-A-dist1", + planning_horizons="2030", ) configure_logging(snakemake) set_scenario_config(snakemake) @@ -689,13 +980,18 @@ if __name__ == "__main__": co2_sequestration_potential=snakemake.params["co2_sequestration_potential"], ) - n = solve_network( - n, - config=snakemake.config, - solving=snakemake.params.solving, - opts=opts, - log_fn=snakemake.log.solver, - ) + with memory_logger( + filename=getattr(snakemake.log, "memory", None), interval=30.0 + ) as mem: + n = solve_network( + n, + config=snakemake.config, + solving=snakemake.params.solving, + opts=opts, + log_fn=snakemake.log.solver, + ) + + logger.info(f"Maximum memory usage: {mem.mem_usage}") n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py index 064d735a..3fcb34d8 100644 --- a/scripts/solve_operations_network.py +++ b/scripts/solve_operations_network.py @@ -7,6 +7,7 @@ Solves linear optimal dispatch in hourly resolution using the capacities of previous capacity expansion in rule :mod:`solve_network`. """ + import logging import numpy as np @@ -40,7 +41,7 @@ if __name__ == "__main__": set_scenario_config(snakemake) update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts) - opts = (snakemake.wildcards.opts + "-" + snakemake.wildcards.sector_opts).split("-") + opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-") opts = [o for o in opts if o != ""] solve_opts = snakemake.params.options diff --git a/test.sh b/test.sh new file mode 100755 index 00000000..a40276b8 --- /dev/null +++ b/test.sh @@ -0,0 +1,8 @@ +# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \ +snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime