commit 11ecbf3fba
LukasFrankenQ 2023-11-09 22:28:09 +00:00
56 changed files with 3084 additions and 582 deletions


@@ -83,6 +83,7 @@ jobs:
       snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
       snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
       snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
+      snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
     - name: Upload artifacts
       uses: actions/upload-artifact@v3

.gitignore

@@ -8,6 +8,7 @@ __pycache__
 *dconf
 gurobi.log
 .vscode
+*.orig
 /bak
 /resources


@@ -5,7 +5,7 @@ exclude: "^LICENSES"
 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.4.0
+  rev: v4.5.0
   hooks:
   - id: check-merge-conflict
   - id: end-of-file-fixer

@@ -30,10 +30,10 @@ repos:
 # Find common spelling mistakes in comments and docstrings
 - repo: https://github.com/codespell-project/codespell
-  rev: v2.2.5
+  rev: v2.2.6
   hooks:
   - id: codespell
-    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
+    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes
     types_or: [python, rst, markdown]
     files: ^(scripts|doc)/

@@ -45,13 +45,13 @@ repos:
     args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"]
 - repo: https://github.com/keewis/blackdoc
-  rev: v0.3.8
+  rev: v0.3.9
   hooks:
   - id: blackdoc
 # Formatting with "black" coding style
 - repo: https://github.com/psf/black
-  rev: 23.7.0
+  rev: 23.10.1
   hooks:
   # Format Python files
   - id: black

@@ -67,14 +67,14 @@ repos:
 # Do YAML formatting (before the linter checks it for misses)
 - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
-  rev: v2.10.0
+  rev: v2.11.0
   hooks:
   - id: pretty-format-yaml
     args: [--autofix, --indent, "2", --preserve-quotes]
 # Format Snakemake rule / workflow files
 - repo: https://github.com/snakemake/snakefmt
-  rev: v0.8.4
+  rev: v0.8.5
   hooks:
   - id: snakefmt


@@ -14,4 +14,3 @@ build:
 python:
   install:
   - requirements: doc/requirements.txt
-  system_packages: false


@@ -66,13 +66,31 @@ if config["foresight"] == "myopic":
     include: "rules/solve_myopic.smk"

+if config["foresight"] == "perfect":
+
+    include: "rules/solve_perfect.smk"
+
+
+rule all:
+    input:
+        RESULTS + "graphs/costs.pdf",
+    default_target: True
+

 rule purge:
-    message:
-        "Purging generated resources, results and docs. Downloads are kept."
     run:
-        rmtree("resources/", ignore_errors=True)
-        rmtree("results/", ignore_errors=True)
-        rmtree("doc/_build", ignore_errors=True)
+        import builtins
+
+        do_purge = builtins.input(
+            "Do you really want to delete all generated resources, \nresults and docs (downloads are kept)? [y/N] "
+        )
+        if do_purge == "y":
+            rmtree("resources/", ignore_errors=True)
+            rmtree("results/", ignore_errors=True)
+            rmtree("doc/_build", ignore_errors=True)
+            print("Purging generated resources, results and docs. Downloads are kept.")
+        else:
+            raise Exception(f"Input {do_purge}. Aborting purge.")


 rule dag:


@@ -68,6 +68,7 @@ enable:
   retrieve_sector_databundle: true
   retrieve_cost_data: true
   build_cutout: false
+  retrieve_irena: false
   retrieve_cutout: true
   build_natura_raster: false
   retrieve_natura_raster: true

@@ -452,6 +453,7 @@ sector:
   hydrogen_fuel_cell: true
   hydrogen_turbine: false
   SMR: true
+  SMR_cc: true
   regional_co2_sequestration_potential:
     enable: false
     attribute: 'conservative estimate Mt'

@@ -461,6 +463,7 @@ sector:
     years_of_storage: 25
   co2_sequestration_potential: 200
   co2_sequestration_cost: 10
+  co2_sequestration_lifetime: 50
   co2_spatial: false
   co2network: false
   cc_fraction: 0.9

@@ -491,6 +494,20 @@ sector:
     OCGT: gas
   biomass_to_liquid: false
   biosng: false
+  limit_max_growth:
+    enable: false
+    # allowing 30% larger than max historic growth
+    factor: 1.3
+    max_growth: # unit GW
+      onwind: 16 # onshore max grow so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind
+      solar: 28 # solar max grow so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv
+      offwind-ac: 35 # offshore max grow so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/
+      offwind-dc: 35
+    max_relative_growth:
+      onwind: 3
+      solar: 3
+      offwind-ac: 3
+      offwind-dc: 3
   enhanced_geothermal: true
   enhanced_geothermal_optimism: false # if true, egs costs are reducing towards 2050 according to Aghahosseini et al., (2020)
   enhanced_geothermal_performant: true # if true, adds only the cheapest patch of EGS potential to each region
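As an editorial aside: these growth limits correspond to the ``max_growth`` and ``max_relative_growth`` attributes that PyPSA exposes on its ``Carrier`` component for multi-horizon optimisation. A minimal sketch of the mapping, assuming the values above and a GW-to-MW conversion (illustrative only, not the actual PyPSA-Eur code):

    import pypsa

    n = pypsa.Network()

    factor = 1.3  # allow 30% more than the historic maximum growth
    max_growth_gw = {"onwind": 16, "solar": 28, "offwind-ac": 35, "offwind-dc": 35}

    for carrier, gw in max_growth_gw.items():
        # max_growth caps the capacity added per investment period (here in MW);
        # max_relative_growth caps growth relative to the previous period.
        n.add("Carrier", carrier, max_growth=factor * gw * 1e3, max_relative_growth=3)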
@@ -546,11 +563,13 @@ industry:
   hotmaps_locate_missing: false
   reference_year: 2015

 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
 costs:
   year: 2030
   version: v0.6.0
   rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
+  social_discountrate: 0.02
   fill_values:
     FOM: 0
     VOM: 0

@@ -764,6 +783,7 @@ plotting:
   gas pipeline new: '#a87c62'
   # oil
   oil: '#c9c9c9'
+  imported oil: '#a3a3a3'
   oil boiler: '#adadad'
   residential rural oil boiler: '#a9a9a9'
   services rural oil boiler: '#a5a5a5'

@@ -782,6 +802,7 @@ plotting:
   Coal: '#545454'
   coal: '#545454'
   Coal marginal: '#545454'
+  coal for industry: '#343434'
   solid: '#545454'
   Lignite: '#826837'
   lignite: '#826837'

@@ -895,6 +916,7 @@ plotting:
   H2 for shipping: "#ebaee0"
   H2: '#bf13a0'
   hydrogen: '#bf13a0'
+  retrofitted H2 boiler: '#e5a0d9'
   SMR: '#870c71'
   SMR CC: '#4f1745'
   H2 liquefaction: '#d647bd'


@@ -0,0 +1,43 @@
+# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+run:
+  name: "perfect"
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
+foresight: perfect
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
+# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
+scenario:
+  simpl:
+  - ''
+  ll:
+  - v1.0
+  clusters:
+  - 37
+  opts:
+  - ''
+  sector_opts:
+  - 1p5-4380H-T-H-B-I-A-solar+p3-dist1
+  - 1p7-4380H-T-H-B-I-A-solar+p3-dist1
+  - 2p0-4380H-T-H-B-I-A-solar+p3-dist1
+  planning_horizons:
+  - 2020
+  - 2030
+  - 2040
+  - 2050
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
+co2_budget:
+  # update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
+  1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
+  1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
+  1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
+  2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
+
+sector:
+  min_part_load_fischer_tropsch: 0
+  min_part_load_methanolisation: 0
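As a quick plausibility check of the budget comments above, the implied European shares of the global budgets follow from simple division (pure arithmetic on the configured numbers, not part of the workflow):

    budgets = {  # (European budget, global budget) in Gt CO2, from the config above
        "1p5": (34.2, 420),
        "1p6": (43.259666, 580),
        "1p7": (51.4, 800),
        "2p0": (69.778, 1170),
    }
    for label, (europe, world) in budgets.items():
        print(f"{label}: {europe / world:.1%} of the global budget")
    # e.g. the 1.5C case comes out at roughly 8.1% of the global budget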


@@ -0,0 +1,91 @@
+# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+tutorial: true
+
+run:
+  name: "test-sector-perfect"
+  disable_progressbar: true
+  shared_resources: true
+  shared_cutouts: true
+
+foresight: perfect
+
+scenario:
+  ll:
+  - v1.0
+  clusters:
+  - 5
+  sector_opts:
+  - 8760H-T-H-B-I-A-solar+p3-dist1
+  planning_horizons:
+  - 2030
+  - 2040
+  - 2050
+
+countries: ['BE']
+
+snapshots:
+  start: "2013-03-01"
+  end: "2013-03-08"
+
+electricity:
+  co2limit: 100.e+6
+
+  extendable_carriers:
+    Generator: [OCGT]
+    StorageUnit: [battery]
+    Store: [H2]
+    Link: [H2 pipeline]
+
+  renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
+
+sector:
+  min_part_load_fischer_tropsch: 0
+  min_part_load_methanolisation: 0
+
+atlite:
+  default_cutout: be-03-2013-era5
+  cutouts:
+    be-03-2013-era5:
+      module: era5
+      x: [4., 15.]
+      y: [46., 56.]
+      time: ["2013-03-01", "2013-03-08"]
+
+renewable:
+  onwind:
+    cutout: be-03-2013-era5
+  offwind-ac:
+    cutout: be-03-2013-era5
+    max_depth: false
+  offwind-dc:
+    cutout: be-03-2013-era5
+    max_depth: false
+  solar:
+    cutout: be-03-2013-era5
+
+industry:
+  St_primary_fraction:
+    2020: 0.8
+    2030: 0.6
+    2040: 0.5
+    2050: 0.4
+
+solving:
+  solver:
+    name: glpk
+    options: glpk-default
+  mem: 4000
+
+plotting:
+  map:
+    boundaries:
+  eu_node_location:
+    x: -5.5
+    y: 46.
+  costs_max: 1000
+  costs_threshold: 0.0000001
+  energy_max:
+  energy_min:
+  energy_threshold: 0.000001


@@ -1,34 +1,34 @@
-Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018
+Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022
-Albania,,,,,,,,,,,,,,,,,,,
+Albania,,,,,,,,,,,,,,,,,,,,,,,
-Austria,,,,,,,,,,,,,,,,,,,
+Austria,,,,,,,,,,,,,,,,,,,,,,,
-Belgium,,,,,,,,,,31.5,196.5,196.5,381,707.7,707.7,712,712.2,877.2,1185.9
+Belgium,,,,,,,,,,31.5,196.5,196.5,381.0,707.7,707.7,712.0,712.2,877.2,1185.9,1555.5,2261.8,2261.8,2261.8
-Bosnia Herzg,,,,,,,,,,,,,,,,,,,
+Bosnia Herzg,,,,,,,,,,,,,,,,,,,,,,,
-Bulgaria,,,,,,,,,,,,,,,,,,,
+Bulgaria,,,,,,,,,,,,,,,,,,,,,,,
-Croatia,,,,,,,,,,,,,,,,,,,
+Croatia,,,,,,,,,,,,,,,,,,,,,,,
-Czechia,,,,,,,,,,,,,,,,,,,
+Czechia,,,,,,,,,,,,,,,,,,,,,,,
-Denmark,50,50,214,423.4,423.4,423.4,423.4,423.4,423.4,660.9,867.9,871.5,921.9,1271.1,1271.1,1271.1,1271.1,1263.8,1700.8
+Denmark,49.95,49.95,213.95,423.35,423.35,423.35,423.35,423.35,423.35,660.85,867.85,871.45,921.85,1271.05,1271.05,1271.05,1271.05,1263.8,1700.8,1700.8,1700.8,2305.6,2305.6
-Estonia,,,,,,,,,,,,,,,,,,,
+Estonia,,,,,,,,,,,,,,,,,,,,,,,
-Finland,,,,,,,,,24,24,26.3,26.3,26.3,26.3,26.3,32,32,72.7,72.7
+Finland,,,,,,,,,24.0,24.0,26.3,26.3,26.3,26.3,26.3,32.0,32.0,72.7,72.7,73.0,73.0,73.0,73.0
-France,,,,,,,,,,,,,,,,,,2,2
+France,,,,,,,,,,,,,,,,,,2.0,2.0,2.0,2.0,2.0,482.0
-Germany,,,,,,,,,,35,80,188,268,508,994,3283,4132,5406,6396
+Germany,,,,,,,,,,35.0,80.0,188.0,268.0,508.0,994.0,3283.0,4132.0,5406.0,6393.0,7555.0,7787.0,7787.0,8129.0
-Greece,,,,,,,,,,,,,,,,,,,
+Greece,,,,,,,,,,,,,,,,,,,,,,,
-Hungary,,,,,,,,,,,,,,,,,,,
+Hungary,,,,,,,,,,,,,,,,,,,,,,,
-Ireland,,,,,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2
+Ireland,,,,,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2,25.2
-Italy,,,,,,,,,,,,,,,,,,,
+Italy,,,,,,,,,,,,,,,,,,,,,,,30.0
-Latvia,,,,,,,,,,,,,,,,,,,
+Latvia,,,,,,,,,,,,,,,,,,,,,,,
-Lithuania,,,,,,,,,,,,,,,,,,,
+Lithuania,,,,,,,,,,,,,,,,,,,,,,,
-Luxembourg,,,,,,,,,,,,,,,,,,,
+Luxembourg,,,,,,,,,,,,,,,,,,,,,,,
-Montenegro,,,,,,,,,,,,,,,,,,,
+Montenegro,,,,,,,,,,,,,,,,,,,,,,,
-Netherlands,,,,,,108,108,228,228,228,228,228,228,228,357,957,957,957
+Netherlands,,,,,,108.0,108.0,228.0,228.0,228.0,228.0,228.0,228.0,228.0,357.0,957.0,957.0,957.0,957.0,2459.5,2459.5,2571.0
-North Macedonia,,,,,,,,,,,,,,,,,,,
+North Macedonia,,,,,,,,,,,,,,,,,,,,,,,
-Norway,,,,,,,,,,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3
+Norway,,,,,,,,,,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,2.3,6.3,66.3
-Poland,,,,,,,,,,,,,,,,,,,
+Poland,,,,,,,,,,,,,,,,,,,,,,,
-Portugal,,,,,,,,,,,,1.9,2,2,2,2,,,
+Portugal,,,,,,,,,,,,1.86,2.0,2.0,2.0,2.0,,,,,25.0,25.0,25.0
-Romania,,,,,,,,,,,,,,,,,,,
+Romania,,,,,,,,,,,,,,,,,,,,,,,
-Serbia,,,,,,,,,,,,,,,,,,,
+Serbia,,,,,,,,,,,,,,,,,,,,,,,
-Slovakia,,,,,,,,,,,,,,,,,,,
+Slovakia,,,,,,,,,,,,,,,,,,,,,,,
-Slovenia,,,,,,,,,,,,,,,,,,,
+Slovenia,,,,,,,,,,,,,,,,,,,,,,,
-Spain,,,,,,,,,,,,,5,5,5,5,5,5
+Spain,,,,,,,,,,,,,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0
-Sweden,13,22,22,22,22,22,22,131,133,163,163,163,163,212,213,213,203,203,203
+Sweden,13.0,22.0,22.0,22.0,22.0,22.0,22.0,131.0,133.0,163.0,163.0,163.0,163.0,212.0,213.0,213.0,203.0,203.0,203.0,203.0,203.0,193.0,193.0
-Switzerland,,,,,,,,,,,,,,,,,,,
+Switzerland,,,,,,,,,,,,,,,,,,,,,,,
-UK,3.8,3.8,3.8,63.8,123.8,213.8,303.8,393.8,596.2,951.2,1341.5,1838.3,2995.5,3696,4501.3,5093.4,5293.4,6987.9,8216.5
+UK,4.0,4.0,4.0,64.0,124.0,214.0,304.0,394.0,596.2,951.0,1341.0,1838.0,2995.0,3696.0,4501.0,5093.0,5293.0,6988.0,8181.0,9888.0,10383.0,11255.0,13928.0



@@ -1,34 +1,34 @@
-Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018
+Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022
-Albania,,,,,,,,,,,,,,,,,,,
+Albania,,,,,,,,,,,,,,,,,,,,,,,
-Austria,50,67,109,322,581,825.2,968.3,991.2,992,1001,1015.8,1106,1337.2,1674.5,2110.3,2488.7,2730,2886.7,3132.7
+Austria,50.0,67.0,109.0,322.0,581.0,825.22,968.27,991.16,991.97,1000.99,1015.83,1105.97,1337.15,1674.54,2110.28,2488.73,2730.0,2886.7,3132.71,3224.12,3225.98,3407.81,3735.81
-Belgium,14,26,31,67,96,167,212,276,324,576.5,715.5,872.5,989,1072.3,1236.3,1464,1657.8,1919.3,2074.8
+Belgium,14.0,26.0,31.0,67.0,96.0,167.0,212.0,276.0,324.0,576.5,715.5,872.5,985.9,1061.3,1225.0,1469.3,1621.6,1902.2,2119.0,2308.0,2410.9,2686.6,2989.6
-Bosnia Herzg,,,,,,,,,,,,0.3,0.3,0.3,0.3,0.3,0.3,0.3,50.9
+Bosnia Herzg,,,,,,,,,,,,0.3,0.3,0.3,0.3,0.3,0.3,0.3,51.0,87.0,87.0,135.0,135.0
-Bulgaria,,,,,1,8,27,30,114,333,488,541,677,683,699,699,699,698.4,698.9
+Bulgaria,,,,,1.0,8.0,27.0,30.0,114.0,333.0,488.0,541.0,677.0,683.0,699.0,699.0,699.0,698.39,698.92,703.12,702.8,704.38,704.38
-Croatia,,,,,6,6,17,17,17,70,79,130,180,254,339,418,483,576.1,586.3
+Croatia,,,,,6.0,6.0,17.0,17.0,17.0,70.0,79.0,130.0,180.0,254.0,339.0,418.0,483.0,576.1,586.3,646.3,801.3,986.9,1042.9
-Czechia,2,,6.4,10.6,16.5,22,43.5,113.8,150,193,213,213,258,262,278,281,282,308.2,316.2
+Czechia,2.0,,6.4,10.6,16.5,22.0,43.5,113.8,150.0,193.0,213.0,213.0,258.0,262.0,278.0,281.0,282.0,308.21,316.2,339.41,339.42,339.41,339.41
-Denmark,2340.1,2447.2,2680.6,2696.6,2700.4,2704.5,2712.3,2700.9,2739.5,2821.2,2934,3080.5,3240.1,3547.9,3615.4,3805.9,3974.5,4225.8,4419.8
+Denmark,2340.07,2447.2,2680.58,2696.57,2700.36,2704.49,2712.35,2700.86,2739.52,2821.24,2933.98,3080.53,3240.09,3547.87,3615.35,3805.92,3974.09,4225.15,4421.86,4409.74,4566.23,4715.24,4782.24
-Estonia,,,1,3,7,31,31,50,77,104,108,180,266,248,275,300,310,311.8,310
+Estonia,,,1.0,3.0,7.0,31.0,31.0,50.0,77.0,104.0,108.0,180.0,266.0,248.0,275.0,300.0,310.0,311.8,310.0,316.0,317.0,315.0,315.0
-Finland,38,39,43,52,82,82,86,110,119,123,170.7,172.7,230.7,420.7,600.7,973,1533,1971.3,1968.3
+Finland,38.0,39.0,43.0,52.0,82.0,82.0,86.0,110.0,119.0,123.0,170.7,172.7,230.7,420.7,600.7,973.0,1533.0,1971.3,1968.3,2211.0,2513.0,3184.0,5541.0
-France,38,66,138,218,358,690,1412,2223,3403,4582,5912,6758,7607.5,8156,9201.4,10298.2,11566.6,13497.4,14898.1
+France,38.0,66.0,138.0,218.0,358.0,690.0,1412.0,2223.0,3403.0,4582.0,5912.0,6758.02,7607.5,8155.96,9201.42,10298.18,11566.56,13497.35,14898.14,16424.85,17512.0,18737.98,20637.98
-Germany,6095,8754,12001,14381,16419,18248,20474,22116,22794,25697,26823,28524,30711,32969,37620,41297,45303,50174,52447
+Germany,6095.0,8754.0,12001.0,14381.0,16419.0,18248.0,20474.0,22116.0,22794.0,25697.0,26823.0,28524.0,30711.0,32969.0,37620.0,41297.0,45303.0,50174.0,52328.0,53187.0,54414.0,56046.0,58165.0
-Greece,226,270,287,371,470,491,749,846,1022,1171,1298,1640,1753,1809,1978,2091,2370,2624,2877.5
+Greece,226.0,270.0,287.0,371.0,470.0,491.0,749.0,846.0,1022.0,1171.0,1298.0,1640.0,1753.0,1809.0,1978.0,2091.0,2370.0,2624.0,2877.5,3589.0,4119.25,4649.13,4879.13
-Hungary,,1,1,3,3,17,33,61,134,203,293,331,325,329,329,329,329,329,329
+Hungary,,1.0,1.0,3.0,3.0,17.0,33.0,61.0,134.0,203.0,293.0,331.0,325.0,329.0,329.0,329.0,329.0,329.0,329.0,323.0,323.0,324.0,324.0
-Ireland,116.5,122.9,134.8,210.3,311.2,468.1,651.3,715.3,917.1,1226.1,1365.2,1559.4,1679.2,1983,2258.1,2426,2760.8,3292.8,3650.9
+Ireland,116.5,122.9,134.8,210.3,311.2,468.1,651.3,715.3,917.1,1226.1,1365.2,1559.4,1679.15,1898.1,2258.05,2425.95,2776.45,3293.95,3648.65,4101.25,4281.5,4313.84,4593.84
-Italy,363,664,780,874,1127,1635,1902,2702,3525,4879,5794,6918,8102,8542,8683,9137,9384,9736.6,10230.2
+Italy,363.0,664.0,780.0,874.0,1127.0,1635.0,1902.0,2702.0,3525.0,4879.0,5794.0,6918.0,8102.0,8542.0,8683.0,9137.0,9384.0,9736.58,10230.25,10679.46,10870.62,11253.73,11749.73
-Latvia,2,2,22,26,26,26,26,26,28,29,30,36,59,65.9,68.9,68.2,69.9,77.1,78.2
+Latvia,2.0,2.0,22.0,26.0,26.0,26.0,26.0,26.0,28.0,29.0,30.0,36.0,59.0,65.89,68.92,68.17,69.91,77.11,78.17,78.07,78.07,77.13,136.13
-Lithuania,,,,,1,1,31,47,54,98,133,202,275,279,288,436,509,518,533
+Lithuania,,,,,1.0,1.0,31.0,47.0,54.0,98.0,133.0,202.0,275.0,279.0,288.0,436.0,509.0,518.0,533.0,534.0,540.0,671.0,814.0
-Luxembourg,14,13.9,13.9,20.5,34.9,34.9,34.9,34.9,42.9,42.9,43.7,44.5,58.3,58.3,58.3,63.8,119.7,119.7,122.9
+Luxembourg,14.0,13.9,13.9,20.5,34.9,34.9,34.9,34.9,42.92,42.93,43.73,44.53,58.33,58.33,58.34,63.79,119.69,119.69,122.89,135.79,152.74,136.44,165.44
-Montenegro,,,,,,,,,,,,,,,,,,72,118
+Montenegro,,,,,,,,,,,,,,,,,,72.0,72.0,118.0,118.0,118.0,118.0
-Netherlands,447,486,672,905,1075,1224,1453,1641,1921,1994,2009,2088,2205,2485,2637,3034,3300,3245,3436
+Netherlands,447.0,486.0,672.0,905.0,1075.0,1224.0,1453.0,1641.0,1921.0,1994.0,2009.0,2088.0,2205.0,2485.0,2637.0,3033.84,3300.12,3245.0,3436.11,3527.16,4188.38,5309.87,6176.0
-North Macedonia,,,,,,,,,,,,,,,37,37,37,37,37
+North Macedonia,,,,,,,,,,,,,,,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0,37.0
-Norway,13,13,97,97,152,265,284,348,395,420.7,422.7,509.7,702.7,815.7,856.7,864.7,880.7,1204.7,1708
+Norway,13.0,13.0,97.0,97.0,152.0,265.0,284.0,348.0,395.0,420.7,422.7,509.7,702.7,815.7,856.7,864.7,880.7,1204.7,1707.7,2911.7,4027.7,5042.7,5067.7
-Poland,4,19,32,35,40,121,172,306,526,709,1108,1800,2564,3429,3836,4886,5747,5759.4,5766.1
+Poland,4.0,19.0,32.0,35.0,40.0,121.0,172.0,306.0,526.0,709.0,1108.0,1800.0,2564.0,3429.0,3836.0,4886.0,5747.0,5759.36,5766.08,5837.76,6298.25,6967.34,7987.34
-Portugal,83,125,190,268,553,1064,1681,2201,2857,3326,3796,4254.4,4409.6,4607.9,4854.6,4934.8,5124.1,5124.1,5172.4
+Portugal,83.0,125.0,190.0,268.0,553.0,1064.0,1681.0,2201.0,2857.0,3326.0,3796.0,4254.35,4409.55,4607.95,4854.56,4934.84,5124.1,5124.1,5172.36,5222.75,5097.26,5402.33,5430.33
-Romania,,,,,,1,1,3,5,15,389,988,1822,2773,3244,3130,3025,3029.8,3032.3
+Romania,,,,,,1.0,1.0,3.0,5.0,15.0,389.0,988.0,1822.0,2773.0,3244.0,3130.0,3025.0,3029.8,3032.26,3037.52,3012.53,3014.96,3014.96
-Serbia,,,,,,,,,,,,,0.5,0.5,0.5,10.4,17,25,25
+Serbia,,,,,,,,,,,,,0.5,0.5,0.5,10.4,17.0,25.0,227.0,398.0,398.0,398.0,398.0
-Slovakia,,,,3,3,5,5,5,5,3,3,3,3,5,3,3,3,4,3
+Slovakia,,,,3.0,3.0,5.0,5.0,5.0,5.0,3.0,3.0,3.0,3.0,5.0,3.0,3.0,3.0,4.0,3.0,4.0,4.0,4.0,4.0
-Slovenia,,,,,,,,,,,,,,4,4,5,5,5,5.2
+Slovenia,,,,,,,,,,,,,2.0,2.0,3.0,3.0,3.0,3.3,3.3,3.3,3.3,3.33,3.33
-Spain,2206,3397,4891,5945,8317,9918,11722,14820,16555,19176,20693,21529,22789,22953,22920,22938,22985,23119.5,23400.1
+Spain,2206.0,3397.0,4891.0,5945.0,8317.0,9918.0,11722.0,14820.0,16555.0,19176.0,20693.0,21529.0,22789.0,22953.0,22920.0,22938.0,22985.0,23119.48,23400.06,25585.08,26814.19,27902.65,29302.84
-Sweden,196,273,335,395,453,500,563,692,956,1312,1854,2601,3443,3982,4875,5606,6232,6408,7097
+Sweden,196.0,273.0,335.0,395.0,453.0,500.0,563.0,692.0,956.0,1312.0,1854.0,2601.0,3443.0,3982.0,4875.0,5606.0,6232.0,6408.0,7097.0,8478.0,9773.0,11923.0,14364.0
-Switzerland,3,5,5,5,9,12,12,12,14,18,42,46,49,60,60,60,75,75,75
+Switzerland,3.0,5.0,5.0,5.0,9.0,12.0,12.0,12.0,14.0,18.0,42.0,46.0,49.0,60.0,60.0,60.0,75.0,75.0,75.0,75.0,87.0,87.0,87.0
-UK,408.2,489.2,530.2,678.2,809.2,1351.2,1651.2,2083.2,2849.8,3470.8,4079.8,4758,6035,7586.3,8572.7,9212.2,10832.3,12596.9,13553.9
+UK,431.0,490.0,531.0,678.0,809.0,1351.0,1651.0,2083.0,2849.8,3468.0,4080.0,4758.0,6035.0,7586.0,8573.0,9212.0,10833.0,12597.0,13425.0,13999.0,14075.0,14492.0,14832.0



@@ -1,34 +1,34 @@
-Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018
+Country/area,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021,2022
-Albania,,0.1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.4,0.6,0.7,0.8,0.9,1.1,1,1,1
+Albania,,0.1,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.3,0.4,0.56,0.68,0.76,0.87,1.05,1.0,1.0,1.0,14.0,21.0,23.0,28.6
-Austria,5,7,9,23,27,21,22.4,24.2,30.1,48.9,88.8,174.1,337.5,626,785.2,937.1,1096,1269,1437.6
+Austria,5.0,7.0,9.0,23.0,27.0,18.49,19.61,21.42,27.0,45.56,85.27,169.88,333.09,620.78,779.76,931.56,1089.53,1262.01,1447.94,1694.4,2034.74,2773.91,3538.91
-Belgium,,,1,1,1,2,2,20,62,386,1007,1979,2647,2902,3015.2,3131.7,3327,3616.2,3986.5
+Belgium,,,1.0,1.0,1.0,2.0,2.0,20.0,62.0,386.0,1006.6,1978.6,2646.6,2901.6,3015.0,3131.6,3328.8,3620.6,4000.0,4636.6,5572.8,6012.4,6898.4
-Bosnia Herzg,,,,0.1,0.2,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,1.3,7.2,8.2,14.1,16,18.2
+Bosnia Herzg,,,,0.1,0.2,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.35,1.34,7.17,8.17,14.12,16.0,18.15,22.35,34.89,56.51,107.47
-Bulgaria,,,,,,,,0,0.1,2,25,154,1013,1020,1026,1029,1028,1035.6,1032.7
+Bulgaria,,,,,,,,0.03,0.1,2.0,25.0,154.0,921.99,1038.54,1028.92,1027.89,1029.89,1030.7,1033.06,1044.39,1100.21,1274.71,1948.36
-Croatia,,,,,,,,,,0.3,0.3,0.3,4,19,33,47.8,55.8,60,67.7
+Croatia,,,,,,,,,,0.3,0.3,0.3,4.0,19.0,33.0,47.8,55.8,60.0,67.7,84.8,108.5,138.3,182.3
-Czechia,0.1,0.1,0.2,0.3,0.4,0.6,0.8,4,39.5,464.6,1727,1913,2022,2063.5,2067.4,2074.9,2067.9,2069.5,2075.1
+Czechia,0.1,0.1,0.2,0.3,0.4,0.59,0.84,3.96,39.5,464.6,1727.0,1913.0,2022.0,2063.5,2067.4,2074.9,2067.9,2075.44,2081.05,2110.67,2171.96,2246.09,2627.09
-Denmark,1,1,2,2,2,3,3,3,3,5,7,17,402,571,607,782.1,851,906.4,998
+Denmark,1.0,1.0,2.0,2.0,2.0,3.0,3.0,3.0,3.0,5.0,7.0,17.0,402.0,571.0,607.0,782.11,850.95,906.35,998.0,1080.0,1304.29,1704.04,3122.04
-Estonia,,,,,,,,,,0.1,0.1,0.2,0.4,1.5,3.3,6.5,10,15,31.9
+Estonia,,,,,,,,,,0.1,0.1,0.2,0.38,1.5,3.34,6.5,10.0,15.0,31.9,120.6,207.67,394.77,534.77
-Finland,2,3,3,3,4,4,5,5,6,6,7,7,8,9,11,17,39,82,140
+Finland,2.0,3.0,3.0,3.0,4.0,4.0,5.0,5.0,6.0,6.0,7.0,7.0,8.0,9.0,11.0,17.0,39.0,82.0,140.0,222.0,318.0,425.0,590.6
-France,7,7,8,9,11,13,15,26,80,277,1044,3003.6,4358.8,5277.3,6034.4,7137.5,7702.1,8610.4,9617
+France,7.0,7.0,8.0,9.0,11.0,13.0,15.0,26.0,80.0,277.0,1044.0,3003.57,4358.75,5277.29,6034.42,7137.52,7702.08,8610.44,9638.88,10738.39,11812.2,14436.97,17036.97
-Germany,114,195,260,435,1105,2056,2899,4170,6120,10564,18004,25914,34075,36708,37898,39222,40677,42291,45179
+Germany,114.0,195.0,260.0,435.0,1105.0,2056.0,2899.0,4170.0,6120.0,10564.0,18004.0,25914.0,34075.0,36708.0,37898.0,39222.0,40677.0,42291.0,45156.0,48912.0,53669.0,59371.0,66662.0
-Greece,,1,1,1,1,1,5,9,12,46,202,612,1536,2579,2596,2604,2604,2605.5,2651.6
+Greece,,1.0,1.0,1.0,1.0,1.0,5.0,9.0,12.0,46.0,202.0,612.0,1536.0,2579.0,2596.0,2604.0,2604.0,2605.53,2651.57,2833.79,3287.72,4277.42,5557.42
-Hungary,,,,,,,,0.4,1,1,2,4,12,35,89,172,235,344,726
+Hungary,,,,,,,,0.4,1.0,1.0,2.0,4.0,12.0,35.0,89.0,172.0,235.0,344.0,728.0,1400.0,2131.0,2968.0,2988.0
-Ireland,,,,,,,,,,0.6,0.7,0.8,0.9,1,1.6,2.4,5.9,15.7,24.2
+Ireland,,,,,,,,,,,,,,,,,,,,,,,
-Italy,19,20,22,26,31,34,45,110,483,1264,3592,13131,16785,18185,18594,18901,19283,19682.3,20107.6
+Italy,19.0,20.0,22.0,26.0,31.0,34.0,45.0,110.0,483.0,1264.0,3592.0,13131.0,16785.0,18185.0,18594.0,18901.0,19283.0,19682.29,20107.59,20865.28,21650.04,22594.26,25076.56
-Latvia,,,,,,,,,,,,,0.2,0.2,0.2,0.2,0.7,0.7,2
+Latvia,,,,,,,,,,,,,,,,,0.69,0.69,1.96,3.3,5.1,7.16,56.16
-Lithuania,,,,,,,,,0.1,0.1,0.1,0.3,7,68,69,69,70,73.8,82
+Lithuania,,,,,,,,,0.1,0.1,0.1,0.3,7.0,68.0,69.0,69.0,70.0,70.08,72.0,73.0,80.0,84.0,397.0
-Luxembourg,,0.2,1.6,14.2,23.6,23.6,23.7,23.9,24.6,26.4,29.5,40.7,74.7,95,109.9,116.3,121.9,128.1,130.6
+Luxembourg,,0.16,1.59,14.17,23.56,23.58,23.7,23.93,24.56,26.36,29.45,40.67,74.65,95.02,109.93,116.27,121.9,128.1,130.62,159.74,186.64,277.16,319.16
-Montenegro,,,,,,,0,0.2,0.4,0.4,0.6,0.8,0.9,1.1,2.1,2.7,3.1,3.4,3.4
+Montenegro,,,,,,,,,,,,,,,,,,,,,2.57,2.57,22.2
-Netherlands,13,21,26,46,50,51,53,54,59,69,90,149,369,746,1048,1515,2049,2903,4522
+Netherlands,13.0,21.0,26.0,46.0,50.0,51.0,53.0,54.0,59.0,69.0,90.0,149.0,287.0,650.0,1007.0,1526.26,2135.02,2910.89,4608.0,7226.0,11108.43,14910.69,18848.69
-North Macedonia,,,,,,,,,,,0,2,4,7,15,17,16.7,16.7,20.6
+North Macedonia,,,,,,,,,,,,2.0,4.0,7.0,15.0,17.0,16.7,16.7,16.7,16.71,84.93,84.93,84.93
-Norway,6,6,6,7,7,7,8,8,8.3,8.7,9.1,9.5,10,11,13,15,26.7,44.9,68.4
+Norway,6.0,6.0,6.0,7.0,7.0,7.0,8.0,8.0,8.3,8.7,9.1,9.5,10.0,11.0,13.0,15.0,26.7,44.9,53.11,102.53,141.53,186.53,302.53
-Poland,,,,,,,,,,,,1.1,1.3,2.4,27.2,107.8,187.2,287.1,562
+Poland,,,,,,,,,,,,1.11,1.3,2.39,27.15,107.78,187.25,287.09,561.98,1539.26,3954.96,7415.52,11166.52
-Portugal,1,1,1,2,2,2,3,24,59,115,134,172,238,296,415,447,512.8,579.2,667.4
+Portugal,1.0,1.0,1.0,2.0,2.0,2.0,3.0,24.0,59.0,115.0,134.0,169.6,235.6,293.6,412.6,441.75,493.05,539.42,617.85,832.74,1010.07,1474.78,2364.78
-Romania,,,,,,,,,0.1,0.1,0.1,1,41,761,1293,1326,1372,1374.1,1385.8
+Romania,,,,,,,,,0.1,0.1,0.1,1.0,41.0,761.0,1293.0,1326.0,1372.0,1374.13,1385.82,1397.71,1382.54,1393.92,1413.92
-Serbia,,,,,,0.1,0.2,0.4,0.9,1.2,1.3,1.5,3.1,4.7,6,9,11,10,10
+Serbia,,,,,,0.1,0.2,0.4,0.9,1.2,1.3,1.5,3.1,4.7,6.0,9.0,11.0,10.0,11.0,11.0,11.5,11.94,11.94
-Slovakia,,,,,,,,,,,19,496,513,533,533,533,533,528,472
+Slovakia,,,,,,,,,,,19.0,496.0,513.0,533.0,533.0,533.0,533.0,528.0,472.0,590.0,535.0,537.0,537.0
-Slovenia,,,0,0,0,0,0.2,0.6,1,4,12,57,142,187,223,238,233,246.8,221.3
+Slovenia,1.0,1.0,,,,0.05,0.19,0.59,1.0,4.0,12.0,57.0,142.0,187.0,223.0,238.0,233.0,246.8,246.8,277.88,369.78,461.16,632.16
-Spain,10,13,17,22,33,52,130,494,3384,3423,3873,4283,4569,4690,4697,4704,4713,4723,4763.5
+Spain,1.0,3.0,6.0,10.0,19.0,37.0,113.0,476.0,3365.0,3403.0,3851.0,4260.0,4545.0,4665.0,4672.0,4677.0,4687.0,4696.0,4730.7,8772.02,10100.42,13678.4,18176.73
-Sweden,3,3,3,4,4,4,5,6,8,9,11,12,24,43,60,104,153,402,492
+Sweden,3.0,3.0,3.0,4.0,4.0,4.0,5.0,6.0,8.0,9.0,11.0,12.0,24.0,43.0,60.0,104.0,153.0,231.0,411.0,698.0,1090.0,1587.0,2587.0
-Switzerland,16,18,20,22,24,28,30,37,49,79,125,223,437,756,1061,1394,1664,1906,2171
+Switzerland,16.0,18.0,20.0,22.0,24.0,28.0,30.0,37.0,49.0,79.0,125.0,223.0,437.0,756.0,1061.0,1394.0,1664.0,1906.0,2173.0,2498.0,2973.0,3655.0,4339.92
-UK,2,3,4,6,8,11,14,18,23,27,95,1000,1753,2937,5528,9601.2,11930.5,12781.8,13118.3
+UK,2.0,3.0,4.0,6.0,8.0,11.0,14.0,18.0,23.0,27.0,95.0,1000.0,1753.0,2937.0,5528.0,9601.0,11914.0,12760.0,13059.0,13345.0,13579.0,13965.0,14660.0



@@ -5,6 +5,7 @@ retrieve_databundle,bool,"{true, false}","Switch to retrieve databundle from zen
 retrieve_sector_databundle,bool,"{true, false}","Switch to retrieve sector databundle from zenodo via the rule :mod:`retrieve_sector_databundle` or whether to keep a custom databundle located in the corresponding folder."
 retrieve_cost_data,bool,"{true, false}","Switch to retrieve technology cost data from `technology-data repository <https://github.com/PyPSA/technology-data>`_."
 build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`."
+retrieve_irena,bool,"{true, false}","Switch to enable the retrieval of ``existing_capacities`` from IRENASTAT with :mod:`retrieve_irena`."
 retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`."
 build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`."
 retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`."



@@ -79,6 +79,7 @@ allam_cycle,--,"{true, false}",Add option to include `Allam cycle gas power plan
 hydrogen_fuel_cell,--,"{true, false}",Add option to include hydrogen fuel cell for re-electrification. Assuming OCGT technology costs
 hydrogen_turbine,--,"{true, false}",Add option to include hydrogen turbine for re-electrification. Assuming OCGT technology costs
 SMR,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR)
+SMR CC,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) and Carbon Capture (CC)
 regional_co2 _sequestration_potential,,,
 -- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP <https://setis.ec.europa.eu/european-co2-storage-database_en>`_.
 -- attribute,--,string,Name of the attribute for the sequestration potential



@@ -41,10 +41,10 @@ Perfect foresight scenarios
 .. warning::

-  Perfect foresight is currently under development and not yet implemented.
-  For running perfect foresight scenarios, in future versions you will be able to
-  set in the ``config/config.yaml``:
+  Perfect foresight is currently implemented as a first test version.
+  For running perfect foresight scenarios, you can adjust the
+  ``config/config.perfect.yaml``:

 .. code:: yaml


@@ -185,7 +185,7 @@ For sector-coupling studies: ::
   pages = "1--25"
   year = "2023",
   eprint = "2207.05816",
-  doi = "10.1016/j.joule.2022.04.016",
+  doi = "10.1016/j.joule.2023.06.016",
 }

 For sector-coupling studies with pathway optimisation: ::


@@ -20,8 +20,29 @@ Upcoming Release

 * Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.

+* New feature: multi-decade optimisation with perfect foresight.
+
+* It is now possible to specify years for biomass potentials which do not exist
+  in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated.
+
+* In pathway mode, the biomass potential is linked to the investment year.
+
+* Rule ``purge`` now initiates a dialog to confirm whether the purge is desired.
+
+* Rule ``retrieve_irena`` retrieves updated values for renewables capacities.
+
+* Split configuration to enable SMR and SMR CC separately.
+
+* The ``mock_snakemake`` function can now be used with a Snakefile from a
+  different directory using the new ``root_dir`` argument (see the sketch
+  after this hunk).

 * Added Enhanced Geothermal Systems for electricity generation.

+**Bugs and Compatibility**
+
+* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv``
+  from being used was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732)
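A sketch of how the new ``root_dir`` argument of ``mock_snakemake`` might be used for interactive debugging; the rule name, wildcard values and checkout path are illustrative assumptions, and the exact signature should be checked in ``scripts/_helpers.py``:

    import sys

    sys.path.append("pypsa-eur/scripts")  # assumption: repository checked out here
    from _helpers import mock_snakemake

    # Build a fake ``snakemake`` object for a script without invoking Snakemake,
    # pointing at a Snakefile outside the current working directory.
    snakemake = mock_snakemake(
        "solve_network",
        root_dir="pypsa-eur",  # directory containing the Snakefile
        simpl="",
        clusters="5",
        ll="copt",
        opts="Co2L-24H",
    )
    print(snakemake.input)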
 PyPSA-Eur 0.8.1 (27th July 2023)
 ================================


@@ -118,6 +118,11 @@ This rule downloads techno-economic assumptions from the `technology-data reposi
 - ``resources/costs.csv``

+Rule ``retrieve_irena``
+================================
+
+.. automodule:: retrieve_irena
+
 Rule ``retrieve_ship_raster``
 ================================


@@ -133,89 +133,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
     graph[bgcolor=white, margin=0];
     node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
     edge[penwidth=2, color=grey];
-    0[label = "solve_network", color = "0.21 0.6 0.85", style="rounded"];
-    1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.02 0.6 0.85", style="rounded"];
-    2[label = "add_extra_components", color = "0.37 0.6 0.85", style="rounded"];
-    3[label = "cluster_network\nclusters: 6", color = "0.39 0.6 0.85", style="rounded"];
-    4[label = "simplify_network\nsimpl: ", color = "0.11 0.6 0.85", style="rounded"];
-    5[label = "add_electricity", color = "0.23 0.6 0.85", style="rounded"];
-    6[label = "build_renewable_profiles\ntechnology: onwind", color = "0.57 0.6 0.85", style="rounded"];
-    7[label = "base_network", color = "0.09 0.6 0.85", style="rounded"];
-    8[label = "build_shapes", color = "0.41 0.6 0.85", style="rounded"];
-    9[label = "retrieve_databundle", color = "0.28 0.6 0.85", style="rounded"];
-    10[label = "retrieve_natura_raster", color = "0.62 0.6 0.85", style="rounded"];
-    11[label = "build_bus_regions", color = "0.53 0.6 0.85", style="rounded"];
-    12[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.05 0.6 0.85", style="rounded,dashed"];
-    13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.57 0.6 0.85", style="rounded"];
-    14[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"];
-    15[label = "retrieve_ship_raster", color = "0.07 0.6 0.85", style="rounded,dashed"];
-    16[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.05 0.6 0.85", style="rounded,dashed"];
-    17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.57 0.6 0.85", style="rounded"];
-    18[label = "build_renewable_profiles\ntechnology: solar", color = "0.57 0.6 0.85", style="rounded"];
-    19[label = "build_hydro_profile", color = "0.44 0.6 0.85", style="rounded"];
-    20[label = "retrieve_cost_data", color = "0.30 0.6 0.85", style="rounded"];
-    21[label = "build_powerplants", color = "0.16 0.6 0.85", style="rounded"];
-    22[label = "build_electricity_demand", color = "0.00 0.6 0.85", style="rounded"];
-    23[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded,dashed"];
-    1 -> 0
-    2 -> 1
-    20 -> 1
-    3 -> 2
-    20 -> 2
-    4 -> 3
-    20 -> 3
-    5 -> 4
-    20 -> 4
-    11 -> 4
-    6 -> 5
-    13 -> 5
-    17 -> 5
-    18 -> 5
-    19 -> 5
-    7 -> 5
-    20 -> 5
-    11 -> 5
-    21 -> 5
-    9 -> 5
-    22 -> 5
-    8 -> 5
-    7 -> 6
-    9 -> 6
-    10 -> 6
-    8 -> 6
-    11 -> 6
-    12 -> 6
-    8 -> 7
-    9 -> 8
-    8 -> 11
-    7 -> 11
-    7 -> 13
-    9 -> 13
-    10 -> 13
-    14 -> 13
-    8 -> 13
-    11 -> 13
-    12 -> 13
-    15 -> 14
-    12 -> 14
-    16 -> 14
-    7 -> 17
-    9 -> 17
-    10 -> 17
-    14 -> 17
-    8 -> 17
-    11 -> 17
-    12 -> 17
-    7 -> 18
-    9 -> 18
-    10 -> 18
-    8 -> 18
-    11 -> 18
-    16 -> 18
-    8 -> 19
-    12 -> 19
-    7 -> 21
-    23 -> 22
+    0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"];
+    1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"];
+    2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"];
+    3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"];
+    4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"];
+    5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"];
+    6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"];
+    7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"];
+    8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"];
+    9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"];
+    10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"];
+    11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"];
+    12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"];
+    13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"];
+    14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"];
+    15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"];
+    16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"];
+    17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"];
+    18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"];
+    19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"];
+    20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"];
+    21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"];
+    22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"];
+    1 -> 0
+    22 -> 0
+    2 -> 1
+    18 -> 1
+    3 -> 2
+    18 -> 2
+    4 -> 3
+    18 -> 3
+    5 -> 4
+    18 -> 4
+    11 -> 4
+    6 -> 5
+    12 -> 5
+    13 -> 5
+    16 -> 5
+    7 -> 5
+    17 -> 5
+    18 -> 5
+    11 -> 5
+    19 -> 5
+    9 -> 5
+    20 -> 5
+    8 -> 5
+    7 -> 6
+    9 -> 6
+    10 -> 6
+    8 -> 6
+    11 -> 6
+    8 -> 7
+    9 -> 8
+    8 -> 11
+    7 -> 11
+    7 -> 12
+    9 -> 12
+    10 -> 12
+    8 -> 12
+    11 -> 12
+    7 -> 13
+    9 -> 13
+    10 -> 13
+    14 -> 13
+    8 -> 13
+    11 -> 13
+    15 -> 14
+    7 -> 16
+    9 -> 16
+    10 -> 16
+    14 -> 16
+    8 -> 16
+    11 -> 16
+    7 -> 17
+    7 -> 19
+    21 -> 20
 }
| |


@ -59,7 +59,7 @@ To run an overnight / greenfiled scenario with the specifications above, run
.. code:: bash .. code:: bash
snakemake -call --configfile config/test/config.overnight.yaml all snakemake -call all --configfile config/test/config.overnight.yaml
which will result in the following *additional* jobs ``snakemake`` wants to run which will result in the following *additional* jobs ``snakemake`` wants to run
on top of those already included in the electricity-only tutorial: on top of those already included in the electricity-only tutorial:
@ -318,7 +318,7 @@ To run a myopic foresight scenario with the specifications above, run
.. code:: bash .. code:: bash
snakemake -call --configfile config/test/config.myopic.yaml all snakemake -call all --configfile config/test/config.myopic.yaml
which will result in the following *additional* jobs ``snakemake`` wants to run: which will result in the following *additional* jobs ``snakemake`` wants to run:
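(The target ``all`` is moved in front of ``--configfile`` because Snakemake's ``--configfile`` flag accepts a list of files; placed after it, ``all`` would be consumed as a further config file rather than treated as the target rule.)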
@ -27,7 +27,7 @@ dependencies:
- numpy - numpy
- pandas>=1.4 - pandas>=1.4
- geopandas>=0.11.0 - geopandas>=0.11.0
- xarray - xarray<=2023.8.0
- rioxarray - rioxarray
- netcdf4 - netcdf4
- networkx - networkx
@ -55,5 +55,5 @@ dependencies:
- pip: - pip:
- tsam>=1.1.0 - tsam>=2.3.1
- pypsa>=0.25.1 - pypsa>=0.25.2
@ -280,15 +280,16 @@ rule build_biomass_potentials:
country_shapes=RESOURCES + "country_shapes.geojson", country_shapes=RESOURCES + "country_shapes.geojson",
output: output:
biomass_potentials_all=RESOURCES biomass_potentials_all=RESOURCES
+ "biomass_potentials_all_s{simpl}_{clusters}.csv", + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv",
biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv", biomass_potentials=RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
threads: 1 threads: 1
resources: resources:
mem_mb=1000, mem_mb=1000,
log: log:
LOGS + "build_biomass_potentials_s{simpl}_{clusters}.log", LOGS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log",
benchmark: benchmark:
BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}" BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}"
conda: conda:
"../envs/environment.yaml" "../envs/environment.yaml"
script: script:
@ -608,7 +609,7 @@ if config["sector"]["retrofitting"]["retro_endogen"]:
countries=config["countries"], countries=config["countries"],
input: input:
building_stock="data/retro/data_building_stock.csv", building_stock="data/retro/data_building_stock.csv",
data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", data_tabula="data/bundle-sector/retro/tabula-calculator-calcsetbuilding.csv",
air_temperature=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc", air_temperature=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
u_values_PL="data/retro/u_values_poland.csv", u_values_PL="data/retro/u_values_poland.csv",
tax_w="data/retro/electricity_taxes_eu.csv", tax_w="data/retro/electricity_taxes_eu.csv",
@ -753,7 +754,12 @@ rule prepare_sector_network:
dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv", dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
co2_totals_name=RESOURCES + "co2_totals.csv", co2_totals_name=RESOURCES + "co2_totals.csv",
co2="data/bundle-sector/eea/UNFCCC_v23.csv", co2="data/bundle-sector/eea/UNFCCC_v23.csv",
biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv", biomass_potentials=RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_"
+ "{}.csv".format(config["biomass"]["year"])
if config["foresight"] == "overnight"
else RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
heat_profile="data/heat_load_profile_BDEW.csv", heat_profile="data/heat_load_profile_BDEW.csv",
costs="data/costs_{}.csv".format(config["costs"]["year"]) costs="data/costs_{}.csv".format(config["costs"]["year"])
if config["foresight"] == "overnight" if config["foresight"] == "overnight"
@ -14,12 +14,6 @@ localrules:
plot_networks, plot_networks,
rule all:
input:
RESULTS + "graphs/costs.pdf",
default_target: True
rule cluster_networks: rule cluster_networks:
input: input:
expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]), expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]),
@ -66,6 +60,15 @@ rule solve_sector_networks:
), ),
rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
**config["scenario"]
),
rule plot_networks: rule plot_networks:
input: input:
expand( expand(
@ -8,31 +8,62 @@ localrules:
copy_conda_env,

-rule plot_network:
-    params:
-        foresight=config["foresight"],
-        plotting=config["plotting"],
-    input:
-        network=RESULTS
-        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-        regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
-    output:
-        map=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
-        today=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
-    threads: 2
-    resources:
-        mem_mb=10000,
-    benchmark:
-        (
-            BENCHMARKS
-            + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
-        )
-    conda:
-        "../envs/environment.yaml"
-    script:
-        "../scripts/plot_network.py"
+if config["foresight"] != "perfect":
+
+    rule plot_network:
+        params:
+            foresight=config["foresight"],
+            plotting=config["plotting"],
+        input:
+            network=RESULTS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        output:
+            map=RESULTS
+            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+            today=RESULTS
+            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
+        threads: 2
+        resources:
+            mem_mb=10000,
+        benchmark:
+            (
+                BENCHMARKS
+                + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            )
+        conda:
+            "../envs/environment.yaml"
+        script:
+            "../scripts/plot_network.py"
+
+
+if config["foresight"] == "perfect":
+
+    rule plot_network:
+        params:
+            foresight=config["foresight"],
+            plotting=config["plotting"],
+        input:
+            network=RESULTS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        output:
+            **{
+                f"map_{year}": RESULTS
+                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+                + f"{year}.pdf"
+                for year in config["scenario"]["planning_horizons"]
+            },
+        threads: 2
+        resources:
+            mem_mb=10000,
+        benchmark:
+            BENCHMARKS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
+        conda:
+            "../envs/environment.yaml"
+        script:
+            "../scripts/plot_network.py"

rule copy_config:
@ -39,6 +39,24 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",
"../scripts/retrieve_databundle.py" "../scripts/retrieve_databundle.py"
if config["enable"].get("retrieve_irena"):
rule retrieve_irena:
output:
offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
log:
LOGS + "retrieve_irena.log",
resources:
mem_mb=1000,
retries: 2
conda:
"../envs/environment.yaml"
script:
"../scripts/retrieve_irena.py"
if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True): if config["enable"]["retrieve"] and config["enable"].get("retrieve_cutout", True):
rule retrieve_cutout: rule retrieve_cutout:
rules/solve_perfect.smk (new file)
@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_existing_baseyear.py"
rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv",
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 4
resources:
mem_mb=10000,
log:
LOGS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_brownfield.py"
rule prepare_perfect_foresight:
input:
**{
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ f"{year}.nc"
for year in config["scenario"]["planning_horizons"][1:]
},
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 2
resources:
mem_mb=10000,
log:
LOGS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
benchmark:
(
BENCHMARKS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/prepare_perfect_foresight.py"
rule solve_sector_network_perfect:
params:
solving=config["solving"],
foresight=config["foresight"],
sector=config["sector"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv",
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 4
resources:
mem_mb=config["solving"]["mem"],
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/solve_network.py"
rule make_summary_perfect:
input:
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"]
for clusters in config["scenario"]["clusters"]
for opts in config["scenario"]["opts"]
for sector_opts in config["scenario"]["sector_opts"]
for ll in config["scenario"]["ll"]
},
costs="data/costs_2020.csv",
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
nodal_cfs=RESULTS + "csvs/nodal_cfs.csv",
cfs=RESULTS + "csvs/cfs.csv",
costs=RESULTS + "csvs/costs.csv",
capacities=RESULTS + "csvs/capacities.csv",
curtailment=RESULTS + "csvs/curtailment.csv",
energy=RESULTS + "csvs/energy.csv",
supply=RESULTS + "csvs/supply.csv",
supply_energy=RESULTS + "csvs/supply_energy.csv",
prices=RESULTS + "csvs/prices.csv",
weighted_prices=RESULTS + "csvs/weighted_prices.csv",
market_values=RESULTS + "csvs/market_values.csv",
price_statistics=RESULTS + "csvs/price_statistics.csv",
metrics=RESULTS + "csvs/metrics.csv",
co2_emissions=RESULTS + "csvs/co2_emissions.csv",
threads: 2
resources:
mem_mb=10000,
log:
LOGS + "make_summary_perfect.log",
benchmark:
(BENCHMARKS + "make_summary_perfect")
conda:
"../envs/environment.yaml"
script:
"../scripts/make_summary_perfect.py"
ruleorder: add_existing_baseyear > add_brownfield
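# add_existing_baseyear and add_brownfield declare the same
# prenetworks-brownfield output pattern; this ruleorder, together with the
# wildcard_constraints on add_existing_baseyear, ensures the base year is
# handled by add_existing_baseyear while later horizons use add_brownfield.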
scripts/_benchmark.py (new file)
@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
"""
from __future__ import absolute_import, print_function
import logging
import os
import sys
import time
logger = logging.getLogger(__name__)
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Pipe, Process
except ImportError:
from multiprocessing.dummy import Process, Pipe
from memory_profiler import _get_memory, choose_backend
# The memory logging facilities have been adapted from memory_profiler
class MemTimer(Process):
"""
Write memory consumption over a time interval to file until signaled to
stop on the pipe.
"""
def __init__(
self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw
):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.filename = filename
self.max_usage = max_usage
self.backend = backend
self.timestamps = kw.pop("timestamps", True)
self.include_children = kw.pop("include_children", True)
super(MemTimer, self).__init__(*args, **kw)
def run(self):
# get baseline memory usage
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
n_measurements = 1
mem_usage = cur_mem if self.max_usage else [cur_mem]
if self.filename is not None:
stream = open(self.filename, "w")
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
else:
stream = None
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
n_measurements += 1
if not self.max_usage:
mem_usage.append(cur_mem)
else:
mem_usage = max(cur_mem, mem_usage)
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
if stream is not None:
stream.close()
self.pipe.send(mem_usage)
self.pipe.send(n_measurements)
class memory_logger(object):
"""
Context manager for taking and reporting memory measurements at fixed
intervals from a separate process, for the duration of a context.
Parameters
----------
filename : None|str
Name of the text file to log memory measurements, if None no log is
created (defaults to None)
interval : float
Interval between measurements (defaults to 1.)
max_usage : bool
If True, only store and report the maximum value (defaults to True)
timestamps : bool
Whether to record tuples of memory usage and timestamps; if logging to
a file timestamps are always kept (defaults to True)
include_children : bool
Whether the memory of subprocesses is to be included (default: True)
Arguments
---------
n_measurements : int
Number of measurements that have been taken
mem_usage : (float, float)|[(float, float)]
All memory measurements and timestamps (if timestamps was True) or only
the maximum memory usage and its timestamp
Note
----
The arguments are only set after all the measurements, i.e. outside of the
with statement.
Example
-------
with memory_logger(filename="memory.log", max_usage=True) as mem:
# Do a lot of long running memory intensive stuff
hard_memory_bound_stuff()
max_mem, timestamp = mem.mem_usage
"""
def __init__(
self,
filename=None,
interval=1.0,
max_usage=True,
timestamps=True,
include_children=True,
):
if filename is not None:
timestamps = True
self.filename = filename
self.interval = interval
self.max_usage = max_usage
self.timestamps = timestamps
self.include_children = include_children
def __enter__(self):
backend = choose_backend()
self.child_conn, self.parent_conn = Pipe() # this will store MemTimer's results
self.p = MemTimer(
os.getpid(),
self.interval,
self.child_conn,
self.filename,
backend=backend,
timestamps=self.timestamps,
max_usage=self.max_usage,
include_children=self.include_children,
)
self.p.start()
self.parent_conn.recv() # wait until memory logging in subprocess is ready
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.parent_conn.send(0) # finish timing
self.mem_usage = self.parent_conn.recv()
self.n_measurements = self.parent_conn.recv()
else:
self.p.terminate()
return False
class timer(object):
level = 0
opened = False
def __init__(self, name="", verbose=True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.verbose:
if self.opened:
sys.stdout.write("\n")
if len(self.name) > 0:
sys.stdout.write((".. " * self.level) + self.name + ": ")
sys.stdout.flush()
self.__class__.opened = True
self.__class__.level += 1
self.start = time.time()
return self
def print_usec(self, usec):
if usec < 1000:
print("%.1f usec" % usec)
else:
msec = usec / 1000
if msec < 1000:
print("%.1f msec" % msec)
else:
sec = msec / 1000
print("%.1f sec" % sec)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.opened and self.verbose:
sys.stdout.write(".. " * self.level)
if exc_type is None:
stop = time.time()
self.usec = usec = (stop - self.start) * 1e6
if self.verbose:
self.print_usec(usec)
elif self.verbose:
print("failed")
sys.stdout.flush()
self.__class__.level -= 1
if self.verbose:
self.__class__.opened = False
return False
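# Minimal usage sketch (import path assumed): nested timers print each
# block's elapsed time, indenting nested levels with "..":
#
#     with timer("build model"):
#         with timer("assemble matrix"):
#             total = sum(range(10**6))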
class optional(object):
def __init__(self, variable, contextman):
self.variable = variable
self.contextman = contextman
def __enter__(self):
if self.variable:
return self.contextman.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.variable:
return self.contextman.__exit__(exc_type, exc_val, exc_tb)
return False
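# A minimal combined sketch of optional + memory_logger (flag and numbers
# invented; readings are in MiB following memory_profiler's convention):
if __name__ == "__main__":
    track_memory = True  # hypothetical switch
    with optional(
        track_memory, memory_logger(filename="memory.log", interval=0.5)
    ) as mem:
        blocks = [bytearray(10**6) for _ in range(200)]  # memory-hungry work
    if track_memory:
        max_mem, timestamp = mem.mem_usage  # peak reading and its timestamp
        print(f"peak memory: {max_mem:.1f} MiB")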
@ -191,7 +191,7 @@ def progress_retrieve(url, file, disable=False):
urllib.request.urlretrieve(url, file, reporthook=update_to) urllib.request.urlretrieve(url, file, reporthook=update_to)
def mock_snakemake(rulename, configfiles=[], **wildcards): def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
""" """
This function is expected to be executed from the 'scripts'-directory of This function is expected to be executed from the 'scripts'-directory of
the snakemake project. It returns a snakemake.script.Snakemake object, the snakemake project. It returns a snakemake.script.Snakemake object,
@ -203,6 +203,8 @@ def mock_snakemake(rulename, configfiles=[], **wildcards):
---------- ----------
rulename: str rulename: str
name of the rule for which the snakemake object should be generated name of the rule for which the snakemake object should be generated
root_dir: str/path-like
path to the root directory of the snakemake project
configfiles: list, str configfiles: list, str
list of configfiles to be used to update the config list of configfiles to be used to update the config
**wildcards: **wildcards:
@ -217,7 +219,10 @@ def mock_snakemake(rulename, configfiles=[], **wildcards):
from snakemake.script import Snakemake from snakemake.script import Snakemake
script_dir = Path(__file__).parent.resolve() script_dir = Path(__file__).parent.resolve()
root_dir = script_dir.parent if root_dir is None:
root_dir = script_dir.parent
else:
root_dir = Path(root_dir).resolve()
user_in_script_dir = Path.cwd().resolve() == script_dir user_in_script_dir = Path.cwd().resolve() == script_dir
if user_in_script_dir: if user_in_script_dir:
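# A hypothetical interactive call using the new argument (rule name and path
# purely illustrative):
#
#     snakemake = mock_snakemake(
#         "build_ship_raster",
#         root_dir="/home/user/other-pypsa-eur",  # resolved via Path(...).resolve()
#     )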
@ -303,10 +308,7 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
def parse(l): def parse(l):
-    if len(l) == 1:
-        return yaml.safe_load(l[0])
-    else:
-        return {l.pop(0): parse(l)}
+    return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}
def update_config_with_sector_opts(config, sector_opts): def update_config_with_sector_opts(config, sector_opts):
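# A minimal illustration of the collapsed parse() recursion (inputs invented):
#
#     parse(["2050"])               # -> 2050, via yaml.safe_load
#     parse(["co2_budget", "0.3"])  # -> {"co2_budget": 0.3}
#     parse(["a", "b", "c"])        # -> {"a": {"b": "c"}}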
@ -41,12 +41,9 @@ def add_brownfield(n, n_p, year):
# remove assets if their optimized nominal capacity is lower than a threshold # remove assets if their optimized nominal capacity is lower than a threshold
# since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
chp_heat = c.df.index[ chp_heat = c.df.index[
( (c.df[f"{attr}_nom_extendable"] & c.df.index.str.contains("urban central"))
c.df[attr + "_nom_extendable"] & c.df.index.str.contains("CHP")
& c.df.index.str.contains("urban central") & c.df.index.str.contains("heat")
& c.df.index.str.contains("CHP")
& c.df.index.str.contains("heat")
)
] ]
threshold = snakemake.params.threshold_capacity threshold = snakemake.params.threshold_capacity
@ -60,21 +57,20 @@ def add_brownfield(n, n_p, year):
) )
n_p.mremove( n_p.mremove(
c.name, c.name,
chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat], chp_heat[c.df.loc[chp_heat, f"{attr}_nom_opt"] < threshold_chp_heat],
) )
n_p.mremove( n_p.mremove(
c.name, c.name,
c.df.index[ c.df.index[
c.df[attr + "_nom_extendable"] (c.df[f"{attr}_nom_extendable"] & ~c.df.index.isin(chp_heat))
& ~c.df.index.isin(chp_heat) & (c.df[f"{attr}_nom_opt"] < threshold)
& (c.df[attr + "_nom_opt"] < threshold)
], ],
) )
# copy over assets but fix their capacity # copy over assets but fix their capacity
c.df[attr + "_nom"] = c.df[attr + "_nom_opt"] c.df[f"{attr}_nom"] = c.df[f"{attr}_nom_opt"]
c.df[attr + "_nom_extendable"] = False c.df[f"{attr}_nom_extendable"] = False
n.import_components_from_dataframe(c.df, c.name) n.import_components_from_dataframe(c.df, c.name)
@ -124,7 +120,6 @@ def add_brownfield(n, n_p, year):
n.links.loc[new_pipes, "p_nom_min"] = 0.0 n.links.loc[new_pipes, "p_nom_min"] = 0.0
# %%
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -293,24 +293,23 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
l = opsd_load[cntry] l = opsd_load[cntry]
if len(group) == 1: if len(group) == 1:
return pd.DataFrame({group.index[0]: l}) return pd.DataFrame({group.index[0]: l})
-            else:
-                nuts3_cntry = nuts3.loc[nuts3.country == cntry]
-                transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
-                gdp_n = pd.Series(
-                    transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
-                )
-                pop_n = pd.Series(
-                    transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
-                )
+            nuts3_cntry = nuts3.loc[nuts3.country == cntry]
+            transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
+            gdp_n = pd.Series(
+                transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
+            )
+            pop_n = pd.Series(
+                transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
+            )
# relative factors 0.6 and 0.4 have been determined from a linear # relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data # regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame( return pd.DataFrame(
factors.values * l.values[:, np.newaxis], factors.values * l.values[:, np.newaxis],
index=l.index, index=l.index,
columns=factors.index, columns=factors.index,
) )
load = pd.concat( load = pd.concat(
[ [
@ -406,6 +405,7 @@ def attach_wind_and_solar(
capital_cost=capital_cost, capital_cost=capital_cost,
efficiency=costs.at[supcar, "efficiency"], efficiency=costs.at[supcar, "efficiency"],
p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(), p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
lifetime=costs.at[supcar, "lifetime"],
) )
@ -434,7 +434,7 @@ def attach_conventional_generators(
ppl = ( ppl = (
ppl.query("carrier in @carriers") ppl.query("carrier in @carriers")
.join(costs, on="carrier", rsuffix="_r") .join(costs, on="carrier", rsuffix="_r")
.rename(index=lambda s: "C" + str(s)) .rename(index=lambda s: f"C{str(s)}")
) )
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r) ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
@ -511,7 +511,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
ppl = ( ppl = (
ppl.query('carrier == "hydro"') ppl.query('carrier == "hydro"')
.reset_index(drop=True) .reset_index(drop=True)
.rename(index=lambda s: str(s) + " hydro") .rename(index=lambda s: f"{str(s)} hydro")
) )
ror = ppl.query('technology == "Run-Of-River"') ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"') phs = ppl.query('technology == "Pumped Storage"')
@ -608,16 +608,13 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
) )
if not missing_countries.empty: if not missing_countries.empty:
logger.warning( logger.warning(
"Assuming max_hours=6 for hydro reservoirs in the countries: {}".format( f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
", ".join(missing_countries)
)
) )
hydro_max_hours = hydro.max_hours.where( hydro_max_hours = hydro.max_hours.where(
hydro.max_hours > 0, hydro.country.map(max_hours_country) hydro.max_hours > 0, hydro.country.map(max_hours_country)
).fillna(6) ).fillna(6)
-    flatten_dispatch = params.get("flatten_dispatch", False)
-    if flatten_dispatch:
+    if flatten_dispatch := params.get("flatten_dispatch", False):
buffer = params.get("flatten_dispatch_buffer", 0.2) buffer = params.get("flatten_dispatch_buffer", 0.2)
average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"] average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
p_max_pu = (average_capacity_factor + buffer).clip(upper=1) p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
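# Toy example of the 0.6/0.4 GDP/population weighting in attach_load above
# (values invented; normed scales a series to sum to one):
#
#     gdp_n = pd.Series({"bus1": 30.0, "bus2": 10.0})
#     pop_n = pd.Series({"bus1": 1.0, "bus2": 3.0})
#     factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
#     # bus1: 0.6 * 0.75 + 0.4 * 0.25 = 0.55; bus2: 0.45 -- per-bus load share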
@ -45,7 +45,7 @@ def add_build_year_to_new_assets(n, baseyear):
# add -baseyear to name # add -baseyear to name
rename = pd.Series(c.df.index, c.df.index) rename = pd.Series(c.df.index, c.df.index)
rename[assets] += "-" + str(baseyear) rename[assets] += f"-{str(baseyear)}"
c.df.rename(index=rename, inplace=True) c.df.rename(index=rename, inplace=True)
# rename time-dependent # rename time-dependent
@ -252,7 +252,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "m" in snakemake.wildcards.clusters: if "m" in snakemake.wildcards.clusters:
for ind in new_capacity.index: for ind in new_capacity.index:
# existing capacities are split evenly among regions in every country # existing capacities are split evenly among regions in every country
inv_ind = [i for i in inv_busmap[ind]] inv_ind = list(inv_busmap[ind])
# for offshore the splitting only includes coastal regions # for offshore the splitting only includes coastal regions
inv_ind = [ inv_ind = [
@ -305,6 +305,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "EU" not in vars(spatial)[carrier[generator]].locations: if "EU" not in vars(spatial)[carrier[generator]].locations:
bus0 = bus0.intersection(capacity.index + " gas") bus0 = bus0.intersection(capacity.index + " gas")
# check for missing bus
missing_bus = pd.Index(bus0).difference(n.buses.index)
if not missing_bus.empty:
logger.info(f"add buses {bus0}")
n.madd(
"Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el",
)
already_build = n.links.index.intersection(asset_i) already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index) new_build = asset_i.difference(n.links.index)
lifetime_assets = lifetime.loc[grouping_year, generator].dropna() lifetime_assets = lifetime.loc[grouping_year, generator].dropna()
@ -533,13 +545,17 @@ def add_heating_capacities_installed_before_baseyear(
bus0=nodes[name], bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat", bus1=nodes[name] + " " + name + " heat",
carrier=name + " resistive heater", carrier=name + " resistive heater",
efficiency=costs.at[name_type + " resistive heater", "efficiency"], efficiency=costs.at[f"{name_type} resistive heater", "efficiency"],
-                capital_cost=costs.at[name_type + " resistive heater", "efficiency"]
-                * costs.at[name_type + " resistive heater", "fixed"],
-                p_nom=0.5
-                * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
-                * ratio
-                / costs.at[name_type + " resistive heater", "efficiency"],
+                capital_cost=(
+                    costs.at[f"{name_type} resistive heater", "efficiency"]
+                    * costs.at[f"{name_type} resistive heater", "fixed"]
+                ),
+                p_nom=(
+                    0.5
+                    * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
+                    * ratio
+                    / costs.at[f"{name_type} resistive heater", "efficiency"]
+                ),
build_year=int(grouping_year), build_year=int(grouping_year),
lifetime=costs.at[costs_name, "lifetime"], lifetime=costs.at[costs_name, "lifetime"],
) )
@ -552,16 +568,20 @@ def add_heating_capacities_installed_before_baseyear(
bus1=nodes[name] + " " + name + " heat", bus1=nodes[name] + " " + name + " heat",
bus2="co2 atmosphere", bus2="co2 atmosphere",
carrier=name + " gas boiler", carrier=name + " gas boiler",
efficiency=costs.at[name_type + " gas boiler", "efficiency"], efficiency=costs.at[f"{name_type} gas boiler", "efficiency"],
efficiency2=costs.at["gas", "CO2 intensity"], efficiency2=costs.at["gas", "CO2 intensity"],
-                capital_cost=costs.at[name_type + " gas boiler", "efficiency"]
-                * costs.at[name_type + " gas boiler", "fixed"],
-                p_nom=0.5
-                * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
-                * ratio
-                / costs.at[name_type + " gas boiler", "efficiency"],
+                capital_cost=(
+                    costs.at[f"{name_type} gas boiler", "efficiency"]
+                    * costs.at[f"{name_type} gas boiler", "fixed"]
+                ),
+                p_nom=(
+                    0.5
+                    * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
+                    * ratio
+                    / costs.at[f"{name_type} gas boiler", "efficiency"]
+                ),
build_year=int(grouping_year), build_year=int(grouping_year),
lifetime=costs.at[name_type + " gas boiler", "lifetime"], lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
) )
n.madd( n.madd(
@ -581,7 +601,7 @@ def add_heating_capacities_installed_before_baseyear(
* ratio * ratio
/ costs.at["decentral oil boiler", "efficiency"], / costs.at["decentral oil boiler", "efficiency"],
build_year=int(grouping_year), build_year=int(grouping_year),
lifetime=costs.at[name_type + " gas boiler", "lifetime"], lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
) )
# delete links with p_nom=nan corresponding to extra nodes in country # delete links with p_nom=nan corresponding to extra nodes in country
@ -605,21 +625,24 @@ def add_heating_capacities_installed_before_baseyear(
], ],
) )
# drop assets which are at the end of their lifetime
links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
n.mremove("Link", links_i)
# %%
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
"add_existing_baseyear", "add_existing_baseyear",
configfiles="config/test/config.myopic.yaml", # configfiles="config/test/config.myopic.yaml",
simpl="", simpl="",
clusters="5", clusters="37",
ll="v1.5", ll="v1.0",
opts="", opts="",
sector_opts="24H-T-H-B-I-A-solar+p3-dist1", sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2030, planning_horizons=2020,
) )
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -151,9 +151,7 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull() buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
) )
logger.info( logger.info(
"Removing buses with voltages {}".format( f'Removing buses with voltages {pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])}'
pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
)
) )
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
@ -460,11 +458,7 @@ def _remove_unconnected_components(network):
components_to_remove = component_sizes.iloc[1:] components_to_remove = component_sizes.iloc[1:]
logger.info( logger.info(
"Removing {} unconnected network components with less than {} buses. In total {} buses.".format( f"Removing {len(components_to_remove)} unconnected network components with less than {components_to_remove.max()} buses. In total {components_to_remove.sum()} buses."
len(components_to_remove),
components_to_remove.max(),
components_to_remove.sum(),
)
) )
return network[component == component_sizes.index[0]] return network[component == component_sizes.index[0]]
@ -7,9 +7,15 @@ Compute biogas and solid biomass potentials for each clustered model region
using data from JRC ENSPRESO. using data from JRC ENSPRESO.
""" """
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd import geopandas as gpd
import numpy as np
import pandas as pd import pandas as pd
AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
def build_nuts_population_data(year=2013): def build_nuts_population_data(year=2013):
pop = pd.read_csv( pop = pd.read_csv(
@ -208,13 +214,41 @@ if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5") snakemake = mock_snakemake(
"build_biomass_potentials",
simpl="",
clusters="5",
planning_horizons=2050,
)
overnight = snakemake.config["foresight"] == "overnight"
params = snakemake.params.biomass params = snakemake.params.biomass
year = params["year"] investment_year = int(snakemake.wildcards.planning_horizons)
year = params["year"] if overnight else investment_year
scenario = params["scenario"] scenario = params["scenario"]
enspreso = enspreso_biomass_potentials(year, scenario) if year > 2050:
logger.info("No biomass potentials for years after 2050, using 2050.")
max_year = max(AVAILABLE_BIOMASS_YEARS)
enspreso = enspreso_biomass_potentials(max_year, scenario)
elif year not in AVAILABLE_BIOMASS_YEARS:
before = int(np.floor(year / 10) * 10)
after = int(np.ceil(year / 10) * 10)
logger.info(
f"No biomass potentials for {year}, interpolating linearly between {before} and {after}."
)
enspreso_before = enspreso_biomass_potentials(before, scenario)
enspreso_after = enspreso_biomass_potentials(after, scenario)
fraction = (year - before) / (after - before)
enspreso = enspreso_before + fraction * (enspreso_after - enspreso_before)
else:
logger.info(f"Using biomass potentials for {year}.")
enspreso = enspreso_biomass_potentials(year, scenario)
enspreso = disaggregate_nuts0(enspreso) enspreso = disaggregate_nuts0(enspreso)
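# Worked example: a 2035 horizon (illustrative) interpolates halfway between
# the 2030 and 2040 ENSPRESO datasets:
#
#     before, after = 2030, 2040                     # floor/ceil to decades
#     fraction = (2035 - before) / (after - before)  # = 0.5
#     # enspreso_2035 = enspreso_2030 + 0.5 * (enspreso_2040 - enspreso_2030)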
@ -172,8 +172,6 @@ def build_swiss(year):
def idees_per_country(ct, year, base_dir): def idees_per_country(ct, year, base_dir):
-    ct_totals = {}
ct_idees = idees_rename.get(ct, ct) ct_idees = idees_rename.get(ct, ct)
fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx" fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx" fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
@ -183,11 +181,11 @@ def idees_per_country(ct, year, base_dir):
df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year] df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year]
ct_totals["total residential space"] = df["Space heating"]
rows = ["Advanced electric heating", "Conventional electric heating"] rows = ["Advanced electric heating", "Conventional electric heating"]
ct_totals["electricity residential space"] = df[rows].sum() ct_totals = {
"total residential space": df["Space heating"],
"electricity residential space": df[rows].sum(),
}
ct_totals["total residential water"] = df.at["Water heating"] ct_totals["total residential water"] = df.at["Water heating"]
assert df.index[23] == "Electricity" assert df.index[23] == "Electricity"
@ -29,25 +29,25 @@ def diameter_to_capacity(pipe_diameter_mm):
Based on p.15 of Based on p.15 of
https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
""" """
-    # slopes definitions
-    m0 = (1500 - 0) / (500 - 0)
    m1 = (5000 - 1500) / (600 - 500)
    m2 = (11250 - 5000) / (900 - 600)
-    m3 = (21700 - 11250) / (1200 - 900)
-    # intercept
-    a0 = 0
    a1 = -16000
    a2 = -7500
-    a3 = -20100

    if pipe_diameter_mm < 500:
+        # slopes definitions
+        m0 = (1500 - 0) / (500 - 0)
+        # intercept
+        a0 = 0
        return a0 + m0 * pipe_diameter_mm
    elif pipe_diameter_mm < 600:
        return a1 + m1 * pipe_diameter_mm
    elif pipe_diameter_mm < 900:
        return a2 + m2 * pipe_diameter_mm
    else:
+        m3 = (21700 - 11250) / (1200 - 900)
+        a3 = -20100
        return a3 + m3 * pipe_diameter_mm
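# The four linear segments meet continuously at the breakpoints; spot-check:
#
#     for d, expected in [(500, 1500), (600, 5000), (900, 11250), (1200, 21700)]:
#         assert abs(diameter_to_capacity(d) - expected) < 1e-6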
@ -154,7 +154,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake( snakemake = mock_snakemake(
"build_industrial_distribution_key", "build_industrial_distribution_key",
simpl="", simpl="",
clusters=48, clusters=128,
) )
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -167,9 +167,7 @@ def industrial_energy_demand(countries, year):
with mp.Pool(processes=nprocesses) as pool: with mp.Pool(processes=nprocesses) as pool:
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
-    demand = pd.concat(demand_l, keys=countries)
-    return demand
+    return pd.concat(demand_l, keys=countries)
if __name__ == "__main__": if __name__ == "__main__":
@ -41,7 +41,7 @@ The following heat gains and losses are considered:
- heat gain through resistive losses - heat gain through resistive losses
- heat gain through solar radiation - heat gain through solar radiation
- heat loss through radiation of the trasnmission line - heat loss through radiation of the transmission line
- heat loss through forced convection with wind - heat loss through forced convection with wind
- heat loss through natural convection - heat loss through natural convection
@ -83,8 +83,7 @@ def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
------- -------
Resistance at given temperature. Resistance at given temperature.
""" """
-    R = R_ref * (1 + alpha * (T - T_ref))
-    return R
+    return R_ref * (1 + alpha * (T - T_ref))
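# Numerically: calculate_resistance(T=353, R_ref=1.0) == 1 + 0.00403 * 60
# == 1.2418, i.e. roughly a 24 % resistance increase at the 353 K conductor
# temperature assumed in calculate_line_rating below.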
def calculate_line_rating(n, cutout): def calculate_line_rating(n, cutout):
@ -125,13 +124,12 @@ def calculate_line_rating(n, cutout):
R = calculate_resistance(T=353, R_ref=R) R = calculate_resistance(T=353, R_ref=R)
Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8) Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8)
line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3 # in mW line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3 # in mW
-    da = xr.DataArray(
+    return xr.DataArray(
        data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1),
        attrs=dict(
            description="Maximal possible power in MW for given line considering line rating"
        ),
    )
-    return da
if __name__ == "__main__": if __name__ == "__main__":
@ -89,7 +89,7 @@ logger = logging.getLogger(__name__)
def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False): def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
if not custom_ppl_query: if not custom_ppl_query:
return ppl return ppl
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"}) add_ppls = pd.read_csv(custom_powerplants, dtype={"bus": "str"})
if isinstance(custom_ppl_query, str): if isinstance(custom_ppl_query, str):
add_ppls.query(custom_ppl_query, inplace=True) add_ppls.query(custom_ppl_query, inplace=True)
return pd.concat( return pd.concat(
@ -146,8 +146,7 @@ if __name__ == "__main__":
ppl, snakemake.input.custom_powerplants, custom_ppl_query ppl, snakemake.input.custom_powerplants, custom_ppl_query
) )
-    countries_wo_ppl = set(countries) - set(ppl.Country.unique())
-    if countries_wo_ppl:
+    if countries_wo_ppl := set(countries) - set(ppl.Country.unique()):
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}") logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
substations = n.buses.query("substation_lv") substations = n.buses.query("substation_lv")
@ -102,7 +102,7 @@ solar_energy_transmittance = (
) )
# solar global radiation [kWh/(m^2a)] # solar global radiation [kWh/(m^2a)]
solar_global_radiation = pd.Series( solar_global_radiation = pd.Series(
[246, 401, 246, 148], [271, 392, 271, 160],
index=["east", "south", "west", "north"], index=["east", "south", "west", "north"],
name="solar_global_radiation [kWh/(m^2a)]", name="solar_global_radiation [kWh/(m^2a)]",
) )
@ -164,6 +164,12 @@ def prepare_building_stock_data():
}, },
inplace=True, inplace=True,
) )
building_data["feature"].replace(
{
"Construction features (U-value)": "Construction features (U-values)",
},
inplace=True,
)
building_data.country_code = building_data.country_code.str.upper() building_data.country_code = building_data.country_code.str.upper()
building_data["subsector"].replace( building_data["subsector"].replace(
@ -198,12 +204,14 @@ def prepare_building_stock_data():
} }
) )
building_data["country_code"] = building_data["country"].map(country_iso_dic)
# heated floor area ---------------------------------------------------------- # heated floor area ----------------------------------------------------------
area = building_data[ area = building_data[
(building_data.type == "Heated area [Mm²]") (building_data.type == "Heated area [Mm²]")
& (building_data.subsector != "Total") & (building_data.subsector != "Total")
] ]
area_tot = area.groupby(["country", "sector"]).sum() area_tot = area[["country", "sector", "value"]].groupby(["country", "sector"]).sum()
area = pd.concat( area = pd.concat(
[ [
area, area,
@ -223,7 +231,7 @@ def prepare_building_stock_data():
usecols=[0, 1, 2, 3], usecols=[0, 1, 2, 3],
encoding="ISO-8859-1", encoding="ISO-8859-1",
) )
area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack()) area_tot = pd.concat([area_tot, area_missing.unstack(level=-1).dropna().stack()])
area_tot = area_tot.loc[~area_tot.index.duplicated(keep="last")] area_tot = area_tot.loc[~area_tot.index.duplicated(keep="last")]
# for still missing countries calculate floor area by population size # for still missing countries calculate floor area by population size
@ -246,7 +254,7 @@ def prepare_building_stock_data():
averaged_data.index = index averaged_data.index = index
averaged_data["estimated"] = 1 averaged_data["estimated"] = 1
if ct not in area_tot.index.levels[0]: if ct not in area_tot.index.levels[0]:
area_tot = area_tot.append(averaged_data, sort=True) area_tot = pd.concat([area_tot, averaged_data], sort=True)
else: else:
area_tot.loc[averaged_data.index] = averaged_data area_tot.loc[averaged_data.index] = averaged_data
@ -272,7 +280,7 @@ def prepare_building_stock_data():
][x["bage"]].iloc[0], ][x["bage"]].iloc[0],
axis=1, axis=1,
) )
data_PL_final = data_PL_final.append(data_PL) data_PL_final = pd.concat([data_PL_final, data_PL])
u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True) u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True)
@ -609,12 +617,11 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
/ x.A_C_Ref / x.A_C_Ref
if x.name[3] != "Window" if x.name[3] != "Window"
else ( else (
window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions) (window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element)
* x.A_element
/ x.A_C_Ref / x.A_C_Ref
if x.value > window_limit(float(l), window_assumptions) )
else 0 if x.value > window_limit(float(l), window_assumptions)
), else 0,
axis=1, axis=1,
) )
@ -739,12 +746,12 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
# (1) by transmission # (1) by transmission
# calculate new U values of building elements due to additional insulation # calculate new U values of building elements due to additional insulation
for l in l_strength: for l in l_strength:
u_values["new_U_{}".format(l)] = calculate_new_u( u_values[f"new_U_{l}"] = calculate_new_u(
u_values, l, l_weight, window_assumptions u_values, l, l_weight, window_assumptions
) )
# surface area of building components [m^2] # surface area of building components [m^2]
area_element = ( area_element = (
data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]] data_tabula[[f"A_{e}" for e in u_values.index.levels[3]]]
.rename(columns=lambda x: x[2:]) .rename(columns=lambda x: x[2:])
.stack() .stack()
.unstack(-2) .unstack(-2)
@ -756,7 +763,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
# heat transfer H_tr_e [W/m^2K] through building element # heat transfer H_tr_e [W/m^2K] through building element
# U_e * A_e / A_C_Ref # U_e * A_e / A_C_Ref
columns = ["value"] + ["new_U_{}".format(l) for l in l_strength] columns = ["value"] + [f"new_U_{l}" for l in l_strength]
heat_transfer = pd.concat( heat_transfer = pd.concat(
[u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1 [u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1
) )
@ -875,10 +882,7 @@ def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
alpha = alpha_H_0 + (tau / tau_H_0) alpha = alpha_H_0 + (tau / tau_H_0)
# heat balance ratio # heat balance ratio
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0) gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
-    # gain utilisation factor
-    nu = (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))
-    return nu
+    return (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))
def calculate_space_heat_savings( def calculate_space_heat_savings(
@ -947,7 +951,8 @@ def sample_dE_costs_area(
.rename(index=rename_sectors, level=2) .rename(index=rename_sectors, level=2)
.reset_index() .reset_index()
) )
.rename(columns={"country": "country_code"}) # if uncommented, leads to the second `country_code` column
# .rename(columns={"country": "country_code"})
.set_index(["country_code", "subsector", "bage"]) .set_index(["country_code", "subsector", "bage"])
) )
@ -960,13 +965,14 @@ def sample_dE_costs_area(
) )
# map missing countries # map missing countries
for ct in countries.difference(cost_dE.index.levels[0]): for ct in set(countries).difference(cost_dE.index.levels[0]):
averaged_data = ( averaged_data = (
cost_dE.reindex(index=map_for_missings[ct], level=0) cost_dE.reindex(index=map_for_missings[ct], level=0)
-            .mean(level=1)
+            .groupby(level=1)
+            .mean()
.set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]])) .set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]]))
) )
cost_dE = cost_dE.append(averaged_data) cost_dE = pd.concat([cost_dE, averaged_data])
# weights costs after construction index # weights costs after construction index
if construction_index: if construction_index:
@ -983,24 +989,23 @@ def sample_dE_costs_area(
# drop not considered countries # drop not considered countries
cost_dE = cost_dE.reindex(countries, level=0) cost_dE = cost_dE.reindex(countries, level=0)
# get share of residential and service floor area # get share of residential and service floor area
sec_w = area_tot.value / area_tot.value.groupby(level=0).sum() sec_w = area_tot.div(area_tot.groupby(level=0).transform("sum"))
# get the total cost-energy-savings weight by sector area # get the total cost-energy-savings weight by sector area
tot = ( tot = (
-        cost_dE.mul(sec_w, axis=0)
-        .groupby(level="country_code")
-        .sum()
-        .set_index(
-            pd.MultiIndex.from_product(
-                [cost_dE.index.unique(level="country_code"), ["tot"]]
-            )
-        )
+        # sec_w has columns "estimated" and "value"
+        cost_dE.mul(sec_w.value, axis=0)
+        # for some reasons names of the levels were lost somewhere
+        # .groupby(level="country_code")
+        .groupby(level=0)
+        .sum()
+        .set_index(pd.MultiIndex.from_product([cost_dE.index.unique(level=0), ["tot"]]))
) )
cost_dE = cost_dE.append(tot).unstack().stack() cost_dE = pd.concat([cost_dE, tot]).unstack().stack()
summed_area = pd.DataFrame(area_tot.groupby("country").sum()).set_index( summed_area = pd.DataFrame(area_tot.groupby(level=0).sum()).set_index(
pd.MultiIndex.from_product([area_tot.index.unique(level="country"), ["tot"]]) pd.MultiIndex.from_product([area_tot.index.unique(level=0), ["tot"]])
) )
area_tot = area_tot.append(summed_area).unstack().stack() area_tot = pd.concat([area_tot, summed_area]).unstack().stack()
cost_per_saving = cost_dE["cost"] / ( cost_per_saving = cost_dE["cost"] / (
1 - cost_dE["dE"] 1 - cost_dE["dE"]
@ -66,11 +66,7 @@ def salt_cavern_potential_by_region(caverns, regions):
"capacity_per_area * share * area_caverns / 1000" "capacity_per_area * share * area_caverns / 1000"
) # TWh ) # TWh
-    caverns_regions = (
-        overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
-    )
-    return caverns_regions
+    return overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
if __name__ == "__main__": if __name__ == "__main__":
@ -119,7 +119,7 @@ def countries(naturalearth, country_list):
fieldnames = ( fieldnames = (
df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3") df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
) )
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2] df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[:2]
df = df.loc[ df = df.loc[
df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5)) df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
@ -81,14 +81,12 @@ def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
- pop_weighted_energy_totals["electricity rail"] - pop_weighted_energy_totals["electricity rail"]
) )
-    transport = (
+    return (
(transport_shape.multiply(energy_totals_transport) * 1e6 * nyears) (transport_shape.multiply(energy_totals_transport) * 1e6 * nyears)
.divide(efficiency_gain * ice_correction) .divide(efficiency_gain * ice_correction)
.multiply(1 + dd_EV) .multiply(1 + dd_EV)
) )
-    return transport
def transport_degree_factor( def transport_degree_factor(
temperature, temperature,
@ -132,14 +130,12 @@ def bev_availability_profile(fn, snapshots, nodes, options):
traffic.mean() - traffic.min() traffic.mean() - traffic.min()
) )
-    avail_profile = generate_periodic_profiles(
+    return generate_periodic_profiles(
dt_index=snapshots, dt_index=snapshots,
nodes=nodes, nodes=nodes,
weekly_profile=avail.values, weekly_profile=avail.values,
) )
-    return avail_profile
def bev_dsm_profile(snapshots, nodes, options): def bev_dsm_profile(snapshots, nodes, options):
dsm_week = np.zeros((24 * 7,)) dsm_week = np.zeros((24 * 7,))
@ -148,14 +144,12 @@ def bev_dsm_profile(snapshots, nodes, options):
"bev_dsm_restriction_value" "bev_dsm_restriction_value"
] ]
-    dsm_profile = generate_periodic_profiles(
+    return generate_periodic_profiles(
dt_index=snapshots, dt_index=snapshots,
nodes=nodes, nodes=nodes,
weekly_profile=dsm_week, weekly_profile=dsm_week,
) )
-    return dsm_profile
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
@ -322,9 +322,9 @@ def busmap_for_n_clusters(
neighbor_bus = n.lines.query( neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus" "bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]] ).iloc[0][["bus0", "bus1"]]
-            new_country = list(
-                set(n.buses.loc[neighbor_bus].country) - set([country])
-            )[0]
+            new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[
+                0
+            ]
logger.info( logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` " f"overwriting country `{country}` of bus `{disconnected_bus}` "
@ -33,10 +33,7 @@ def assign_locations(n):
ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
for i in ifind.unique(): for i in ifind.unique():
names = ifind.index[ifind == i] names = ifind.index[ifind == i]
-            if i == -1:
-                c.df.loc[names, "location"] = ""
-            else:
-                c.df.loc[names, "location"] = names.str[:i]
+            c.df.loc[names, "location"] = "" if i == -1 else names.str[:i]
def calculate_nodal_cfs(n, label, nodal_cfs): def calculate_nodal_cfs(n, label, nodal_cfs):
@ -397,7 +394,7 @@ def calculate_supply_energy(n, label, supply_energy):
for c in n.iterate_components(n.branch_components): for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]: for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)] items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]
if len(items) == 0: if len(items) == 0:
continue continue
@ -493,7 +490,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
"H2": ["Sabatier", "H2 Fuel Cell"], "H2": ["Sabatier", "H2 Fuel Cell"],
} }
for carrier in link_loads: for carrier, value in link_loads.items():
if carrier == "electricity": if carrier == "electricity":
suffix = "" suffix = ""
elif carrier[:5] == "space": elif carrier[:5] == "space":
@ -515,15 +512,15 @@ def calculate_weighted_prices(n, label, weighted_prices):
else: else:
load = n.loads_t.p_set[buses] load = n.loads_t.p_set[buses]
-        for tech in link_loads[carrier]:
+        for tech in value:
            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

-            if names.empty:
-                continue
-
-            load += (
-                n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
-            )
+            if not names.empty:
+                load += (
+                    n.links_t.p0[names]
+                    .groupby(n.links.loc[names, "bus0"], axis=1)
+                    .sum()
+                )
# Add H2 Store when charging # Add H2 Store when charging
# if carrier == "H2": # if carrier == "H2":
@ -650,11 +647,7 @@ def make_summaries(networks_dict):
networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"] networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"]
) )
-    df = {}
-
-    for output in outputs:
-        df[output] = pd.DataFrame(columns=columns, dtype=float)
+    df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs}
for label, filename in networks_dict.items(): for label, filename in networks_dict.items():
logger.info(f"Make summary for scenario {label}, using {filename}") logger.info(f"Make summary for scenario {label}, using {filename}")
scripts/make_summary_perfect.py (new file)
@ -0,0 +1,744 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""
import numpy as np
import pandas as pd
import pypsa
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
costs.columns.levels[0],
costs.columns.levels[1],
costs.columns.levels[2],
investments,
],
names=costs.columns.names[:3] + ["year"],
)
costs = costs.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
capital_costs = active.mul(capital_costs, axis=0)
discount = (
n.investment_period_weightings["objective"]
/ n.investment_period_weightings["years"]
)
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values
if c.name == "Link":
p = (
c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
p_all[p_all < 0.0] = 0.0
p = p_all.groupby(level=0).sum()
else:
p = (
round(c.pnl.p, ndigits=2)
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# correct sequestration cost
if c.name == "Store":
items = c.df.index[
(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
]
c.df.loc[items, "marginal_cost"] = -20.0
marginal_costs = p.mul(c.df.marginal_cost).T
# marginal_costs = active.mul(marginal_costs, axis=0)
marginal_costs_grouped = (
marginal_costs.groupby(c.df.carrier).sum().mul(discount)
)
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values
# add back in all hydro
# costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
# costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
# costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()
return costs
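# Illustration of the discounting above (assumed numbers, not model output):
# for a 10-year investment period and a 2% social discount rate, the
# objective weighting is sum(1 / 1.02**t for t in range(10)) ~ 9.16, so the
# annualised costs of that period are scaled by discount = 9.16 / 10 ~ 0.92.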
def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
cumulative_cost = pd.DataFrame(
index=df["costs"].sum().index,
columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
)
# discount cost and express them in money value of planning_horizons[0]
for r in cumulative_cost.columns:
cumulative_cost[r] = [
df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
for index in cumulative_cost.index
]
# integrate cost throughout the transition path
for r in cumulative_cost.columns:
for cluster in cumulative_cost.index.get_level_values(level=0).unique():
for lv in cumulative_cost.index.get_level_values(level=1).unique():
for sector_opts in cumulative_cost.index.get_level_values(
level=2
).unique():
cumulative_cost.loc[
(cluster, lv, sector_opts, "cumulative cost"), r
] = np.trapz(
cumulative_cost.loc[
idx[cluster, lv, sector_opts, planning_horizons], r
].values,
x=planning_horizons,
)
return cumulative_cost
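# Illustration of the integration above (assumed numbers): for
# planning_horizons = [2030, 2040, 2050] with discounted system costs of
# 10 bn EUR/a in each horizon, np.trapz([10, 10, 10], x=[2030, 2040, 2050])
# yields 200 bn EUR of cumulative cost over the 20-year transition path.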
def calculate_nodal_capacities(n, label, nodal_capacities):
# Beware: this also includes extraneous locations for country-level (e.g. biomass) or continent-wide (e.g. fossil gas/oil) assets
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
nodal_capacities_c = c.df.groupby(["location", "carrier"])[
opt_name.get(c.name, "p") + "_nom_opt"
].sum()
index = pd.MultiIndex.from_tuples(
[(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
)
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index, label] = nodal_capacities_c.values
return nodal_capacities
def calculate_capacities(n, label, capacities):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
capacities.columns.levels[0],
capacities.columns.levels[1],
capacities.columns.levels[2],
investments,
],
names=capacities.columns.names[:3] + ["year"],
)
capacities = capacities.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
caps = active.mul(caps, axis=0)
capacities_grouped = (
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
)
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(
capacities_grouped.index.union(capacities.index)
)
capacities.loc[capacities_grouped.index, label] = capacities_grouped.values
return capacities
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
.groupby(n.generators.carrier)
.sum()
)
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
curtailment[label] = (((avail - used) / avail) * 100).round(3)
return curtailment
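# Example for the curtailment metric (assumed numbers): if 100 MWh of a
# carrier were available but only 90 MWh were dispatched, curtailment is
# (100 - 90) / 100 * 100 = 10.0, i.e. 10% of the available energy.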
def calculate_energy(n, label, energy):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
energy.columns.levels[0],
energy.columns.levels[1],
energy.columns.levels[2],
investments,
],
names=energy.columns.names[:3] + ["year"],
)
energy = energy.reindex(cols, axis=1)
for c in n.iterate_components(n.one_port_components | n.branch_components):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier, axis=1)
.sum()
)
else:
c_energies = pd.DataFrame(
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
)
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = (
c.pnl["p" + port]
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""]
totals[no_bus] = float(
n.component_attrs[c.name].loc["p" + port, "default"]
)
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
c_energies = pd.concat([c_energies.T], keys=[c.list_name])
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index, label] = c_energies.values
return energy
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses aggregated by
carrier.
"""
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
c.pnl.p[items]
.max()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
continue
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
return supply
def calculate_supply_energy(n, label, supply_energy):
"""
Calculate the total energy supply/consumption of each component at the buses
aggregated by carrier.
"""
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
supply_energy.columns.levels[0],
supply_energy.columns.levels[1],
supply_energy.columns.levels[2],
investments,
],
names=supply_energy.columns.names[:3] + ["year"],
)
supply_energy = supply_energy.reindex(cols, axis=1)
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
if c.name == "Generator":
weightings = n.snapshot_weightings.generators
else:
weightings = n.snapshot_weightings.stores
if i in ["oil", "co2", "H2"]:
if c.name == "Load":
c.df.loc[items, "carrier"] = [
load.split("-202")[0] for load in items
]
if i == "oil" and c.name == "Generator":
c.df.loc[items, "carrier"] = "imported oil"
s = (
c.pnl.p[items]
.multiply(weightings, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
.T
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
(-1)
* c.pnl["p" + end]
.reindex(items, axis=1)
.multiply(n.snapshot_weightings.objective, axis=0)
.groupby(level=0)
.sum()
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
).T
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
return supply_energy
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
).union(metrics.index)
)
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
].sum()
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume", label] = metrics.loc[
["line_volume_AC", "line_volume_DC"], label
].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
return prices
def calculate_weighted_prices(n, label, weighted_prices):
# Warning: doesn't include storage units as loads
weighted_prices = weighted_prices.reindex(
pd.Index(
[
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2",
]
)
)
link_loads = {
"electricity": [
"heat pump",
"resistive heater",
"battery charger",
"H2 Electrolysis",
],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier, value in link_loads.items():
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
suffix = carrier[5:]
else:
suffix = " " + carrier
buses = n.buses.index[n.buses.index.str[2:] == suffix]
if buses.empty:
continue
load = (
pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
if carrier in ["H2", "gas"]
else n.loads_t.p_set.reindex(buses, axis=1)
)
for tech in value:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += (
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
# Add H2 Store when charging
# if carrier == "H2":
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
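# Sketch of the load-weighting above (assumed numbers): loads of [1, 3] MWh
# at marginal prices of [10, 20] EUR/MWh give a weighted price of
# (1 * 10 + 3 * 20) / (1 + 3) = 17.5 EUR/MWh, rather than the plain
# time-average of 15 EUR/MWh reported by calculate_prices.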
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
carrier = "AC"
buses = n.buses.index[n.buses.carrier == carrier]
## First do market value of generators ##
generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]
techs = n.generators.loc[generators, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
gens = generators[n.generators.loc[generators, "carrier"] == tech]
dispatch = (
n.generators_t.p[gens]
.groupby(n.generators.loc[gens, "bus"], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
## Now do market value of links ##
for i in ["0", "1"]:
all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]
techs = n.links.loc[all_links, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
links = all_links[n.links.loc[all_links, "carrier"] == tech]
dispatch = (
n.links_t["p" + i][links]
.groupby(n.links.loc[links, "bus" + i], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
return market_values
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(
price_statistics.index.union(
pd.Index(["zero_hours", "mean", "standard_deviation"])
)
)
buses = n.buses.index[n.buses.carrier == "AC"]
threshold = 0.1 # higher than phoney marginal_cost of wind/solar
df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)
df[n.buses_t.marginal_price[buses] < threshold] = 1.0
price_statistics.at["zero_hours", label] = df.sum().sum() / (
df.shape[0] * df.shape[1]
)
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()
price_statistics.at["standard_deviation", label] = (
n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
)
return price_statistics
def calculate_co2_emissions(n, label, df):
carattr = "co2_emissions"
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
return
weightings = n.snapshot_weightings.generators.mul(
n.investment_period_weightings["years"]
.reindex(n.snapshots)
.fillna(method="bfill")
.fillna(1.0),
axis=0,
)
# generators
gens = n.generators.query("carrier in @emissions.index")
if not gens.empty:
em_pu = gens.carrier.map(emissions) / gens.efficiency
em_pu = (
weightings["generators"].to_frame("weightings")
@ em_pu.to_frame("weightings").T
)
emitted = n.generators_t.p[gens.index].mul(em_pu)
emitted_grouped = (
emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
)
df = df.reindex(emitted_grouped.index.union(df.index))
df.loc[emitted_grouped.index, label] = emitted_grouped.values
if any(n.stores.carrier == "co2"):
co2_i = n.stores[n.stores.carrier == "co2"].index
df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0]
return df
outputs = [
"nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
"co2_emissions",
]
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["cluster", "lv", "opt"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in networks_dict.items():
print(label, filename)
try:
n = pypsa.Network(filename)
except OSError:
print(label, " not solved yet.")
continue
# del networks_dict[label]
if not hasattr(n, "objective"):
print(label, " not solved correctly. Check log if infeasible or unbounded.")
continue
assign_carriers(n)
assign_locations(n)
for output in outputs:
df[output] = globals()["calculate_" + output](n, label, df[output])
return df
def to_csv(df):
for key in df:
df[key] = df[key].apply(pd.to_numeric)
df[key].to_csv(snakemake.output[key])
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("make_summary_perfect")
run = snakemake.config["run"]["name"]
if run != "":
run += "/"
networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
+ run
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]
for lv in snakemake.config["scenario"]["ll"]
}
print(networks_dict)
nyears = 1
costs_db = prepare_costs(
snakemake.input.costs,
snakemake.config["costs"],
nyears,
)
df = make_summaries(networks_dict)
df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum()
to_csv(df)

View File

@ -24,7 +24,7 @@ from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
plt.style.use(["ggplot", "matplotlibrc"]) plt.style.use(["ggplot"])
def rename_techs_tyndp(tech): def rename_techs_tyndp(tech):
@ -145,12 +145,12 @@ def plot_map(
ac_color = "rosybrown" ac_color = "rosybrown"
dc_color = "darkseagreen" dc_color = "darkseagreen"
title = "added grid"
if snakemake.wildcards["ll"] == "v1.0": if snakemake.wildcards["ll"] == "v1.0":
# should be zero # should be zero
line_widths = n.lines.s_nom_opt - n.lines.s_nom line_widths = n.lines.s_nom_opt - n.lines.s_nom
link_widths = n.links.p_nom_opt - n.links.p_nom link_widths = n.links.p_nom_opt - n.links.p_nom
title = "added grid"
if transmission: if transmission:
line_widths = n.lines.s_nom_opt line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt link_widths = n.links.p_nom_opt
@ -160,8 +160,6 @@ def plot_map(
else: else:
line_widths = n.lines.s_nom_opt - n.lines.s_nom_min line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
link_widths = n.links.p_nom_opt - n.links.p_nom_min link_widths = n.links.p_nom_opt - n.links.p_nom_min
title = "added grid"
if transmission: if transmission:
line_widths = n.lines.s_nom_opt line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt link_widths = n.links.p_nom_opt
@ -262,12 +260,7 @@ def group_pipes(df, drop_direction=False):
lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
axis=1, axis=1,
) )
# group pipe lines connecting the same buses and rename them for plotting return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
pipe_capacity = df.groupby(level=0).agg(
{"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
)
return pipe_capacity
def plot_h2_map(network, regions): def plot_h2_map(network, regions):
@ -766,11 +759,13 @@ def plot_series(network, carrier="AC", name="test"):
supply = pd.concat( supply = pd.concat(
( (
supply, supply,
(-1) (
* c.pnl["p" + str(i)] -1
.loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]] * c.pnl[f"p{str(i)}"]
.groupby(c.df.carrier, axis=1) .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]]
.sum(), .groupby(c.df.carrier, axis=1)
.sum()
),
), ),
axis=1, axis=1,
) )
@ -913,6 +908,158 @@ def plot_series(network, carrier="AC", name="test"):
) )
def plot_map_perfect(
network,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10,
):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
# investment periods
investments = n.snapshots.levels[0]
costs = {}
for comp in components:
df_c = n.df(comp)
if df_c.empty:
continue
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
active = pd.concat(
[n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
axis=1,
).astype(int)
capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
capital_cost_t = (
(active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location, n.df(comp).nice_group])
.sum()
)
capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
costs[comp] = capital_cost_t
costs = pd.concat(costs).groupby(level=[1, 2]).sum()
costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
new_columns = preferred_order.intersection(costs.index.levels[1]).append(
costs.index.levels[1].difference(preferred_order)
)
costs = costs.reindex(new_columns, level=1)
for item in new_columns:
if item not in snakemake.config["plotting"]["tech_colors"]:
print(
"Warning!",
item,
"not in config/plotting/tech_colors, assign random color",
)
snakemake.config["plotting"]["tech_colors"] = "pink"
n.links.drop(
n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
inplace=True,
)
# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
# PDF has minimum width, so set lines below the threshold to zero width
line_lower_threshold = 0.0
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
title = "Today's transmission"
line_widths[line_widths < line_lower_threshold] = 0.0
link_widths[link_widths < line_lower_threshold] = 0.0
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
for year in costs.columns:
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig.suptitle(year)
n.plot(
bus_sizes=costs[year] / bus_size_factor,
bus_colors=snakemake.config["plotting"]["tech_colors"],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax,
**map_opts,
)
sizes = [20, 10, 5]
labels = [f"{s} bEUR/a" for s in sizes]
sizes = [s / bus_size_factor * 1e9 for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.01, 1.06),
labelspacing=0.8,
frameon=False,
handletextpad=0,
title="system cost",
)
add_legend_circles(
ax,
sizes,
labels,
srid=n.srid,
patch_kw=dict(facecolor="lightgrey"),
legend_kw=legend_kw,
)
sizes = [10, 5]
labels = [f"{s} GW" for s in sizes]
scale = 1e3 / linewidth_factor
sizes = [s * scale for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.27, 1.06),
frameon=False,
labelspacing=0.8,
handletextpad=1,
title=title,
)
add_legend_lines(
ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
)
legend_kw = dict(
bbox_to_anchor=(1.52, 1.04),
frameon=False,
)
fig.savefig(
snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
)
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -921,10 +1068,9 @@ if __name__ == "__main__":
"plot_network", "plot_network",
simpl="", simpl="",
opts="", opts="",
clusters="5", clusters="37",
ll="v1.5", ll="v1.0",
sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1", sector_opts="4380H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
) )
logging.basicConfig(level=snakemake.config["logging"]["level"]) logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -938,16 +1084,23 @@ if __name__ == "__main__":
if map_opts["boundaries"] is None: if map_opts["boundaries"] is None:
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
plot_map( if snakemake.params["foresight"] == "perfect":
n, plot_map_perfect(
components=["generators", "links", "stores", "storage_units"], n,
bus_size_factor=2e10, components=["Link", "Store", "StorageUnit", "Generator"],
transmission=False, bus_size_factor=2e10,
) )
else:
plot_map(
n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=2e10,
transmission=False,
)
plot_h2_map(n, regions) plot_h2_map(n, regions)
plot_ch4_map(n) plot_ch4_map(n)
plot_map_without(n) plot_map_without(n)
# plot_series(n, carrier="AC", name=suffix) # plot_series(n, carrier="AC", name=suffix)
# plot_series(n, carrier="heat", name=suffix) # plot_series(n, carrier="heat", name=suffix)

View File

@ -33,8 +33,6 @@ if __name__ == "__main__":
lambda s: s != "", "lightgrey" lambda s: s != "", "lightgrey"
) )
# %%
def rename_index(ds): def rename_index(ds):
specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})") specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})")
generic = ds.index.get_level_values("carrier") generic = ds.index.get_level_values("carrier")

View File

@ -49,6 +49,10 @@ def rename_techs(label):
# "H2 Fuel Cell": "hydrogen storage", # "H2 Fuel Cell": "hydrogen storage",
# "H2 pipeline": "hydrogen storage", # "H2 pipeline": "hydrogen storage",
"battery": "battery storage", "battery": "battery storage",
"H2 for industry": "H2 for industry",
"land transport fuel cell": "land transport fuel cell",
"land transport oil": "land transport oil",
"oil shipping": "shipping oil",
# "CC": "CC" # "CC": "CC"
} }
@ -157,11 +161,11 @@ def plot_costs():
df.index.difference(preferred_order) df.index.difference(preferred_order)
) )
new_columns = df.sum().sort_values().index # new_columns = df.sum().sort_values().index
fig, ax = plt.subplots(figsize=(12, 8)) fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot( df.loc[new_index].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
@ -213,17 +217,22 @@ def plot_energy():
logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
if df.empty:
fig, ax = plt.subplots(figsize=(12, 8))
fig.savefig(snakemake.output.energy, bbox_inches="tight")
return
new_index = preferred_order.intersection(df.index).append( new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order) df.index.difference(preferred_order)
) )
new_columns = df.columns.sort_values() # new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8)) fig, ax = plt.subplots(figsize=(12, 8))
logger.debug(df.loc[new_index, new_columns]) logger.debug(df.loc[new_index])
df.loc[new_index, new_columns].T.plot( df.loc[new_index].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
@ -267,8 +276,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers i for i in balances_df.index.levels[0] if i not in co2_carriers
] ]
fig, ax = plt.subplots(figsize=(12, 8))
for k, v in balances.items(): for k, v in balances.items():
df = balances_df.loc[v] df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum() df = df.groupby(df.index.get_level_values(2)).sum()
@ -279,7 +286,7 @@ def plot_balances():
# remove trailing link ports # remove trailing link ports
df.index = [ df.index = [
i[:-1] i[:-1]
if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"])) if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"]))
else i else i
for i in df.index for i in df.index
] ]
@ -290,11 +297,7 @@ def plot_balances():
df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10 df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10
] ]
if v[0] in co2_carriers: units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a"
units = "MtCO2/a"
else:
units = "TWh/a"
logger.debug( logger.debug(
f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}" f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
) )
@ -313,6 +316,8 @@ def plot_balances():
new_columns = df.columns.sort_values() new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot( df.loc[new_index, new_columns].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
@ -345,8 +350,6 @@ def plot_balances():
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight") fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
plt.cla()
def historical_emissions(countries): def historical_emissions(countries):
""" """
@ -354,8 +357,7 @@ def historical_emissions(countries):
""" """
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221) # downloaded 201228 (modified by EEA last on 201221)
fn = "data/bundle-sector/eea/UNFCCC_v23.csv" df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False)
df = pd.read_csv(fn, encoding="latin-1")
df.loc[df["Year"] == "1985-1987", "Year"] = 1986 df.loc[df["Year"] == "1985-1987", "Year"] = 1986
df["Year"] = df["Year"].astype(int) df["Year"] = df["Year"].astype(int)
df = df.set_index( df = df.set_index(
@ -379,18 +381,21 @@ def historical_emissions(countries):
e["waste management"] = "5 - Waste management" e["waste management"] = "5 - Waste management"
e["other"] = "6 - Other Sector" e["other"] = "6 - Other Sector"
e["indirect"] = "ind_CO2 - Indirect CO2" e["indirect"] = "ind_CO2 - Indirect CO2"
e["total wL"] = "Total (with LULUCF)" e["other LULUCF"] = "4.H - Other LULUCF"
e["total woL"] = "Total (without LULUCF)"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"] pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in countries: if "GB" in countries:
countries.remove("GB") countries.remove("GB")
countries.append("UK") countries.append("UK")
# remove countries which are not included in eea historical emission dataset year = df.index.levels[0][df.index.levels[0] >= 1990]
countries_to_remove = {"AL", "BA", "ME", "MK", "RS"}
countries = list(set(countries) - countries_to_remove) missing = pd.Index(countries).difference(df.index.levels[2])
year = np.arange(1990, 2018).tolist() if not missing.empty:
logger.warning(
f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
)
countries = pd.Index(df.index.levels[2]).intersection(countries)
idx = pd.IndexSlice idx = pd.IndexSlice
co2_totals = ( co2_totals = (
@ -453,18 +458,12 @@ def plot_carbon_budget_distribution(input_eurostat):
plt.rcParams["xtick.labelsize"] = 20 plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20 plt.rcParams["ytick.labelsize"] = 20
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
path_cb = "results/" + snakemake.params.RDIR + "csvs/"
countries = snakemake.params.countries
emissions_scope = snakemake.params.emissions_scope emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2 input_co2 = snakemake.input.co2
# historic emissions
countries = snakemake.params.countries
e_1990 = co2_emissions_year( e_1990 = co2_emissions_year(
countries, countries,
input_eurostat, input_eurostat,
@ -474,15 +473,37 @@ def plot_carbon_budget_distribution(input_eurostat):
input_co2, input_co2,
year=1990, year=1990,
) )
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
emissions = historical_emissions(countries) emissions = historical_emissions(countries)
# add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
emissions.loc[2019] = 2.971372
emissions.loc[2020] = 2.691958
emissions.loc[2021] = 2.869355
if snakemake.config["foresight"] == "myopic":
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
["cb"]
]
co2_cap *= e_1990
else:
supply_energy = pd.read_csv(
snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
)
co2_cap = (
supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
)
co2_cap.rename(index=lambda x: int(x), inplace=True)
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22)
# ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
ax1.plot(emissions, color="black", linewidth=3, label=None) ax1.plot(emissions, color="black", linewidth=3, label=None)
# plot committed and uder-discussion targets # plot committed and under-discussion targets
# (notice that historical emissions include all countries in the # (notice that historical emissions include all countries in the
# network, but targets refer to EU) # network, but targets refer to EU)
ax1.plot( ax1.plot(
@ -499,7 +520,7 @@ def plot_carbon_budget_distribution(input_eurostat):
[0.45 * emissions[1990]], [0.45 * emissions[1990]],
marker="*", marker="*",
markersize=12, markersize=12,
markerfacecolor="white", markerfacecolor="black",
markeredgecolor="black", markeredgecolor="black",
) )
@ -523,21 +544,7 @@ def plot_carbon_budget_distribution(input_eurostat):
ax1.plot( ax1.plot(
[2050], [2050],
[0.01 * emissions[1990]], [0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
linewidth=0,
markeredgecolor="black",
label="EU under-discussion target",
zorder=10,
clip_on=False,
)
ax1.plot(
[2050],
[0.125 * emissions[1990]],
"ro",
marker="*", marker="*",
markersize=12, markersize=12,
markerfacecolor="black", markerfacecolor="black",
@ -545,12 +552,16 @@ def plot_carbon_budget_distribution(input_eurostat):
label="EU committed target", label="EU committed target",
) )
for col in co2_cap.columns:
ax1.plot(co2_cap[col], linewidth=3, label=col)
ax1.legend( ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
) )
path_cb_plot = "results/" + snakemake.params.RDIR + "graphs/" plt.grid(axis="y")
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300) path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
plt.savefig(path, bbox_inches="tight")
if __name__ == "__main__": if __name__ == "__main__":
@ -571,6 +582,5 @@ if __name__ == "__main__":
for sector_opts in snakemake.params.sector_opts: for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-") opts = sector_opts.split("-")
for o in opts: if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect":
if "cb" in o: plot_carbon_budget_distribution(snakemake.input.eurostat)
plot_carbon_budget_distribution(snakemake.input.eurostat)

View File

@ -84,13 +84,9 @@ def cross_border_time_series(countries, data):
df_neg.plot.area( df_neg.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1] ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
) )
if (axis % 2) == 0: title = "Historic" if (axis % 2) == 0 else "Optimized"
title = "Historic"
else:
title = "Optimized"
ax[axis].set_title( ax[axis].set_title(
title + " Import / Export for " + cc.convert(country, to="name_short") f"{title} Import / Export for " + cc.convert(country, to="name_short")
) )
# Custom legend elements # Custom legend elements
@ -137,16 +133,12 @@ def cross_border_bar(countries, data):
df_country = sort_one_country(country, df) df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0) df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
if (order % 2) == 0: title = "Historic" if (order % 2) == 0 else "Optimized"
title = "Historic"
else:
title = "Optimized"
df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename( df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")} {0: f"{title} " + cc.convert(country, to="name_short")}
) )
df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename( df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")} {0: f"{title} " + cc.convert(country, to="name_short")}
) )
df_positive = pd.concat([df_positive_new, df_positive]) df_positive = pd.concat([df_positive_new, df_positive])

View File

@ -274,7 +274,6 @@ def set_line_nom_max(
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
# %%
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake

View File

@ -0,0 +1,548 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Concatenate PyPSA networks of single investment periods into one network.
"""
import logging
import re
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
logger = logging.getLogger(__name__)
# helper functions ---------------------------------------------------
def get_missing(df, n, c):
"""
Get the assets of df for component c which are still missing in network n.
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
df_final = getattr(n, c)
missing_i = df.index.difference(df_final.index)
return df.loc[missing_i]
def get_social_discount(t, r=0.01):
"""
Calculate the social discount factor for a given time t and social
discount rate r [per unit].
"""
return 1 / (1 + r) ** t
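# Illustration (assumed rate): with r = 0.02, a cost occurring t = 10 years
# after the base year is weighted by
#     get_social_discount(10, r=0.02)  # = 1 / 1.02**10 ~ 0.8203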
def get_investment_weighting(time_weighting, r=0.01):
"""
Define cost weighting.
Returns cost weightings depending on the time_weighting
(pd.Series) and the social discount rate r.
"""
end = time_weighting.cumsum()
start = time_weighting.cumsum().shift().fillna(0)
return pd.concat([start, end], axis=1).apply(
lambda x: sum(get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))),
axis=1,
)
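# Sketch (assumed periods): for investment periods [2030, 2040, 2050] with
# time_weighting = pd.Series([10, 10, 10], index=[2030, 2040, 2050]) and
# r = 0.02, each period weight is the sum of the yearly discount factors in
# [start, end), e.g. sum(1 / 1.02**t for t in range(0, 10)) ~ 9.16 for 2030.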
def add_year_to_constraints(n, baseyear):
"""
Add investment period to global constraints and rename index.
Parameters
----------
n : pypsa.Network
baseyear : int
year in which optimized assets are built
"""
for c in n.iterate_components(["GlobalConstraint"]):
c.df["investment_period"] = baseyear
c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)
def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with line
expansion.
Losses of DC links are assumed to be 3% per 1000 km.
"""
logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
n.madd(
"Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost,
)
# Remove AC lines
logger.info("Removing AC lines")
lines_rm = n.lines.index
n.mremove("Line", lines_rm)
# Set efficiency of all DC links to include losses depending on length
n.links.loc[n.links.carrier == "DC", "efficiency"] = (
1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
)
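# Example of the loss assumption: a 500 km DC link receives an efficiency of
# 1 - 0.03 * 500 / 1000 = 0.985, i.e. 1.5% losses.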
def adjust_electricity_grid(n, year, years):
"""
Add carrier to lines. Replace AC lines with DC links in case of line
expansion. Add lifetime to DC links in case of line expansion.
Parameters
----------
n : pypsa.Network
year : int
year in which optimized assets are built
years: list
investment periods
"""
n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier == "DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
hvdc_transport_model(n)
links_i = n.links[n.links.carrier == "DC"].index
n.links.loc[links_i, "lifetime"] = 100
if year != years[0]:
n.links.loc[links_i, "p_nom_min"] = 0
n.links.loc[links_i, "p_nom"] = 0
# --------------------------------------------------------------------
def concat_networks(years):
"""
Concatenate the given pypsa networks and add the build_year.
Return:
n : pypsa.Network for the whole planning horizon
"""
# input paths of sector coupling networks
network_paths = [snakemake.input.brownfield_network] + [
snakemake.input[f"network_{year}"] for year in years[1:]
]
# final concatenated network
n = pypsa.Network()
# iterate over single year networks and concat to perfect foresight network
for i, network_path in enumerate(network_paths):
year = years[i]
network = pypsa.Network(network_path)
adjust_electricity_grid(network, year, years)
add_build_year_to_new_assets(network, year)
# static ----------------------------------
# (1) add buses and carriers
for component in network.iterate_components(["Bus", "Carrier"]):
df_year = component.df
# get missing assets
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# (2) add generators, links, stores and loads
for component in network.iterate_components(
["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
):
df_year = component.df.copy()
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# time variant --------------------------------------------------
network_sns = pd.MultiIndex.from_product([[year], network.snapshots])
snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns)
n.set_snapshots(snapshots)
for component in network.iterate_components():
pnl = getattr(n, component.list_name + "_t")
for k in component.pnl.keys():
pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
continue
if component.name == "Load":
static_load = network.loads.loc[network.loads.p_set != 0]
static_load_t = expand_series(static_load.p_set, network_sns).T
pnl_year = pd.concat(
[pnl_year.reindex(network_sns), static_load_t], axis=1
)
columns = (pnl[k].columns.union(pnl_year.columns)).unique()
pnl[k] = pnl[k].reindex(columns=columns)
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
else:
# this is to avoid adding multiple times assets with
# infinite lifetime as ror
cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values
# (3) global constraints
for component in network.iterate_components(["GlobalConstraint"]):
add_year_to_constraints(network, year)
import_components_from_dataframe(n, component.df, component.name)
# set investment periods
n.investment_periods = n.snapshots.levels[0]
# weighting of the investment period -> assuming last period same weighting as the period before
time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
n.investment_period_weightings["years"] = time_w
# set objective weightings
objective_w = get_investment_weighting(
n.investment_period_weightings["years"], social_discountrate
)
n.investment_period_weightings["objective"] = objective_w
# all former static loads are now time-dependent -> set static = 0
n.loads["p_set"] = 0
n.loads_t.p_set.fillna(0, inplace=True)
return n
def adjust_stores(n):
"""
Make sure that stores still behave cyclically over one year and not over
the whole modelling horizon.
"""
# cyclic constraint
cyclic_i = n.stores[n.stores.e_cyclic].index
n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
n.stores.loc[cyclic_i, "e_cyclic"] = False
# non cyclic store assumptions
non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"]
co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index
n.stores.loc[co2_i, "e_cyclic_per_period"] = False
n.stores.loc[co2_i, "e_cyclic"] = False
# e_initial at beginning of each investment period
e_initial_store = ["solid biomass", "biogas"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
# n.stores.loc[co2_i, "e_initial"] *= 10
# n.stores.loc[co2_i, "e_nom"] *= 10
e_initial_store = ["co2 stored"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
return n
def set_phase_out(n, carrier, ct, phase_out_year):
"""
Set planned phase-outs for a given carrier, country (ct) and planned year
of phase-out (phase_out_year).
"""
df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
# assets which are going to be phased out before end of their lifetime
assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
build_year = n.links.loc[assets_i, "build_year"]
# adjust lifetime
n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)
def set_all_phase_outs(n):
# TODO move this to a csv or to the config
planned = [
(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2030),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025),
(["coal", "lignite"], "DK", 2030),
(["coal", "lignite"], "FI", 2030),
(["coal", "lignite"], "HU", 2030),
(["coal", "lignite"], "SK", 2030),
(["coal", "lignite"], "GR", 2030),
(["coal", "lignite"], "IE", 2030),
(["coal", "lignite"], "NL", 2030),
(["coal", "lignite"], "RS", 2030),
]
for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier, ct, phase_out_year)
# remove assets which are already phased out
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
logger.info(f"add carbon budget of {budget}")
n.add(
"GlobalConstraint",
"Budget",
type="Co2Budget",
carrier_attribute="co2_emissions",
sense="<=",
constant=budget,
investment_period=n.investment_periods[-1],
)
# drop other CO2 limits
drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
n.mremove("GlobalConstraint", drop_i)
n.add(
"GlobalConstraint",
"carbon_neutral",
type="co2_limit",
carrier_attribute="co2_emissions",
sense="<=",
constant=0,
investment_period=n.investment_periods[-1],
)
# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990
annual_reduction = (emissions_2019 - target_2030) / 11
first_year = n.snapshots.levels[0][0]
time_weightings = n.investment_period_weightings.loc[first_year, "years"]
co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
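# e.g. for first_year = 2025 (illustrative): annual_reduction =
# (3.344096 - 0.45 * 4.53693) / 11 ~ 0.1184 Gt/a, so
# co2min ~ 3.344096 - 6 * 0.1184 ~ 2.63 Gt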
logger.info(f"add minimum emissions for {first_year} of {co2min} t CO2/a")
n.add(
"GlobalConstraint",
f"Co2Min-{first_year}",
type="Co2min",
carrier_attribute="co2_emissions",
sense=">=",
investment_period=first_year,
constant=co2min * 1e9 * time_weightings,
)
return n
def adjust_lvlimit(n):
"""
Convert global constraints for single investment periods to one uniform
constraint if all attributes stay the same.
"""
c = "GlobalConstraint"
cols = ["carrier_attribute", "sense", "constant", "type"]
glc_type = "transmission_volume_expansion_limit"
if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
glc.index = pd.Index(["lv_limit"])
remove_i = n.df(c)[n.df(c).type == glc_type].index
n.mremove(c, remove_i)
import_components_from_dataframe(n, glc, c)
return n
def adjust_CO2_glc(n):
c = "GlobalConstraint"
glc_name = "CO2Limit"
glc_type = "primary_energy"
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
n.df(c).loc[mask, "type"] = "co2_limit"
return n
def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2.
Add H2 boilers for heating for all existing gas boilers.
"""
c = "Link"
logger.info("Add H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
df = n.links.loc[gas_i]
# adjust bus 0
df["bus0"] = df.bus1.map(n.buses.location) + " H2"
# rename carrier and index
df["carrier"] = df.carrier.apply(
lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
)
df.rename(
index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
)
# TODO: costs for retrofitting
df["capital_cost"] = 100
# set existing capacity to zero
df["p_nom"] = 0
df["p_nom_extendable"] = True
# add H2 boilers to network
import_components_from_dataframe(n, df, c)
def apply_time_segmentation_perfect(
n, segments, solver_name="cbc", overwrite_time_dependent=True
):
"""
Aggregate time series to segments with different lengths.
Input:
n: pypsa Network
segments: (int) number of segments in which the typical period should be
subdivided
solver_name: (str) name of solver
overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network
with typical time series created by tsam
"""
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found. Install via 'pip install tsam'."
)
# get all time-dependent data
columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"])
raw = pd.DataFrame(index=n.snapshots, columns=columns)
for c in n.iterate_components():
for attr, pnl in c.pnl.items():
# exclude e_min_pu which is used for SOC of EVs in the morning
if not pnl.empty and attr != "e_min_pu":
df = pnl.copy()
df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
raw = pd.concat([raw, df], axis=1)
raw = raw.dropna(axis=1)
sn_weightings = {}
for year in raw.index.levels[0]:
logger.info(f"Find representative snapshots for {year}.")
raw_t = raw.loc[year]
# normalise all time-dependent data
annual_max = raw_t.max().replace(0, 1)
raw_t = raw_t.div(annual_max, level=0)
# get representative segments
agg = tsam.TimeSeriesAggregation(
raw_t,
hoursPerPeriod=len(raw_t),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
snapshots = pd.DatetimeIndex(timesteps)
sn_weightings[year] = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
sn_weightings = pd.concat(sn_weightings)
n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
return n
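# Minimal usage sketch (assuming a locally installed solver such as "cbc"):
#     n = apply_time_segmentation_perfect(n, segments=100, solver_name="cbc")
# Each investment period is then reduced to 100 segments of varying length,
# with the snapshot weightings scaled by the segment durations.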
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
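# e.g. an (assumed) sector_opts token "sdr2" sets the rate to 2 / 100 = 0.02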
logger.info(
f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%"
)
# concat prenetworks of planning horizon to single network ------------
n = concat_networks(years)
# temporal aggregate
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name)
# adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n)
# adjust global constraints CO2 limit
n = adjust_CO2_glc(n)
# adjust stores to multi period investment
n = adjust_stores(n)
# set phase outs
set_all_phase_outs(n)
# add H2 boiler
add_H2_boilers(n)
# set carbon constraints
n = set_carbon_constraints(n, opts)
# export network
n.export_to_netcdf(snakemake.output[0])

View File

@@ -184,10 +184,7 @@ def get(item, investment_year=None):
    """
    Check whether item depends on investment year.
    """
-    if isinstance(item, dict):
-        return item[investment_year]
-    else:
-        return item
+    return item[investment_year] if isinstance(item, dict) else item

def co2_emissions_year(

@@ -220,7 +217,7 @@ def co2_emissions_year(
# TODO: move to own rule with sector-opts wildcard?
-def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year, input_co2):
+def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
    """
    Distribute carbon budget following beta or exponential transition path.
    """

@@ -413,11 +410,9 @@ def update_wind_solar_costs(n, costs):
            # e.g. clusters == 37m means that VRE generators are left
            # at clustering of simplified network, but that they are
            # connected to 37-node network
-            if snakemake.wildcards.clusters[-1:] == "m":
-                genmap = busmap_s
-            else:
-                genmap = clustermaps
+            genmap = (
+                busmap_s if snakemake.wildcards.clusters[-1:] == "m" else clustermaps
+            )
            connection_cost = (connection_cost * weight).groupby(
                genmap
            ).sum() / weight.groupby(genmap).sum()

@@ -505,8 +500,7 @@ def remove_non_electric_buses(n):
    """
    Remove buses from pypsa-eur with carriers which are not AC buses.
    """
-    to_drop = list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique())
-    if to_drop:
+    if to_drop := list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()):
        logger.info(f"Drop buses from PyPSA-Eur with carrier: {to_drop}")
        n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])]

@@ -577,6 +571,7 @@ def add_co2_tracking(n, options):
        capital_cost=options["co2_sequestration_cost"],
        carrier="co2 stored",
        bus=spatial.co2.nodes,
+        lifetime=options["co2_sequestration_lifetime"],
    )

    n.add("Carrier", "co2 stored")

@@ -1231,11 +1226,9 @@ def add_storage_and_grids(n, costs):
        # apply k_edge_augmentation weighted by length of complement edges
        k_edge = options.get("gas_network_connectivity_upgrade", 3)
-        augmentation = list(
-            k_edge_augmentation(G, k_edge, avail=complement_edges.values)
-        )
-        if augmentation:
+        if augmentation := list(
+            k_edge_augmentation(G, k_edge, avail=complement_edges.values)
+        ):
            new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"])
            new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1)

@@ -1399,7 +1392,7 @@ def add_storage_and_grids(n, costs):
            lifetime=costs.at["coal", "lifetime"],
        )

-    if options["SMR"]:
+    if options["SMR_cc"]:
        n.madd(
            "Link",
            spatial.nodes,

@@ -1417,6 +1410,7 @@ def add_storage_and_grids(n, costs):
            lifetime=costs.at["SMR CC", "lifetime"],
        )

+    if options["SMR"]:
        n.madd(
            "Link",
            nodes + " SMR",

@@ -1960,7 +1954,7 @@ def add_heat(n, costs):
        # demand 'dE' [per unit of original heat demand] for each country and
        # different retrofitting strengths [additional insulation thickness in m]
        retro_data = pd.read_csv(
-            snakemake.input.retro_cost_energy,
+            snakemake.input.retro_cost,
            index_col=[0, 1],
            skipinitialspace=True,
            header=[0, 1],

@@ -2893,6 +2887,30 @@ def add_industry(n, costs):
        p_set=p_set,
    )
primary_steel = get(
snakemake.config["industry"]["St_primary_fraction"], investment_year
)
dri_steel = get(snakemake.config["industry"]["DRI_fraction"], investment_year)
bof_steel = primary_steel - dri_steel
if bof_steel > 0:
add_carrier_buses(n, "coal")
mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance
p_set = (
industrial_demand["coal"].sum()
+ mwh_coal_per_mwh_coke * industrial_demand["coke"].sum()
) / nhours
n.madd(
"Load",
spatial.coal.nodes,
suffix=" for industry",
bus=spatial.coal.nodes,
carrier="coal for industry",
p_set=p_set,
)
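A worked example of the load added above, with invented demand figures (only the 1.366 MWh-coal-per-MWh-coke factor comes from the diff):

mwh_coal_per_mwh_coke = 1.366  # from eurostat energy balance, as above
coal_mwh, coke_mwh, nhours = 10e6, 4e6, 8760  # invented annual demands and hours
p_set = (coal_mwh + mwh_coal_per_mwh_coke * coke_mwh) / nhours
print(round(p_set))  # 1765 MW constant coal-for-industry load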
def add_waste_heat(n):
    # TODO options?

@@ -3438,7 +3456,7 @@ if __name__ == "__main__":
    spatial = define_spatial(pop_layout.index, options)

-    if snakemake.params.foresight == "myopic":
+    if snakemake.params.foresight in ["myopic", "perfect"]:
        add_lifetime_wind_solar(n, costs)

        conventional = snakemake.params.conventional_carriers

@@ -3515,7 +3533,7 @@ if __name__ == "__main__":
            if "cb" not in o:
                continue
            limit_type = "carbon budget"
-            fn = "results/" + snakemake.params.RDIR + "csvs/carbon_budget_distribution.csv"
+            fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
            if not os.path.exists(fn):
                emissions_scope = snakemake.params.emissions_scope
                report_year = snakemake.params.eurostat_report_year

@@ -3559,7 +3577,7 @@ if __name__ == "__main__":
    if options["electricity_grid_connection"]:
        add_electricity_grid_connection(n, costs)

-    first_year_myopic = (snakemake.params.foresight == "myopic") and (
+    first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and (
        snakemake.params.planning_horizons[0] == investment_year
    )

107
scripts/retrieve_irena.py Normal file
View File

@@ -0,0 +1,107 @@
# -*- coding: utf-8 -*-
# Copyright 2023 Thomas Gilon (Climact)
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule downloads the existing capacities from `IRENASTAT <https://www.irena.org/Data/Downloads/IRENASTAT>`_ and extracts them into the ``data/existing_capacities`` sub-directory.
**Relevant Settings**
.. code:: yaml
enable:
retrieve_irena:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`enable_cf`
**Outputs**
- ``data/existing_capacities``: existing capacities for offwind, onwind and solar
"""
import logging
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
REGIONS = [
"Albania",
"Austria",
"Belgium",
"Bosnia and Herzegovina",
"Bulgaria",
"Croatia",
"Czechia",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Montenegro",
# "Netherlands",
"Netherlands (Kingdom of the)",
"North Macedonia",
"Norway",
"Poland",
"Portugal",
"Romania",
"Serbia",
"Slovakia",
"Slovenia",
"Spain",
"Sweden",
"Switzerland",
# "United Kingdom",
"United Kingdom of Great Britain and Northern Ireland (the)",
]
REGIONS_DICT = {
"Bosnia and Herzegovina": "Bosnia Herzg",
"Netherlands (Kingdom of the)": "Netherlands",
"United Kingdom of Great Britain and Northern Ireland (the)": "UK",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_irena")
configure_logging(snakemake)
irena_raw = pd.read_csv(
"https://pxweb.irena.org:443/sq/99e64b12-fe03-4a7b-92ea-a22cc3713b92",
skiprows=2,
index_col=[0, 1, 3],
encoding="latin-1",
)
var = "Installed electricity capacity (MW)"
irena = irena_raw[var].unstack(level=2).reset_index(level=1).replace(0, "")
irena = irena[irena.index.isin(REGIONS)]
irena.rename(index=REGIONS_DICT, inplace=True)
df_offwind = irena[irena.Technology.str.contains("Offshore")].drop(
columns=["Technology"]
)
df_onwind = irena[irena.Technology.str.contains("Onshore")].drop(
columns=["Technology"]
)
df_pv = irena[irena.Technology.str.contains("Solar")].drop(columns=["Technology"])
df_offwind.to_csv(snakemake.output["offwind"])
df_onwind.to_csv(snakemake.output["onwind"])
df_pv.to_csv(snakemake.output["solar"])

View File

@@ -152,22 +152,20 @@ def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_factor):
    if n.links.empty:
        return {}

-    connection_costs_per_link = {}
-
-    for tech in renewable_carriers:
-        if tech.startswith("offwind"):
-            connection_costs_per_link[tech] = (
-                n.links.length
-                * length_factor
-                * (
-                    n.links.underwater_fraction
-                    * costs.at[tech + "-connection-submarine", "capital_cost"]
-                    + (1.0 - n.links.underwater_fraction)
-                    * costs.at[tech + "-connection-underground", "capital_cost"]
-                )
-            )
-
-    return connection_costs_per_link
+    return {
+        tech: (
+            n.links.length
+            * length_factor
+            * (
+                n.links.underwater_fraction
+                * costs.at[tech + "-connection-submarine", "capital_cost"]
+                + (1.0 - n.links.underwater_fraction)
+                * costs.at[tech + "-connection-underground", "capital_cost"]
+            )
+        )
+        for tech in renewable_carriers
+        if tech.startswith("offwind")
+    }

def _compute_connection_costs_to_bus(
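The comprehension keeps the original formula: per-link connection cost is length times the length factor times a submarine/underground cost mix weighted by the underwater fraction. A numeric sketch with invented figures:

length, length_factor, underwater_fraction = 100.0, 1.25, 0.6  # invented link data
submarine, underground = 2000.0, 1000.0  # invented capital costs per MW and km
cost = length * length_factor * (
    underwater_fraction * submarine + (1 - underwater_fraction) * underground
)
print(cost)  # 200000.0 for this link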

View File

@@ -33,7 +33,9 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
+from _benchmark import memory_logger
from _helpers import configure_logging, update_config_with_sector_opts
+from pypsa.descriptors import get_activity_mask

logger = logging.getLogger(__name__)

pypsa.pf.logger.setLevel(logging.WARNING)
@@ -47,6 +49,69 @@ def add_land_use_constraint(n, planning_horizons, config):
        _add_land_use_constraint(n)
def add_land_use_constraint_perfect(n):
"""
Add global constraints for tech capacity limit.
"""
logger.info("Add land-use constraint for perfect foresight")
def compress_series(s):
def process_group(group):
if group.nunique() == 1:
return pd.Series(group.iloc[0], index=[None])
else:
return group
return s.groupby(level=[0, 1]).apply(process_group)
def new_index_name(t):
# Convert all elements to string and filter out None values
parts = [str(x) for x in t if x is not None]
# Join with space, but use a dash for the last item if not None
return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
def check_p_min_p_max(p_nom_max):
p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
p_nom_min = p_nom_min.reindex(p_nom_max.index)
check = (
p_nom_min.groupby(level=[0, 1]).sum()
> p_nom_max.groupby(level=[0, 1]).min()
)
if check.sum():
logger.warning(
f"summed p_min_pu values at node larger than technical potential {check[check].index}"
)
grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
ext_i = n.generators.p_nom_extendable
# get technical limit per node and investment period
p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max
# drop carriers without tech limit
p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
# carrier
carriers = p_nom_max.index.get_level_values(0).unique()
gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
n.generators.loc[gen_i, "p_nom_min"] = 0
# check minimum capacities
check_p_min_p_max(p_nom_max)
# drop multi entries in case p_nom_max stays constant in different periods
# p_nom_max = compress_series(p_nom_max)
# adjust name to fit syntax of nominal constraint per bus
df = p_nom_max.reset_index()
df["name"] = df.apply(
lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row["build_year"] is not None else ""),
axis=1,
)
for name in df.name.unique():
df_carrier = df[df.name == name]
bus = df_carrier.bus
n.buses.loc[bus, name] = df_carrier.p_nom_max.values
return n
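Schematically, the function above reduces p_nom_max per (carrier, bus, build_year) and writes it to n.buses under columns named nom_max_<carrier>_<build_year>; a toy reproduction of the grouping step (numbers invented):

import pandas as pd

gens = pd.DataFrame(
    {
        "carrier": ["solar", "solar", "onwind"],
        "bus": ["DE0 0", "DE0 0", "DE0 0"],
        "build_year": [2030, 2040, 2030],
        "p_nom_max": [50.0, 50.0, 80.0],
    }
)
p_nom_max = gens.groupby(["carrier", "bus", "build_year"]).p_nom_max.min()
df = p_nom_max.reset_index()
df["name"] = "nom_max_" + df.carrier + "_" + df.build_year.astype(str)
print(df[["bus", "name", "p_nom_max"]])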
def _add_land_use_constraint(n):
    # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'

@@ -82,19 +147,13 @@ def _add_land_use_constraint(n):
def _add_land_use_constraint_m(n, planning_horizons, config):
    # if generators clustering is lower than network clustering, land_use accounting is at generators clusters
-    planning_horizons = param["planning_horizons"]
    grouping_years = config["existing_capacities"]["grouping_years"]
    current_horizon = snakemake.wildcards.planning_horizons

    for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
        existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
-        ind = list(
-            set(
-                [
-                    i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
-                    for i in existing.index
-                ]
-            )
-        )
+        ind = list(
+            {i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index}
+        )

        previous_years = [

@@ -116,7 +175,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
    n.generators.p_nom_max.clip(lower=0, inplace=True)

-def add_co2_sequestration_limit(n, limit=200):
+def add_co2_sequestration_limit(n, config, limit=200):
    """
    Add a global constraint on the amount of Mt CO2 that can be sequestered.
    """

@@ -130,16 +189,146 @@ def add_co2_sequestration_limit(n, config, limit=200):
            limit = float(o[o.find("seq") + 3 :]) * 1e6
            break

-    n.add(
+    if not n.investment_periods.empty:
+        periods = n.investment_periods
+        names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
+    else:
+        periods = [np.nan]
+        names = pd.Index(["co2_sequestration_limit"])
+
+    n.madd(
        "GlobalConstraint",
-        "co2_sequestration_limit",
+        names,
        sense="<=",
        constant=limit,
        type="primary_energy",
        carrier_attribute="co2_absorptions",
+        investment_period=periods,
    )
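With multi-horizon investment the single cap becomes one GlobalConstraint per period; for example, with illustrative periods:

import pandas as pd

periods = pd.Index([2030, 2040, 2050])  # illustrative investment periods
names = pd.Index([f"co2_sequestration_limit-{p}" for p in periods])
print(list(names))
# ['co2_sequestration_limit-2030', 'co2_sequestration_limit-2040', 'co2_sequestration_limit-2050']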
def add_carbon_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "co2_limit"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_valid = int(glc.loc["investment_period"])
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
rhs = glc.constant
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
def add_carbon_budget_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "Co2Budget"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_valid = int(glc.loc["investment_period"])
time_i = pd.IndexSlice[time_valid, :]
weighting = n.investment_period_weightings.loc[time_valid, "years"]
lhs = final_e.loc[time_i, :] * weighting
rhs = glc.constant
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
def add_max_growth(n, config):
"""
Add maximum growth rates for different carriers.
"""
opts = snakemake.params["sector"]["limit_max_growth"]
# scale by the longest investment period length, since historic growth limits are per year
factor = n.investment_period_weightings.years.max() * opts["factor"]
for carrier in opts["max_growth"].keys():
max_per_period = opts["max_growth"][carrier] * factor
logger.info(
f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
)
n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
for carrier in opts["max_relative_growth"].keys():
max_r_per_period = opts["max_relative_growth"][carrier]
logger.info(
f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
)
n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
return n
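The scaling above in numbers, with invented config values:

years_max, factor = 10, 1.3  # longest period length and head-room factor, invented
max_growth_gw_per_a = 16  # invented historic growth limit for one carrier
max_per_period = max_growth_gw_per_a * years_max * factor
print(max_per_period * 1e3)  # 208000.0, i.e. 208 GW per period, stored in MW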
def add_retrofit_gas_boiler_constraint(n, snapshots):
"""
Allow retrofitting of existing gas boilers to H2 boilers.
"""
c = "Link"
logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
mask = n.links.carrier.str.contains("retrofitted H2 boiler")
h2_i = n.links[mask].index
n.links.loc[gas_i, "p_nom_extendable"] = True
p_nom = n.links.loc[gas_i, "p_nom"]
n.links.loc[gas_i, "p_nom"] = 0
# heat profile
cols = n.loads_t.p_set.columns[
n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")
]
profile = n.loads_t.p_set[cols].div(
n.loads_t.p_set[cols].groupby(level=0).max(), level=0
)
# handle the case where the maximum heat profile value is zero
profile.fillna(0, inplace=True)
profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
profile.columns = gas_i
rhs = profile.mul(p_nom)
dispatch = n.model["Link-p"]
active = get_activity_mask(n, c, snapshots, gas_i)
rhs = rhs[active]
p_gas = dispatch.sel(Link=gas_i)
p_h2 = dispatch.sel(Link=h2_i)
lhs = p_gas + p_h2
n.model.add_constraints(lhs == rhs, name="gas_retrofit")
def prepare_network(
    n,
    solve_opts=None,

@@ -156,8 +345,7 @@ def prepare_network(
):
        df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

-    load_shedding = solve_opts.get("load_shedding")
-    if load_shedding:
+    if load_shedding := solve_opts.get("load_shedding"):
        # intersect between macroeconomic and surveybased willingness to pay
        # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
        # TODO: retrieve color and nice name from config
@@ -200,9 +388,14 @@ def prepare_network(
    if foresight == "myopic":
        add_land_use_constraint(n, planning_horizons, config)

+    if foresight == "perfect":
+        n = add_land_use_constraint_perfect(n)
+        if snakemake.params["sector"]["limit_max_growth"]["enable"]:
+            n = add_max_growth(n, config)
+
    if n.stores.carrier.eq("co2 stored").any():
        limit = co2_sequestration_potential
-        add_co2_sequestration_limit(n, limit=limit)
+        add_co2_sequestration_limit(n, config, limit=limit)

    return n
@@ -594,12 +787,17 @@ def extra_functionality(n, snapshots):
        add_EQ_constraints(n, o)

    add_battery_constraints(n)
    add_pipe_retrofit_constraint(n)
if n._multi_invest:
add_carbon_constraint(n, snapshots)
add_carbon_budget_constraint(n, snapshots)
add_retrofit_gas_boiler_constraint(n, snapshots)
def solve_network(n, config, solving, opts="", **kwargs):
    set_of_options = solving["solver"]["options"]
    cf_solving = solving["options"]

+    kwargs["multi_investment_periods"] = config["foresight"] == "perfect"
    kwargs["solver_options"] = (
        solving["solver_options"][set_of_options] if set_of_options else {}
    )
@@ -671,13 +869,14 @@ if __name__ == "__main__":
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
-            "solve_network",
+            "solve_sector_network_perfect",
+            configfiles="../config/test/config.perfect.yaml",
            simpl="",
-            opts="Ept",
-            clusters="37",
-            ll="v1.0",
-            sector_opts="",
-            planning_horizons="2020",
+            opts="",
+            clusters="5",
+            ll="v1.5",
+            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
+            planning_horizons="2030",
        )
    configure_logging(snakemake)
    if "sector_opts" in snakemake.wildcards.keys():
@@ -704,13 +903,18 @@ if __name__ == "__main__":
        co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
    )

-    n = solve_network(
-        n,
-        config=snakemake.config,
-        solving=snakemake.params.solving,
-        opts=opts,
-        log_fn=snakemake.log.solver,
-    )
+    with memory_logger(
+        filename=getattr(snakemake.log, "memory", None), interval=30.0
+    ) as mem:
+        n = solve_network(
+            n,
+            config=snakemake.config,
+            solving=snakemake.params.solving,
+            opts=opts,
+            log_fn=snakemake.log.solver,
+        )
+    logger.info(f"Maximum memory usage: {mem.mem_usage}")

    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
    n.export_to_netcdf(snakemake.output[0])

View File

@@ -7,6 +7,7 @@ Solves linear optimal dispatch in hourly resolution using the capacities of
previous capacity expansion in rule :mod:`solve_network`.
"""

import logging

import numpy as np

@@ -35,7 +36,7 @@ if __name__ == "__main__":
    configure_logging(snakemake)
    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

-    opts = (snakemake.wildcards.opts + "-" + snakemake.wildcards.sector_opts).split("-")
+    opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-")
    opts = [o for o in opts if o != ""]
    solve_opts = snakemake.params.options
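The f-string merge above behaves like the old concatenation, with empty wildcard fragments dropped by the filter; for example, with invented wildcard values:

opts_wc, sector_opts_wc = "", "730H-T-H"  # example wildcard values
opts = f"{opts_wc}-{sector_opts_wc}".split("-")
opts = [o for o in opts if o != ""]
print(opts)  # ['730H', 'T', 'H']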