Merge branch 'master' into multiyear

Fabian Neumann 2023-10-08 11:55:11 +02:00
commit b8fee80919
82 changed files with 5083 additions and 896 deletions


@@ -83,6 +83,7 @@ jobs:
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
- name: Upload artifacts
uses: actions/upload-artifact@v3

.gitignore

@@ -8,6 +8,7 @@ __pycache__
*dconf
gurobi.log
.vscode
*.orig
/bak
/resources
@@ -28,18 +29,18 @@ dconf
/data/links_p_nom.csv
/data/*totals.csv
/data/biomass*
/data/emobility/
/data/eea*
/data/jrc*
/data/bundle-sector/emobility/
/data/bundle-sector/eea*
/data/bundle-sector/jrc*
/data/heating/
/data/eurostat*
/data/bundle-sector/eurostat*
/data/odyssee/
/data/transport_data.csv
/data/switzerland*
/data/bundle-sector/switzerland*
/data/.nfs*
/data/Industrial_Database.csv
/data/bundle-sector/Industrial_Database.csv
/data/retro/tabula-calculator-calcsetbuilding.csv
/data/nuts*
/data/bundle-sector/nuts*
data/gas_network/scigrid-gas/
data/costs_*.csv


@@ -30,10 +30,10 @@ repos:
# Find common spelling mistakes in comments and docstrings
- repo: https://github.com/codespell-project/codespell
rev: v2.2.5
rev: v2.2.6
hooks:
- id: codespell
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes
types_or: [python, rst, markdown]
files: ^(scripts|doc)/
@@ -51,7 +51,7 @@ repos:
# Formatting with "black" coding style
- repo: https://github.com/psf/black
rev: 23.7.0
rev: 23.9.1
hooks:
# Format Python files
- id: black


@@ -14,4 +14,3 @@ build:
python:
install:
- requirements: doc/requirements.txt
system_packages: false

.sync-send

@@ -0,0 +1,11 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
rules
scripts
config
config/test
envs
matplotlibrc
Snakefile


@@ -1,21 +0,0 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
.snakemake
.git
.pytest_cache
.ipynb_checkpoints
.vscode
.DS_Store
__pycache__
*.pyc
*.pyo
*.ipynb
notebooks
doc
cutouts
data
benchmarks
*.nc
configs


@@ -1,23 +0,0 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
.snakemake
.git
.pytest_cache
.ipynb_checkpoints
.vscode
.DS_Store
__pycache__
*.pyc
*.pyo
*.ipynb
notebooks
benchmarks
logs
resources*
results
networks*
cutouts
data/bundle
doc


@@ -6,7 +6,7 @@ cff-version: 1.1.0
message: "If you use this package, please cite it in the following way."
title: "PyPSA-Eur: An open sector-coupled optimisation model of the European energy system"
repository: https://github.com/pypsa/pypsa-eur
version: 0.8.0
version: 0.8.1
license: MIT
authors:
- family-names: Brown


@@ -9,7 +9,7 @@ SPDX-License-Identifier: CC-BY-4.0
![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur)
[![Zenodo PyPSA-Eur](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874)
[![Zenodo PyPSA-Eur-Sec](https://zenodo.org/badge/DOI/10.5281/zenodo.3938042.svg)](https://doi.org/10.5281/zenodo.3938042)
[![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)
[![Snakemake](https://img.shields.io/badge/snakemake-≥7.7.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)
[![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur)
[![Stack Exchange questions](https://img.shields.io/stackexchange/stackoverflow/t/pypsa)](https://stackoverflow.com/questions/tagged/pypsa)
@@ -35,17 +35,18 @@ The model is designed to be imported into the open toolbox
[PyPSA](https://github.com/PyPSA/PyPSA).
**WARNING**: PyPSA-Eur is under active development and has several
[limitations](https://pypsa-eur.readthedocs.io/en/latest/limitations.html)
which you should understand before using the model. The github repository
[limitations](https://pypsa-eur.readthedocs.io/en/latest/limitations.html) which
you should understand before using the model. The github repository
[issues](https://github.com/PyPSA/pypsa-eur/issues) collect known topics we are
working on (please feel free to help or make suggestions). The
[documentation](https://pypsa-eur.readthedocs.io/) remains somewhat patchy. You
can find showcases of the model's capabilities in the preprint [Benefits of a
Hydrogen Network in Europe](https://arxiv.org/abs/2207.05816), a [paper in Joule
with a description of the industry sector](https://arxiv.org/abs/2109.09563), or
in [a 2021 presentation at EMP-E](https://nworbmot.org/energy/brown-empe.pdf).
We cannot support this model if you choose to use it. We do not recommend to use
the full resolution network model for simulations. At high granularity the
can find showcases of the model's capabilities in the Joule paper [The potential
role of a hydrogen network in
Europe](https://doi.org/10.1016/j.joule.2023.06.016), another [paper in Joule
with a description of the industry
sector](https://doi.org/10.1016/j.joule.2022.04.016), or in [a 2021 presentation
at EMP-E](https://nworbmot.org/energy/brown-empe.pdf). We do not recommend to
use the full resolution network model for simulations. At high granularity the
assignment of loads and generators to the nearest network node may not be a
correct assumption, depending on the topology of the underlying distribution
grid, and local grid bottlenecks may cause unrealistic load-shedding or


@@ -41,7 +41,7 @@ localrules:
wildcard_constraints:
weather_year="[0-9]{4}|",
simpl="[a-zA-Z0-9]*",
clusters="[0-9]+m?|all",
clusters="[0-9]+(m|c)?|all",
ll="(v|c)([0-9\.]+|opt)",
opts="[-+a-zA-Z0-9\.]*",
sector_opts="[-+a-zA-Z0-9\.\s]*",
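For orientation, the widened ``clusters`` wildcard now also accepts a trailing ``c`` besides the existing ``m`` suffix. A minimal self-contained check of the new pattern (Python; the tested values are made up):

import re

# New clusters constraint: digits with an optional "m" or "c" suffix, or "all".
pattern = re.compile(r"^([0-9]+(m|c)?|all)$")
for value in ["37", "37m", "37c", "all", "37x"]:
    print(value, bool(pattern.match(value)))  # only "37x" fails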
@@ -54,6 +54,7 @@ include: "rules/build_electricity.smk"
include: "rules/build_sector.smk"
include: "rules/solve_electricity.smk"
include: "rules/postprocess.smk"
include: "rules/validate.smk"
if config["foresight"] == "overnight":
@@ -66,13 +67,31 @@ if config["foresight"] == "myopic":
include: "rules/solve_myopic.smk"
if config["foresight"] == "perfect":
include: "rules/solve_perfect.smk"
rule all:
input:
RESULTS + "graphs/costs.pdf",
default_target: True
rule purge:
message:
"Purging generated resources, results and docs. Downloads are kept."
run:
rmtree("resources/", ignore_errors=True)
rmtree("results/", ignore_errors=True)
rmtree("doc/_build", ignore_errors=True)
import builtins
do_purge = builtins.input(
"Do you really want to delete all generated resources, \nresults and docs (downloads are kept)? [y/N] "
)
if do_purge == "y":
rmtree("resources/", ignore_errors=True)
rmtree("results/", ignore_errors=True)
rmtree("doc/_build", ignore_errors=True)
print("Purging generated resources, results and docs. Downloads are kept.")
else:
raise Exception(f"Input {do_purge}. Aborting purge.")
rule dag:
@@ -99,3 +118,14 @@ rule doc:
directory("doc/_build"),
shell:
"make -C doc html"
rule sync:
params:
cluster=f"{config['remote']['ssh']}:{config['remote']['path']}",
shell:
"""
rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster}
rsync -uvarh --no-g {params.cluster}/results . || echo "No results directory, skipping rsync"
rsync -uvarh --no-g {params.cluster}/logs . || echo "No logs directory, skipping rsync"
"""


@@ -3,13 +3,21 @@
# SPDX-License-Identifier: CC0-1.0
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#top-level-configuration
version: 0.8.0
version: 0.8.1
tutorial: false
logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
private:
keys:
entsoe_api:
remote:
ssh: ""
path: ""
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
run:
name: ""
@@ -213,6 +221,8 @@ renewable:
carriers: [ror, PHS, hydro]
PHS_max_hours: 6
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
flatten_dispatch: false
flatten_dispatch_buffer: 0.2
clip_min_inflow: 1.0
eia_norm_year: 2013
eia_correct_by_capacity: false
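The new ``flatten_dispatch`` switch caps hourly hydro dispatch at the average capacity factor plus ``flatten_dispatch_buffer``. A minimal numerical sketch of that cap (illustrative inflow data, not from the model):

import pandas as pd

# Illustrative hourly hydro capacity factors (per unit of p_nom).
cf = pd.Series([0.1, 0.5, 0.9, 0.3])
flatten_dispatch_buffer = 0.2

# Upper dispatch limit: mean capacity factor plus the configured buffer.
p_max_pu = min(1.0, cf.mean() + flatten_dispatch_buffer)
print(p_max_pu)  # 0.65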
@@ -220,6 +230,8 @@
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#conventional
conventional:
unit_commitment: false
dynamic_fuel_price: false
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name
@@ -234,6 +246,12 @@ lines:
max_extension: .inf
length_factor: 1.25
under_construction: 'zero' # 'zero': set capacity to zero, 'remove': remove, 'keep': with full capacity
dynamic_line_rating:
activate: false
cutout: europe-2013-era5
correction_factor: 0.95
max_voltage_difference: false
max_line_rating: false
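How the two new safeguards could bound a dynamic line rating time series, as a hedged sketch (the per-unit ratings would come from the weather cutout; the multiplicative use of ``correction_factor`` is an assumption here, not confirmed by this diff):

import numpy as np

# Illustrative per-unit dynamic ratings relative to static nominal capacity.
dlr = np.array([1.1, 1.6, 0.9, 1.45])
correction_factor = 0.95  # compensates overestimated hourly-mean wind speeds
max_line_rating = 1.3     # cap relative to nominal capacity; false disables it

rating = dlr * correction_factor
if max_line_rating:
    rating = np.minimum(rating, max_line_rating)
print(rating)  # [1.045 1.3   0.855 1.3  ]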
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#links
links:
@@ -451,6 +469,7 @@ sector:
years_of_storage: 25
co2_sequestration_potential: 200
co2_sequestration_cost: 10
co2_sequestration_lifetime: 50
co2_spatial: false
co2network: false
cc_fraction: 0.9
@@ -481,6 +500,20 @@ sector:
OCGT: gas
biomass_to_liquid: false
biosng: false
limit_max_growth:
enable: false
# allowing 30% larger than max historic growth
factor: 1.3
max_growth: # unit GW
onwind: 16 # onshore max growth so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind
solar: 28 # solar max growth so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv
offwind-ac: 35 # offshore max growth so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/
offwind-dc: 35
max_relative_growth:
onwind: 3
solar: 3
offwind-ac: 3
offwind-dc: 3
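Worked out, the absolute caps scale the historic maxima by the configured factor (a sketch; how these limits enter the optimisation as per-carrier growth constraints is not shown in this diff):

# Per-carrier growth caps: 30% above the largest historic annual build-out.
factor = 1.3
max_growth = {"onwind": 16, "solar": 28, "offwind-ac": 35, "offwind-dc": 35}  # GW

limits = {carrier: factor * gw for carrier, gw in max_growth.items()}
print(limits["onwind"])  # 20.8 GW per planning horizon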
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
industry:
@@ -533,11 +566,13 @@ industry:
hotmaps_locate_missing: false
reference_year: 2015
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
costs:
year: 2030
version: v0.6.0
rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
social_discountrate: 0.02
fill_values:
FOM: 0
VOM: 0
@@ -576,16 +611,12 @@ clustering:
algorithm: kmeans
feature: solar+onwind-time
exclude_carriers: []
consider_efficiency_classes: false
aggregation_strategies:
generators:
p_nom_max: sum
p_nom_min: sum
p_min_pu: mean
marginal_cost: mean
committable: any
ramp_limit_up: max
ramp_limit_down: max
efficiency: mean
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
solving:
@@ -593,13 +624,17 @@ solving:
options:
clip_p_max_pu: 1.e-2
load_shedding: false
transmission_losses: 0
noisy_costs: true
skip_iterations: true
rolling_horizon: false
seed: 123
# options that go into the optimize function
track_iterations: false
min_iterations: 4
max_iterations: 6
seed: 123
transmission_losses: 0
linearized_unit_commitment: true
horizon: 365
solver:
name: gurobi
@@ -627,7 +662,6 @@ solving:
AggFill: 0
PreDual: 0
GURO_PAR_BARDENSETHRESH: 200
seed: 10 # Consistent seed for all platforms
gurobi-numeric-focus:
name: gurobi
NumericFocus: 3 # Favour numeric stability over speed
@@ -660,6 +694,7 @@ solving:
glpk-default: {} # Used in CI
mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2
walltime: "12:00:00"
operations:
@@ -706,6 +741,8 @@ plotting:
H2: "Hydrogen Storage"
lines: "Transmission Lines"
ror: "Run of River"
ac: "AC"
dc: "DC"
tech_colors:
# wind
@@ -765,6 +802,7 @@ plotting:
gas pipeline new: '#a87c62'
# oil
oil: '#c9c9c9'
imported oil: '#a3a3a3'
oil boiler: '#adadad'
residential rural oil boiler: '#a9a9a9'
services rural oil boiler: '#a5a5a5'
@@ -896,6 +934,7 @@ plotting:
H2 for shipping: "#ebaee0"
H2: '#bf13a0'
hydrogen: '#bf13a0'
retrofitted H2 boiler: '#e5a0d9'
SMR: '#870c71'
SMR CC: '#4f1745'
H2 liquefaction: '#d647bd'


@@ -0,0 +1,43 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
run:
name: "perfect"
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
foresight: perfect
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario:
simpl:
- ''
ll:
- v1.0
clusters:
- 37
opts:
- ''
sector_opts:
- 1p5-4380H-T-H-B-I-A-solar+p3-dist1
- 1p7-4380H-T-H-B-I-A-solar+p3-dist1
- 2p0-4380H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2020
- 2030
- 2040
- 2050
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
co2_budget:
# update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
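As a sanity check on the per-capita apportionment behind these numbers, the implied European share of each global budget can be computed directly (back-of-envelope only; the shares are not identical across scenarios):

# European budgets (Gt CO2) against the corresponding global budgets (Gt CO2).
budgets = {"1p5": (34.2, 420), "1p7": (51.4, 800), "2p0": (69.778, 1170)}
for scenario, (europe, world) in budgets.items():
    print(scenario, round(europe / world, 3))  # 0.081, 0.064, 0.06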
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0


@@ -0,0 +1,98 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
run:
name: "validation"
scenario:
ll:
- v1.0
clusters:
- 37
opts:
- 'Ept'
snapshots:
start: "2019-01-01"
end: "2020-01-01"
inclusive: 'left'
enable:
retrieve_cutout: false
electricity:
co2limit: 1e9
extendable_carriers:
Generator: []
StorageUnit: []
Store: []
Link: []
powerplants_filter: not (DateOut < 2019)
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
estimate_renewable_capacities:
year: 2019
atlite:
default_cutout: europe-2019-era5
cutouts:
europe-2019-era5:
module: era5
x: [-12., 35.]
y: [33., 72]
dx: 0.3
dy: 0.3
time: ['2019', '2019']
renewable:
onwind:
cutout: europe-2019-era5
offwind-ac:
cutout: europe-2019-era5
offwind-dc:
cutout: europe-2019-era5
solar:
cutout: europe-2019-era5
hydro:
cutout: europe-2019-era5
flatten_dispatch: 0.01
conventional:
unit_commitment: false
dynamic_fuel_price: true
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv"
biomass:
p_max_pu: 0.65
load:
power_statistics: false
lines:
s_max_pu: 0.23
under_construction: 'remove'
links:
include_tyndp: false
costs:
year: 2020
emission_prices:
co2: 25
clustering:
simplify_network:
exclude_carriers: [oil, coal, lignite, OCGT, CCGT]
cluster_network:
consider_efficiency_classes: true
solving:
options:
load_shedding: true
rolling_horizon: false
horizon: 1000
overlap: 48


@@ -62,6 +62,12 @@ renewable:
clustering:
exclude_carriers: ["OCGT", "offwind-ac", "coal"]
lines:
dynamic_line_rating:
activate: true
cutout: be-03-2013-era5
max_line_rating: 1.3
solving:
solver:


@@ -0,0 +1,91 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
tutorial: true
run:
name: "test-sector-perfect"
disable_progressbar: true
shared_resources: true
shared_cutouts: true
foresight: perfect
scenario:
ll:
- v1.0
clusters:
- 5
sector_opts:
- 8760H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2030
- 2040
- 2050
countries: ['BE']
snapshots:
start: "2013-03-01"
end: "2013-03-08"
electricity:
co2limit: 100.e+6
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery]
Store: [H2]
Link: [H2 pipeline]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0
atlite:
default_cutout: be-03-2013-era5
cutouts:
be-03-2013-era5:
module: era5
x: [4., 15.]
y: [46., 56.]
time: ["2013-03-01", "2013-03-08"]
renewable:
onwind:
cutout: be-03-2013-era5
offwind-ac:
cutout: be-03-2013-era5
max_depth: false
offwind-dc:
cutout: be-03-2013-era5
max_depth: false
solar:
cutout: be-03-2013-era5
industry:
St_primary_fraction:
2020: 0.8
2030: 0.6
2040: 0.5
2050: 0.4
solving:
solver:
name: glpk
options: glpk-default
mem: 4000
plotting:
map:
boundaries:
eu_node_location:
x: -5.5
y: 46.
costs_max: 1000
costs_threshold: 0.0000001
energy_max:
energy_min:
energy_threshold: 0.000001


@@ -1,195 +0,0 @@
technology,year,parameter,value,unit,source
solar-rooftop,2030,discount rate,0.04,per unit,standard for decentral
onwind,2030,lifetime,30,years,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,lifetime,30,years,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,lifetime,25,years,IEA2010
solar-rooftop,2030,lifetime,25,years,IEA2010
solar-utility,2030,lifetime,25,years,IEA2010
PHS,2030,lifetime,80,years,IEA2010
hydro,2030,lifetime,80,years,IEA2010
ror,2030,lifetime,80,years,IEA2010
OCGT,2030,lifetime,30,years,IEA2010
nuclear,2030,lifetime,45,years,ECF2010 in DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,lifetime,30,years,IEA2010
coal,2030,lifetime,40,years,IEA2010
lignite,2030,lifetime,40,years,IEA2010
geothermal,2030,lifetime,40,years,IEA2010
biomass,2030,lifetime,30,years,ECF2010 in DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,lifetime,30,years,ECF2010 in DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,investment,1040,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,investment,1640,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-station,2030,investment,250,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-connection-submarine,2030,investment,2685,EUR/MW/km,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-connection-underground,2030,investment,1342,EUR/MW/km,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-dc-station,2030,investment,400,EUR/kWel,Haertel 2017; assuming one onshore and one offshore node + 13% learning reduction
offwind-dc-connection-submarine,2030,investment,2000,EUR/MW/km,DTU report based on Fig 34 of https://ec.europa.eu/energy/sites/ener/files/documents/2014_nsog_report.pdf
offwind-dc-connection-underground,2030,investment,1000,EUR/MW/km,Haertel 2017; average + 13% learning reduction
solar,2030,investment,600,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
biomass,2030,investment,2209,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
geothermal,2030,investment,3392,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
coal,2030,investment,1300,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
lignite,2030,investment,1500,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
solar-rooftop,2030,investment,725,EUR/kWel,ETIP PV
solar-utility,2030,investment,425,EUR/kWel,ETIP PV
PHS,2030,investment,2000,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
hydro,2030,investment,2000,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
ror,2030,investment,3000,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
OCGT,2030,investment,400,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
nuclear,2030,investment,6000,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,investment,800,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,investment,400,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,FOM,2.450549,%/year,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,FOM,2.304878,%/year,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,FOM,4.166667,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
solar-rooftop,2030,FOM,2,%/year,ETIP PV
solar-utility,2030,FOM,3,%/year,ETIP PV
biomass,2030,FOM,4.526935,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
geothermal,2030,FOM,2.358491,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
coal,2030,FOM,1.923076,%/year,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
lignite,2030,FOM,2.0,%/year,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
oil,2030,FOM,1.5,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
PHS,2030,FOM,1,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
hydro,2030,FOM,1,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
ror,2030,FOM,2,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,FOM,2.5,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
OCGT,2030,FOM,3.75,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,VOM,2.3,EUR/MWhel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,VOM,2.7,EUR/MWhel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,VOM,0.01,EUR/MWhel,RES costs made up to fix curtailment order
coal,2030,VOM,6,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
lignite,2030,VOM,7,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,VOM,4,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
OCGT,2030,VOM,3,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
nuclear,2030,VOM,8,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
gas,2030,fuel,21.6,EUR/MWhth,IEA2011b
uranium,2030,fuel,3,EUR/MWhth,DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,VOM,3,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
nuclear,2030,fuel,3,EUR/MWhth,IEA2011b
biomass,2030,fuel,7,EUR/MWhth,IEA2011b
coal,2030,fuel,8.4,EUR/MWhth,IEA2011b
lignite,2030,fuel,2.9,EUR/MWhth,IEA2011b
oil,2030,fuel,50,EUR/MWhth,IEA WEM2017 97USD/boe = http://www.iea.org/media/weowebsite/2017/WEM_Documentation_WEO2017.pdf
PHS,2030,efficiency,0.75,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
hydro,2030,efficiency,0.9,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
ror,2030,efficiency,0.9,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
OCGT,2030,efficiency,0.39,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,efficiency,0.5,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
biomass,2030,efficiency,0.468,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
geothermal,2030,efficiency,0.239,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
nuclear,2030,efficiency,0.337,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
gas,2030,CO2 intensity,0.187,tCO2/MWth,https://www.eia.gov/environment/emissions/co2_vol_mass.php
coal,2030,efficiency,0.464,per unit,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
lignite,2030,efficiency,0.447,per unit,DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,efficiency,0.393,per unit,DIW DataDoc http://hdl.handle.net/10419/80348 CT
coal,2030,CO2 intensity,0.354,tCO2/MWth,https://www.eia.gov/environment/emissions/co2_vol_mass.php
lignite,2030,CO2 intensity,0.334,tCO2/MWth,https://www.eia.gov/environment/emissions/co2_vol_mass.php
oil,2030,CO2 intensity,0.248,tCO2/MWth,https://www.eia.gov/environment/emissions/co2_vol_mass.php
geothermal,2030,CO2 intensity,0.026,tCO2/MWth,https://www.eia.gov/environment/emissions/co2_vol_mass.php
electrolysis,2030,investment,350,EUR/kWel,Palzer Thesis
electrolysis,2030,FOM,4,%/year,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
electrolysis,2030,lifetime,18,years,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
electrolysis,2030,efficiency,0.8,per unit,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
fuel cell,2030,investment,339,EUR/kWel,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
fuel cell,2030,FOM,3,%/year,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
fuel cell,2030,lifetime,20,years,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013
fuel cell,2030,efficiency,0.58,per unit,NREL http://www.nrel.gov/docs/fy09osti/45873.pdf; budischak2013 conservative 2020
hydrogen storage,2030,investment,11.2,USD/kWh,budischak2013
hydrogen storage,2030,lifetime,20,years,budischak2013
hydrogen underground storage,2030,investment,0.5,EUR/kWh,maximum from https://www.nrel.gov/docs/fy10osti/46719.pdf
hydrogen underground storage,2030,lifetime,40,years,http://www.acatech.de/fileadmin/user_upload/Baumstruktur_nach_Website/Acatech/root/de/Publikationen/Materialien/ESYS_Technologiesteckbrief_Energiespeicher.pdf
H2 pipeline,2030,investment,267,EUR/MW/km,Welder et al https://doi.org/10.1016/j.ijhydene.2018.12.156
H2 pipeline,2030,lifetime,40,years,Krieg2012 http://juser.fz-juelich.de/record/136392/files/Energie%26Umwelt_144.pdf
H2 pipeline,2030,FOM,5,%/year,Krieg2012 http://juser.fz-juelich.de/record/136392/files/Energie%26Umwelt_144.pdf
H2 pipeline,2030,efficiency,0.98,per unit,Krieg2012 http://juser.fz-juelich.de/record/136392/files/Energie%26Umwelt_144.pdf
methanation,2030,investment,1000,EUR/kWH2,Schaber thesis
methanation,2030,lifetime,25,years,Schaber thesis
methanation,2030,FOM,3,%/year,Schaber thesis
methanation,2030,efficiency,0.6,per unit,Palzer; Breyer for DAC
helmeth,2030,investment,1000,EUR/kW,no source
helmeth,2030,lifetime,25,years,no source
helmeth,2030,FOM,3,%/year,no source
helmeth,2030,efficiency,0.8,per unit,HELMETH press release
DAC,2030,investment,250,EUR/(tCO2/a),Fasihi/Climeworks
DAC,2030,lifetime,30,years,Fasihi
DAC,2030,FOM,4,%/year,Fasihi
battery inverter,2030,investment,411,USD/kWel,budischak2013
battery inverter,2030,lifetime,20,years,budischak2013
battery inverter,2030,efficiency,0.9,per unit charge/discharge,budischak2013; Lund and Kempton (2008) http://dx.doi.org/10.1016/j.enpol.2008.06.007
battery inverter,2030,FOM,3,%/year,budischak2013
battery storage,2030,investment,192,USD/kWh,budischak2013
battery storage,2030,lifetime,15,years,budischak2013
decentral air-sourced heat pump,2030,investment,1050,EUR/kWth,HP; Palzer thesis
decentral air-sourced heat pump,2030,lifetime,20,years,HP; Palzer thesis
decentral air-sourced heat pump,2030,FOM,3.5,%/year,Palzer thesis
decentral air-sourced heat pump,2030,efficiency,3,per unit,default for costs
decentral air-sourced heat pump,2030,discount rate,0.04,per unit,Palzer thesis
decentral ground-sourced heat pump,2030,investment,1400,EUR/kWth,Palzer thesis
decentral ground-sourced heat pump,2030,lifetime,20,years,Palzer thesis
decentral ground-sourced heat pump,2030,FOM,3.5,%/year,Palzer thesis
decentral ground-sourced heat pump,2030,efficiency,4,per unit,default for costs
decentral ground-sourced heat pump,2030,discount rate,0.04,per unit,Palzer thesis
central air-sourced heat pump,2030,investment,700,EUR/kWth,Palzer thesis
central air-sourced heat pump,2030,lifetime,20,years,Palzer thesis
central air-sourced heat pump,2030,FOM,3.5,%/year,Palzer thesis
central air-sourced heat pump,2030,efficiency,3,per unit,default for costs
retrofitting I,2030,discount rate,0.04,per unit,Palzer thesis
retrofitting I,2030,lifetime,50,years,Palzer thesis
retrofitting I,2030,FOM,1,%/year,Palzer thesis
retrofitting I,2030,investment,50,EUR/m2/fraction reduction,Palzer thesis
retrofitting II,2030,discount rate,0.04,per unit,Palzer thesis
retrofitting II,2030,lifetime,50,years,Palzer thesis
retrofitting II,2030,FOM,1,%/year,Palzer thesis
retrofitting II,2030,investment,250,EUR/m2/fraction reduction,Palzer thesis
water tank charger,2030,efficiency,0.9,per unit,HP
water tank discharger,2030,efficiency,0.9,per unit,HP
decentral water tank storage,2030,investment,860,EUR/m3,IWES Interaktion
decentral water tank storage,2030,FOM,1,%/year,HP
decentral water tank storage,2030,lifetime,20,years,HP
decentral water tank storage,2030,discount rate,0.04,per unit,Palzer thesis
central water tank storage,2030,investment,30,EUR/m3,IWES Interaktion
central water tank storage,2030,FOM,1,%/year,HP
central water tank storage,2030,lifetime,40,years,HP
decentral resistive heater,2030,investment,100,EUR/kWhth,Schaber thesis
decentral resistive heater,2030,lifetime,20,years,Schaber thesis
decentral resistive heater,2030,FOM,2,%/year,Schaber thesis
decentral resistive heater,2030,efficiency,0.9,per unit,Schaber thesis
decentral resistive heater,2030,discount rate,0.04,per unit,Palzer thesis
central resistive heater,2030,investment,100,EUR/kWhth,Schaber thesis
central resistive heater,2030,lifetime,20,years,Schaber thesis
central resistive heater,2030,FOM,2,%/year,Schaber thesis
central resistive heater,2030,efficiency,0.9,per unit,Schaber thesis
decentral gas boiler,2030,investment,175,EUR/kWhth,Palzer thesis
decentral gas boiler,2030,lifetime,20,years,Palzer thesis
decentral gas boiler,2030,FOM,2,%/year,Palzer thesis
decentral gas boiler,2030,efficiency,0.9,per unit,Palzer thesis
decentral gas boiler,2030,discount rate,0.04,per unit,Palzer thesis
central gas boiler,2030,investment,63,EUR/kWhth,Palzer thesis
central gas boiler,2030,lifetime,22,years,Palzer thesis
central gas boiler,2030,FOM,1,%/year,Palzer thesis
central gas boiler,2030,efficiency,0.9,per unit,Palzer thesis
decentral CHP,2030,lifetime,25,years,HP
decentral CHP,2030,investment,1400,EUR/kWel,HP
decentral CHP,2030,FOM,3,%/year,HP
decentral CHP,2030,discount rate,0.04,per unit,Palzer thesis
central CHP,2030,lifetime,25,years,HP
central CHP,2030,investment,650,EUR/kWel,HP
central CHP,2030,FOM,3,%/year,HP
decentral solar thermal,2030,discount rate,0.04,per unit,Palzer thesis
decentral solar thermal,2030,FOM,1.3,%/year,HP
decentral solar thermal,2030,investment,270000,EUR/1000m2,HP
decentral solar thermal,2030,lifetime,20,years,HP
central solar thermal,2030,FOM,1.4,%/year,HP
central solar thermal,2030,investment,140000,EUR/1000m2,HP
central solar thermal,2030,lifetime,20,years,HP
HVAC overhead,2030,investment,400,EUR/MW/km,Hagspiel
HVAC overhead,2030,lifetime,40,years,Hagspiel
HVAC overhead,2030,FOM,2,%/year,Hagspiel
HVDC overhead,2030,investment,400,EUR/MW/km,Hagspiel
HVDC overhead,2030,lifetime,40,years,Hagspiel
HVDC overhead,2030,FOM,2,%/year,Hagspiel
HVDC submarine,2030,investment,2000,EUR/MW/km,DTU report based on Fig 34 of https://ec.europa.eu/energy/sites/ener/files/documents/2014_nsog_report.pdf
HVDC submarine,2030,lifetime,40,years,Hagspiel
HVDC submarine,2030,FOM,2,%/year,Hagspiel
HVDC inverter pair,2030,investment,150000,EUR/MW,Hagspiel
HVDC inverter pair,2030,lifetime,40,years,Hagspiel
HVDC inverter pair,2030,FOM,2,%/year,Hagspiel


@@ -1,16 +1,16 @@
country,factor
BE,0.65
BG,0.89
CZ,0.82
FI,0.92
FR,0.70
DE,0.88
HU,0.90
NL,0.86
RO,0.92
SK,0.89
SI,0.94
ES,0.89
SE,0.82
CH,0.86
GB,0.67
BE,0.796
BG,0.894
CZ,0.827
FI,0.936
FR,0.71
DE,0.871
HU,0.913
NL,0.868
RO,0.909
SK,0.9
SI,0.913
ES,0.897
SE,0.851
CH,0.87
GB,0.656


data/unit_commitment.csv

@@ -0,0 +1,8 @@
attribute,OCGT,CCGT,coal,lignite,nuclear
ramp_limit_up,1,1,1,1,0.3
ramp_limit_start_up,0.2,0.45,0.38,0.4,0.5
ramp_limit_shut_down,0.2,0.45,0.38,0.4,0.5
p_min_pu,0.2,0.45,0.325,0.4,0.5
min_up_time,,3,5,7,6
min_down_time,,2,6,6,10
start_up_cost,9.6,34.2,35.64,19.14,16.5
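The table keeps one column per conventional carrier and one row per unit-commitment attribute, with blanks for attributes that stay unset (e.g. no minimum up-time for OCGT). A self-contained sketch of reading it with pandas (the mapping onto same-named PyPSA Generator attributes is assumed, not shown in this diff):

import io
import pandas as pd

# Excerpt of the new table; empty cells remain NaN and would be skipped.
csv = """attribute,OCGT,CCGT,coal,lignite,nuclear
ramp_limit_up,1,1,1,1,0.3
p_min_pu,0.2,0.45,0.325,0.4,0.5
min_up_time,,3,5,7,6"""

uc = pd.read_csv(io.StringIO(csv), index_col=0)
print(uc["nuclear"].dropna())  # parameters to apply to nuclear generators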


@@ -82,7 +82,7 @@ author = "Tom Brown (KIT, TUB, FIAS), Jonas Hoersch (KIT, FIAS), Fabian Hofmann
# The short X.Y version.
version = "0.8"
# The full version, including alpha/beta/rc tags.
release = "0.8.0"
release = "0.8.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.


@@ -1,17 +1,18 @@
,Unit,Values,Description
simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- remove stubs,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated. Defaults to true."
-- remove_stubs_across_borders,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true."
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
,Unit,Values,Description
simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- remove stubs,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated. Defaults to true.
-- remove_stubs_across_borders,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true.
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- consider_efficiency_classes,bool,"{'true','false'}","Aggregates each carrier into the top 10-quantile (high), the bottom 90-quantile (low), and everything in between (medium)."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
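A self-contained sketch of the getattr() mechanism the table describes (cluster labels and values are made up):

import pandas as pd

# Generator attribute values with a cluster assignment.
p_nom_max = pd.Series([100.0, 50.0, 80.0], index=["g1", "g2", "g3"])
clusters = pd.Series(["A", "A", "B"], index=p_nom_max.index)

# The strategy string is looked up on the grouped series via getattr() and
# called, so any reducing pandas method name is a valid strategy.
strategy = "sum"
print(getattr(p_nom_max.groupby(clusters), strategy)())  # A: 150.0, B: 80.0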



@@ -1,3 +1,5 @@
,Unit,Values,Description
{name},--,"string","For any carrier/technology overwrite attributes as listed below."
-- {attribute},--,"string or float","For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."
,Unit,Values,Description
unit_commitment,bool,"{true, false}","Allow the overwrite of ramp_limit_up, ramp_limit_start_up, ramp_limit_shut_down, p_min_pu, min_up_time, min_down_time, and start_up_cost of conventional generators. Refer to the CSV file ""unit_commitment.csv""."
dynamic_fuel_price,bool,"{true, false}","Consider the monthly fluctuating fuel prices for each conventional generator. Refer to the CSV file ""data/validation/monthly_fuel_price.csv""."
{name},--,string,For any carrier/technology overwrite attributes as listed below.
-- {attribute},--,string or float,"For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."



@ -1,3 +1,4 @@
<<<<<<< HEAD
,Unit,Values,Description
cutout,--,"Must be 'europe-2013-era5'","Specifies the directory where the relevant weather data ist stored."
carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams."
@ -7,3 +8,13 @@ clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, v
eia_norm_year,--,"Year in EIA hydro generation dataset; or False to disable","To specify a specific year by which hydro inflow is normed that deviates from the snapshots' year"
eia_correct_by_capacity,--,boolean,"Correct EIA annual hydro generation data by installed capacity."
eia_approximate_missing,--,boolean,"Approximate hydro generation data for years not included in EIA dataset through a regression based on annual runoff."
=======
,Unit,Values,Description
cutout,--,Must be 'europe-2013-era5',Specifies the directory where the relevant weather data ist stored.
carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams."
PHS_max_hours,h,float,Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}",Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
flatten_dispatch,bool,"{true, false}",Consider an upper limit for the hydro dispatch. The limit is given by the average capacity factor plus the buffer given in ``flatten_dispatch_buffer``
flatten_dispatch_buffer,--,float,"If ``flatten_dispatch`` is true, specify the value added above the average capacity factor."
clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero."
>>>>>>> master
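
For orientation, a sketch of the post-merge ``renewable: hydro:`` section (all values illustrative, not authoritative defaults):

.. code:: yaml

    renewable:
      hydro:
        cutout: europe-2013-era5
        carriers: [ror, PHS, hydro]
        PHS_max_hours: 6                                     # illustrative
        hydro_max_hours: energy_capacity_totals_by_country
        flatten_dispatch: false
        flatten_dispatch_buffer: 0.2                         # illustrative
        clip_min_inflow: 1.0                                 # illustrative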


View File

@ -5,3 +5,9 @@ s_nom_max,MW,"float","Global upper limit for the maximum capacity of each extend
max_extension,MW,"float","Upper limit for the extended capacity of each extendable line."
length_factor,--,float,"Correction factor to account for the fact that buses are *not* connected by lines through air-line distance."
under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction."
dynamic_line_rating,,,
-- activate,bool,"true or false","Whether to take dynamic line rating into account"
-- cutout,--,"Should be a folder listed in the configuration ``atlite: cutouts:`` (e.g. 'europe-2013-era5') or reference an existing folder in the directory ``cutouts``. Source module must be ERA5.","Specifies the directory where the relevant weather data is stored."
-- correction_factor,--,"float","Factor to compensate for overestimation of wind speeds in hourly averaged wind data"
-- max_voltage_difference,deg,"float","Maximum voltage angle difference in degrees or 'false' to disable"
-- max_line_rating,--,"float","Maximum line rating relative to nominal capacity without DLR, e.g. 1.3 or 'false' to disable"
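
A hedged example of how these options combine (values illustrative):

.. code:: yaml

    lines:
      dynamic_line_rating:
        activate: true
        cutout: europe-2013-era5
        correction_factor: 0.95       # illustrative
        max_voltage_difference: false
        max_line_rating: false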


View File

@ -3,6 +3,7 @@ Trigger, Description, Definition, Status
``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use
``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`__, In active use
``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L24>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L158>`__, In active use
``Ept``, "Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``.", ``prepare_network``, In active use
``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use
``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use
``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use
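
Triggers are chained with dashes in the ``{opts}`` wildcard, e.g. in the scenario definition (illustrative combination):

.. code:: yaml

    scenario:
      opts: [Co2L-24H-Ept]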


View File

@ -1,17 +1,19 @@
,Unit,Values,Description
options,,,
-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
-- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
-- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- nhours,--,int,"Specifies the :math:`n` first snapshots to take into account. Must be less than the total number of snapshots. Recommended only for debugging."
-- clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
-- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
-- track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)."
-- seed,--,int,"Random seed for increased deterministic behaviour."
solver,,,
-- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible","Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow."
-- options,--,"Key listed under ``solver_options``.","Link to specific parameter settings."
solver_options,,"dict","Dictionaries with solver-specific parameter settings."
mem,MB,"int","Estimated maximum memory requirement for solving networks."
,Unit,Values,Description
options,,,
-- clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
-- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
-- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size `horizon` which are solved consecutively."
-- seed,--,int,Random seed for increased deterministic behaviour.
-- track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)."
-- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- transmission_losses,int,[0-9],"Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
-- linearized_unit_commitment,bool,"{'true','false'}",Whether to optimise using the linearized unit commitment formulation.
-- horizon,--,int,Number of snapshots to consider in each iteration. Defaults to 100.
solver,,,
-- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow.
-- options,--,Key listed under ``solver_options``.,Link to specific parameter settings.
solver_options,,dict,Dictionaries with solver-specific parameter settings.
mem,MB,int,Estimated maximum memory requirement for solving networks.
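
A sketch combining several of the options documented above (values illustrative, not defaults):

.. code:: yaml

    solving:
      options:
        transmission_losses: 2            # two tangents for the loss approximation
        linearized_unit_commitment: true
        rolling_horizon: false
        horizon: 100
        seed: 123
      solver:
        name: gurobi
        options: gurobi-default           # assumed key under ``solver_options``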


View File

@ -1,3 +1,4 @@
<<<<<<< HEAD
,Unit,Values,Description
version,--,0.x.x,"Version of PyPSA-Eur. Descriptive only."
tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set."
@ -23,3 +24,17 @@ co2_budget,--,"Dictionary with planning horizons as keys.","CO2 budget as a frac
>>>>>>> master
=======
>>>>>>> origin/master
=======
,Unit,Values,Description
version,--,0.x.x,Version of PyPSA-Eur. Descriptive only.
tutorial,bool,"{true, false}",Switch to retrieve the tutorial data set instead of the full data set.
logging,,,
-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only"
-- format,--,,Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes.
private,,,
-- keys,,,
-- -- entsoe_api,--,,Optionally specify the ENTSO-E API key. See the guidelines to get `ENTSO-E API key <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_
remote,,,
-- ssh,--,,Optionally specify the SSH of a remote cluster to be synchronized.
-- path,--,,Optionally specify the file path within the remote cluster to be synchronized.
>>>>>>> master


View File

@ -16,12 +16,13 @@ PyPSA-Eur has several configuration options which are documented in this section
Top-level configuration
=======================
"Private" refers to local, machine-specific settings or data meant for personal use, not to be shared. "Remote" indicates the address of a server used for data exchange, often for clusters and data pushing/pulling.
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: version:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33
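
A minimal sketch of these top-level entries (all values are placeholders):

.. code:: yaml

    private:
      keys:
        entsoe_api: <your-api-key>    # placeholder
    remote:
      ssh: user@remote-cluster        # placeholder
      path: /home/user/pypsa-eur      # placeholder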

View File

@ -41,10 +41,10 @@ Perfect foresight scenarios
.. warning::
Perfect foresight is currently under development and not yet implemented.
Perfect foresight is currently implemented as a first test version.
For running perfect foresight scenarios, in future versions you will be able to
set in the ``config/config.yaml``:
For running perfect foresight scenarios, you can adjust
``config/config.perfect.yaml``:
.. code:: yaml
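    # minimal sketch; assumes the top-level foresight switch from config.default.yaml
    foresight: perfect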

View File

@ -78,10 +78,10 @@ them:
.. note::
You can find showcases of the model's capabilities in the Supplementary Materials of the
preprint `Benefits of a Hydrogen Network in Europe
<https://arxiv.org/abs/2207.05816>`_, the Supplementary Materials of the `paper in Joule with a
Joule paper `The potential role of a hydrogen network in Europe
<https://doi.org/10.1016/j.joule.2023.06.016>`_, the Supplementary Materials of another `paper in Joule with a
description of the industry sector
<https://arxiv.org/abs/2109.09563>`_, or in `a 2021 presentation
<https://doi.org/10.1016/j.joule.2022.04.016>`_, or in `a 2021 presentation
at EMP-E <https://nworbmot.org/energy/brown-empe.pdf>`_.
The sector-coupled extension of PyPSA-Eur was
initially described in the paper `Synergies of sector coupling and transmission
@ -179,10 +179,13 @@ For sector-coupling studies: ::
@misc{PyPSAEurSec,
author = "Fabian Neumann and Elisabeth Zeyen and Marta Victoria and Tom Brown",
title = "The Potential Role of a Hydrogen Network in Europe",
year = "2022",
title = "The potential role of a hydrogen network in Europe",
journal "Joule",
volume = "7",
pages = "1--25",
year = "2023",
eprint = "2207.05816",
url = "https://arxiv.org/abs/2207.05816",
doi = "10.1016/j.joule.2023.06.016",
}
For sector-coupling studies with pathway optimisation: ::
@ -277,6 +280,7 @@ The PyPSA-Eur workflow is continuously tested for Linux, macOS and Windows (WSL
release_notes
licenses
validation
limitations
contributing
support

View File

@ -10,43 +10,152 @@ Release Notes
Upcoming Release
================
* ``param:`` section in rule definition are added to track changed settings in ``config.yaml``. The goal is to automatically re-execute rules whose parameters have changed. See `Non-file parameters for rules <https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#non-file-parameters-for-rules>`_ in the snakemake documentation.
* Updated Global Energy Monitor LNG terminal data to March 2023 version.
* **Important:** The configuration files are now located in the ``config`` directory. This applies to ``config.default.yaml``, ``config.yaml`` as well as the test configuration files, which are now located in ``config/test``. Config files that are still in the root directory will be ignored.
* For industry distribution, use EPRTR as fallback if ETS data is not available.
* Bugfix: Correct typo in the CPLEX solver configuration in ``config.default.yaml``.
* The minimum capacity for renewable generators when using the myopic option has been fixed.
* Bugfix: Error in ``add_electricity`` where carriers were added multiple times to the network, resulting in a non-unique carriers error.
* Files downloaded from zenodo are now write-protected to prevent accidental re-download.
* Renamed the PyPSA-Eur script ``build_load_data`` to ``build_electricity_demand`` and ``retrieve_load_data`` to ``retrieve_electricity_demand``.
* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.
* Fix documentation build on readthedocs.
* New feature multi-decade optimisation with perfect foresight.
* It is now possible to specify years for biomass potentials which do not exist
in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated.
* In pathway mode, the biomass potential is linked to the investment year.
* Rule ``purge`` now initiates a dialog to confirm if purge is desired.
**Bugs and Compatibility**
* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv`` was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732)
PyPSA-Eur 0.8.1 (27th July 2023)
================================
**New Features**
* Add option to consider dynamic line rating based on wind speeds and
temperature according to `Glaum and Hofmann (2022)
<https://arxiv.org/abs/2208.04716>`_. See configuration section ``lines:
dynamic_line_rating:`` for more details. (https://github.com/PyPSA/pypsa-eur/pull/675)
* Add option to include a piecewise linear approximation of transmission losses,
e.g. by setting ``solving: options: transmission_losses: 2`` for an
approximation with two tangents. (https://github.com/PyPSA/pypsa-eur/pull/664)
* Add plain hydrogen turbine as additional re-electrification option besides
hydrogen fuel cell. Add switches for both re-electrification options under
``sector: hydrogen_turbine:`` and ``sector: hydrogen_fuel_cell:``.
* A new function named ``sanitize_carrier`` ensures that all unique carrier names are present in the network's carriers attribute, and adds nice names and colors for each carrier according to the provided configuration dictionary.
* Additional tech_color are added to include previously unlisted carriers.
* Remove ``vresutils`` dependency.
(https://github.com/PyPSA/pypsa-eur/pull/647)
* Added configuration option ``lines: max_extension:`` and ``links:
max_extension:`` to control the maximum capacity addition per line or link in
MW.
MW. (https://github.com/PyPSA/pypsa-eur/pull/665)
* Add option to include a piecewise linear approximation of transmission losses,
e.g. by setting ``solving: options: transmission_losses: 2`` for an
approximation with two tangents.
* A ``param:`` section in the snakemake rule definitions was added to track
changed settings in ``config.yaml``. The goal is to automatically re-execute
rules where parameters have changed. See `Non-file parameters for rules
<https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#non-file-parameters-for-rules>`_
in the snakemake documentation. (https://github.com/PyPSA/pypsa-eur/pull/663)
* A new function named ``sanitize_carrier`` ensures that all unique carrier
names are present in the network's carriers attribute, and adds nice names and
colors for each carrier according to the provided configuration dictionary.
(https://github.com/PyPSA/pypsa-eur/pull/653,
https://github.com/PyPSA/pypsa-eur/pull/690)
* The configuration settings have been documented in more detail.
(https://github.com/PyPSA/pypsa-eur/pull/685)
**Breaking Changes**
* The configuration files are now located in the ``config`` directory. This
includes the ``config.default.yaml``, ``config.yaml`` as well as the test
configuration files which are now located in the ``config/test`` directory.
Config files that are still in the root directory will be ignored.
(https://github.com/PyPSA/pypsa-eur/pull/640)
* Renamed script and rule name from ``build_load_data`` to
``build_electricity_demand`` and ``retrieve_load_data`` to
``retrieve_electricity_demand``. (https://github.com/PyPSA/pypsa-eur/pull/642,
https://github.com/PyPSA/pypsa-eur/pull/652)
* Updated to new spatial clustering module introduced in PyPSA v0.25.
(https://github.com/PyPSA/pypsa-eur/pull/696)
**Changes**
* Handling networks with links with multiple inputs/outputs no longer requires
overriding component attributes.
(https://github.com/PyPSA/pypsa-eur/pull/695)
* Added configuration option ``enable: retrieve:`` to control whether data
retrieval rules from snakemake are enabled or not. The default setting ``auto``
will automatically detect and enable/disable the rules based on internet connectivity.
will automatically detect and enable/disable the rules based on internet
connectivity. (https://github.com/PyPSA/pypsa-eur/pull/694)
* Update to ``technology-data`` v0.6.0.
(https://github.com/PyPSA/pypsa-eur/pull/704)
* Handle data bundle extraction paths via ``snakemake.output``.
* Additional technologies are added to ``tech_color`` in the configuration files
to include previously unlisted carriers.
* Doc: Added note that Windows is only tested in CI with WSL.
(https://github.com/PyPSA/pypsa-eur/issues/697)
* Doc: Add support section. (https://github.com/PyPSA/pypsa-eur/pull/656)
* Open ``rasterio`` files with ``rioxarray``.
(https://github.com/PyPSA/pypsa-eur/pull/474)
* Migrate CI to ``micromamba``. (https://github.com/PyPSA/pypsa-eur/pull/700)
**Bugs and Compatibility**
* The new minimum PyPSA version is v0.25.1.
* Removed ``vresutils`` dependency.
(https://github.com/PyPSA/pypsa-eur/pull/662)
* Adapt to new ``powerplantmatching`` version.
(https://github.com/PyPSA/pypsa-eur/pull/687,
https://github.com/PyPSA/pypsa-eur/pull/701)
* Bugfix: Correct typo in the CPLEX solver configuration in
``config.default.yaml``. (https://github.com/PyPSA/pypsa-eur/pull/630)
* Bugfix: Error in ``add_electricity`` where carriers were added multiple times
to the network, resulting in a non-unique carriers error.
* Bugfix of optional reserve constraint.
(https://github.com/PyPSA/pypsa-eur/pull/645)
* Fix broken equity constraints logic.
(https://github.com/PyPSA/pypsa-eur/pull/679)
* Fix addition of load shedding generators.
(https://github.com/PyPSA/pypsa-eur/pull/649)
* Fix automatic building of documentation on readthedocs.org.
(https://github.com/PyPSA/pypsa-eur/pull/658)
* Bugfix: Update network clustering to avoid adding deleted links in clustered
network. (https://github.com/PyPSA/pypsa-eur/pull/678)
* Address ``geopandas`` deprecations.
(https://github.com/PyPSA/pypsa-eur/pull/678)
* Fix bug with underground hydrogen storage creation, where for some small model
regions no cavern storage is available.
(https://github.com/PyPSA/pypsa-eur/pull/672)
PyPSA-Eur 0.8.0 (18th March 2023)

View File

@ -83,7 +83,7 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already
Rule ``retrieve_electricity_demand``
====================================
This rule downloads hourly electric load data for each country from the `OPSD platform <data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.
This rule downloads hourly electric load data for each country from the `OPSD platform <https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.
**Relevant Settings**

View File

@ -145,89 +145,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
0[label = "solve_network", color = "0.21 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.02 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.37 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.39 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.11 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.23 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: onwind", color = "0.57 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.09 0.6 0.85", style="rounded"];
8[label = "build_shapes", color = "0.41 0.6 0.85", style="rounded"];
9[label = "retrieve_databundle", color = "0.28 0.6 0.85", style="rounded"];
10[label = "retrieve_natura_raster", color = "0.62 0.6 0.85", style="rounded"];
11[label = "build_bus_regions", color = "0.53 0.6 0.85", style="rounded"];
12[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.05 0.6 0.85", style="rounded,dashed"];
13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.57 0.6 0.85", style="rounded"];
14[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"];
15[label = "retrieve_ship_raster", color = "0.07 0.6 0.85", style="rounded,dashed"];
16[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.05 0.6 0.85", style="rounded,dashed"];
17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.57 0.6 0.85", style="rounded"];
18[label = "build_renewable_profiles\ntechnology: solar", color = "0.57 0.6 0.85", style="rounded"];
19[label = "build_hydro_profile", color = "0.44 0.6 0.85", style="rounded"];
20[label = "retrieve_cost_data", color = "0.30 0.6 0.85", style="rounded"];
21[label = "build_powerplants", color = "0.16 0.6 0.85", style="rounded"];
22[label = "build_electricity_demand", color = "0.00 0.6 0.85", style="rounded"];
23[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded,dashed"];
1 -> 0
2 -> 1
20 -> 1
3 -> 2
20 -> 2
4 -> 3
20 -> 3
5 -> 4
20 -> 4
11 -> 4
6 -> 5
13 -> 5
17 -> 5
18 -> 5
19 -> 5
7 -> 5
20 -> 5
11 -> 5
21 -> 5
9 -> 5
22 -> 5
8 -> 5
7 -> 6
9 -> 6
10 -> 6
8 -> 6
11 -> 6
12 -> 6
8 -> 7
9 -> 8
8 -> 11
7 -> 11
7 -> 13
9 -> 13
10 -> 13
14 -> 13
8 -> 13
11 -> 13
12 -> 13
15 -> 14
12 -> 14
16 -> 14
7 -> 17
9 -> 17
10 -> 17
14 -> 17
8 -> 17
11 -> 17
12 -> 17
7 -> 18
9 -> 18
10 -> 18
8 -> 18
11 -> 18
16 -> 18
8 -> 19
12 -> 19
7 -> 21
23 -> 22
0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"];
8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"];
9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"];
10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"];
11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"];
12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"];
13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"];
14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"];
15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"];
16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"];
17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"];
18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"];
19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"];
20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"];
21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"];
22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"];
1 -> 0
22 -> 0
2 -> 1
18 -> 1
3 -> 2
18 -> 2
4 -> 3
18 -> 3
5 -> 4
18 -> 4
11 -> 4
6 -> 5
12 -> 5
13 -> 5
16 -> 5
7 -> 5
17 -> 5
18 -> 5
11 -> 5
19 -> 5
9 -> 5
20 -> 5
8 -> 5
7 -> 6
9 -> 6
10 -> 6
8 -> 6
11 -> 6
8 -> 7
9 -> 8
8 -> 11
7 -> 11
7 -> 12
9 -> 12
10 -> 12
8 -> 12
11 -> 12
7 -> 13
9 -> 13
10 -> 13
14 -> 13
8 -> 13
11 -> 13
15 -> 14
7 -> 16
9 -> 16
10 -> 16
14 -> 16
8 -> 16
11 -> 16
7 -> 17
7 -> 19
21 -> 20
}
|

View File

@ -59,7 +59,7 @@ To run an overnight / greenfield scenario with the specifications above, run
.. code:: bash
snakemake -call --configfile config/test/config.overnight.yaml all
snakemake -call all --configfile config/test/config.overnight.yaml
which will result in the following *additional* jobs ``snakemake`` wants to run
on top of those already included in the electricity-only tutorial:
@ -318,7 +318,7 @@ To run a myopic foresight scenario with the specifications above, run
.. code:: bash
snakemake -call --configfile config/test/config.myopic.yaml all
snakemake -call all --configfile config/test/config.myopic.yaml
which will result in the following *additional* jobs ``snakemake`` wants to run:

53
doc/validation.rst Normal file
View File

@ -0,0 +1,53 @@
..
SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0
##########################################
Validation
##########################################
The PyPSA-Eur model workflow provides a built-in mechanism for validation. This allows users to contrast the outcomes of network optimization against the historical behaviour of the European power system. The snakemake rule ``validate_elec_networks`` enables this by generating comparative figures that encapsulate key data points such as dispatch by carrier, cross-border flows, and market prices per price zone.
These comparisons utilize data from the 2019 ENTSO-E Transparency Platform. To enable this, an ENTSO-E API key must be inserted into the ``config.yaml`` file. Detailed steps for this process can be found in the user guide `here <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_.
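For example, the key belongs under the top-level ``private`` section of the configuration (placeholder value, not a working key):
.. code:: yaml
    private:
      keys:
        entsoe_api: <your-entsoe-api-key>  # placeholder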
Once the API key is set, the validation workflow can be triggered by running the following command:
.. code:: bash
    snakemake validate_elec_networks --configfile config/config.validation.yaml -c8
The configuration file ``config/config.validation.yaml`` contains the following parameters:
.. literalinclude:: ../config/config.validation.yaml
:language: yaml
The setup uses monthly varying fuel prices for gas, lignite, coal and oil as well as CO2 prices, which are created by the script ``build_monthly_prices``. Upon completion of the validation process, the resulting network and generated figures will be stored in the ``results/validation`` directory for further analysis.
Results
=======
At the time of writing, the comparison with the historical data shows partially accurate, partially improvable results. The following figures show the comparison of the dispatch of the different carriers.
.. image:: ../graphics/validation_seasonal_operation_area_elec_s_37_ec_lv1.0_Ept.png
:width: 100%
:align: center
.. image:: ../graphics/validation_production_bar_elec_s_37_ec_lv1.0_Ept.png
:width: 100%
:align: center
Issues and possible improvements
--------------------------------
**Overestimated dispatch of wind and solar:** Renewable potentials of wind and solar are slightly overestimated in the model. This leads to a higher dispatch of these carriers than in the historical data. In particular, the solar dispatch during winter is overestimated.
**Coal - Lignite fuel switch:** The model switches from coal to lignite more than observed historically. This might result from subsidies for lignite and coal that are not captured in the model. To correct this fuel switch, a manual cost correction was added to the script ``build_monthly_prices``.
**Planned outages of nuclear power plants:** Planned outages of nuclear power plants are not captured in the model. This leads to an underestimated dispatch of nuclear power plants in winter and an overestimated dispatch in summer. This point is hard to fix, since the planned outages are not published on the ENTSO-E Transparency Platform.
**False classification of run-of-river power plants:** Some run-of-river power plants are classified as hydro power plants in the model. This leads to a general overestimation of the hydro power dispatch. In particular, Swedish hydro power plants are overestimated.
**Load shedding:** Due to constrained NTCs (cross-border capacities), the model has to shed load in some regions. This leads to high market prices in those regions, which drive the average market price up. Further fine-tuning of the NTCs is needed to avoid load shedding.

View File

@ -12,74 +12,93 @@ dependencies:
- _libgcc_mutex=0.1
- _openmp_mutex=4.5
- affine=2.4.0
- alsa-lib=1.2.8
- alsa-lib=1.2.9
- ampl-mp=3.1.0
- amply=0.1.5
- amply=0.1.6
- anyio=3.7.1
- appdirs=1.4.4
- argon2-cffi=21.3.0
- argon2-cffi-bindings=21.2.0
- asttokens=2.2.1
- atlite=0.2.10
- async-lru=2.0.3
- atk-1.0=2.38.0
- atlite=0.2.11
- attr=2.5.1
- attrs=22.2.0
- attrs=23.1.0
- aws-c-auth=0.7.0
- aws-c-cal=0.6.0
- aws-c-common=0.8.23
- aws-c-compression=0.2.17
- aws-c-event-stream=0.3.1
- aws-c-http=0.7.11
- aws-c-io=0.13.28
- aws-c-mqtt=0.8.14
- aws-c-s3=0.3.13
- aws-c-sdkutils=0.1.11
- aws-checksums=0.1.16
- aws-crt-cpp=0.20.3
- aws-sdk-cpp=1.10.57
- babel=2.12.1
- backcall=0.2.0
- backports=1.0
- backports.functools_lru_cache=1.6.4
- beautifulsoup4=4.11.2
- blosc=1.21.3
- bokeh=2.4.3
- backports.functools_lru_cache=1.6.5
- beautifulsoup4=4.12.2
- bleach=6.0.0
- blosc=1.21.4
- bokeh=3.2.1
- boost-cpp=1.78.0
- bottleneck=1.3.6
- bottleneck=1.3.7
- branca=0.6.0
- brotli=1.0.9
- brotli-bin=1.0.9
- brotlipy=0.7.0
- brotli-python=1.0.9
- bzip2=1.0.8
- c-ares=1.18.1
- ca-certificates=2022.12.7
- c-ares=1.19.1
- c-blosc2=2.10.0
- ca-certificates=2023.7.22
- cairo=1.16.0
- cartopy=0.21.1
- cdsapi=0.5.1
- certifi=2022.12.7
- cdsapi=0.6.1
- certifi=2023.7.22
- cffi=1.15.1
- cfitsio=4.2.0
- cftime=1.6.2
- charset-normalizer=2.1.1
- click=8.1.3
- charset-normalizer=3.2.0
- click=8.1.6
- click-plugins=1.1.1
- cligj=0.7.2
- cloudpickle=2.2.1
- coin-or-cbc=2.10.8
- coin-or-cgl=0.60.6
- coin-or-clp=1.17.7
- coin-or-osi=0.108.7
- coin-or-utils=2.11.6
- coincbc=2.10.8
- colorama=0.4.6
- configargparse=1.5.3
- comm=0.1.3
- configargparse=1.7
- connection_pool=0.0.3
- country_converter=0.8.0
- cryptography=39.0.1
- curl=7.88.0
- contourpy=1.1.0
- country_converter=1.0.0
- curl=8.2.0
- cycler=0.11.0
- cytoolz=0.12.0
- dask=2023.2.0
- dask-core=2023.2.0
- cytoolz=0.12.2
- dask=2023.7.1
- dask-core=2023.7.1
- datrie=0.8.2
- dbus=1.13.6
- debugpy=1.6.7
- decorator=5.1.1
- defusedxml=0.7.1
- deprecation=2.1.0
- descartes=1.1.0
- distributed=2023.2.0
- distributed=2023.7.1
- distro=1.8.0
- docutils=0.19
- dpath=2.1.4
- entsoe-py=0.5.8
- docutils=0.20.1
- dpath=2.1.6
- entrypoints=0.4
- entsoe-py=0.5.10
- et_xmlfile=1.1.0
- exceptiongroup=1.1.0
- exceptiongroup=1.1.2
- executing=1.2.0
- expat=2.5.0
- fftw=3.3.10
- filelock=3.9.0
- fiona=1.9.1
- filelock=3.12.2
- fiona=1.9.4
- flit-core=3.9.0
- folium=0.14.0
- font-ttf-dejavu-sans-mono=2.37
- font-ttf-inconsolata=3.000
@ -88,293 +107,366 @@ dependencies:
- fontconfig=2.14.2
- fonts-conda-ecosystem=1
- fonts-conda-forge=1
- fonttools=4.38.0
- fonttools=4.41.1
- freetype=2.12.1
- freexl=1.0.6
- fsspec=2023.1.0
- gdal=3.6.2
- fribidi=1.0.10
- fsspec=2023.6.0
- gdal=3.7.0
- gdk-pixbuf=2.42.10
- geographiclib=1.52
- geojson-rewind=1.0.2
- geopandas=0.12.2
- geopandas-base=0.12.2
- geopandas=0.13.2
- geopandas-base=0.13.2
- geopy=2.3.0
- geos=3.11.1
- geos=3.11.2
- geotiff=1.7.1
- gettext=0.21.1
- gflags=2.2.2
- giflib=5.2.1
- gitdb=4.0.10
- gitpython=3.1.30
- glib=2.74.1
- glib-tools=2.74.1
- gitpython=3.1.32
- glib=2.76.4
- glib-tools=2.76.4
- glog=0.6.0
- gmp=6.2.1
- graphite2=1.3.13
- gst-plugins-base=1.22.0
- gstreamer=1.22.0
- gstreamer-orc=0.4.33
- harfbuzz=6.0.0
- graphviz=8.1.0
- gst-plugins-base=1.22.5
- gstreamer=1.22.5
- gtk2=2.24.33
- gts=0.7.6
- harfbuzz=7.3.0
- hdf4=4.2.15
- hdf5=1.12.2
- heapdict=1.0.1
- hdf5=1.14.1
- humanfriendly=10.0
- icu=70.1
- icu=72.1
- idna=3.4
- importlib-metadata=6.0.0
- importlib_resources=5.10.2
- importlib-metadata=6.8.0
- importlib_metadata=6.8.0
- importlib_resources=6.0.0
- iniconfig=2.0.0
- ipopt=3.14.11
- ipython=8.10.0
- jack=1.9.22
- ipopt=3.14.12
- ipykernel=6.24.0
- ipython=8.14.0
- ipython_genutils=0.2.0
- ipywidgets=8.0.7
- jedi=0.18.2
- jinja2=3.1.2
- joblib=1.2.0
- jpeg=9e
- joblib=1.3.0
- json-c=0.16
- jsonschema=4.17.3
- jupyter_core=5.2.0
- kealib=1.5.0
- json5=0.9.14
- jsonschema=4.18.4
- jsonschema-specifications=2023.7.1
- jupyter=1.0.0
- jupyter-lsp=2.2.0
- jupyter_client=8.3.0
- jupyter_console=6.6.3
- jupyter_core=5.3.1
- jupyter_events=0.6.3
- jupyter_server=2.7.0
- jupyter_server_terminals=0.4.4
- jupyterlab=4.0.3
- jupyterlab_pygments=0.2.2
- jupyterlab_server=2.24.0
- jupyterlab_widgets=3.0.8
- kealib=1.5.1
- keyutils=1.6.1
- kiwisolver=1.4.4
- krb5=1.20.1
- krb5=1.21.1
- lame=3.100
- lcms2=2.14
- lcms2=2.15
- ld_impl_linux-64=2.40
- lerc=4.0.0
- libabseil=20230125.3
- libaec=1.0.6
- libarchive=3.6.2
- libarrow=12.0.1
- libblas=3.9.0
- libbrotlicommon=1.0.9
- libbrotlidec=1.0.9
- libbrotlienc=1.0.9
- libcap=2.66
- libcap=2.67
- libcblas=3.9.0
- libclang=15.0.7
- libclang13=15.0.7
- libcrc32c=1.1.2
- libcups=2.3.3
- libcurl=7.88.0
- libdb=6.2.32
- libdeflate=1.17
- libcurl=8.2.0
- libdeflate=1.18
- libedit=3.1.20191231
- libev=4.33
- libevent=2.1.10
- libevent=2.1.12
- libexpat=2.5.0
- libffi=3.4.2
- libflac=1.4.2
- libgcc-ng=12.2.0
- libflac=1.4.3
- libgcc-ng=13.1.0
- libgcrypt=1.10.1
- libgdal=3.6.2
- libgfortran-ng=12.2.0
- libgfortran5=12.2.0
- libglib=2.74.1
- libgomp=12.2.0
- libgpg-error=1.46
- libgd=2.3.3
- libgdal=3.7.0
- libgfortran-ng=13.1.0
- libgfortran5=13.1.0
- libglib=2.76.4
- libgomp=13.1.0
- libgoogle-cloud=2.12.0
- libgpg-error=1.47
- libgrpc=1.56.2
- libiconv=1.17
- libjpeg-turbo=2.1.5.1
- libkml=1.3.0
- liblapack=3.9.0
- liblapacke=3.9.0
- libllvm15=15.0.7
- libnetcdf=4.8.1
- libnghttp2=1.51.0
- libnetcdf=4.9.2
- libnghttp2=1.52.0
- libnsl=2.0.0
- libnuma=2.0.16
- libogg=1.3.4
- libopenblas=0.3.21
- libopenblas=0.3.23
- libopus=1.3.1
- libpng=1.6.39
- libpq=15.2
- libpq=15.3
- libprotobuf=4.23.3
- librsvg=2.56.1
- librttopo=1.1.0
- libsndfile=1.2.0
- libsodium=1.0.18
- libspatialindex=1.9.3
- libspatialite=5.0.1
- libsqlite=3.40.0
- libssh2=1.10.0
- libstdcxx-ng=12.2.0
- libsystemd0=252
- libtiff=4.5.0
- libsqlite=3.42.0
- libssh2=1.11.0
- libstdcxx-ng=13.1.0
- libsystemd0=253
- libthrift=0.18.1
- libtiff=4.5.1
- libtool=2.4.7
- libudev1=252
- libuuid=2.32.1
- libutf8proc=2.8.0
- libuuid=2.38.1
- libvorbis=1.3.7
- libwebp-base=1.2.4
- libxcb=1.13
- libwebp=1.3.1
- libwebp-base=1.3.1
- libxcb=1.15
- libxkbcommon=1.5.0
- libxml2=2.10.3
- libxml2=2.11.4
- libxslt=1.1.37
- libzip=1.9.2
- libzlib=1.2.13
- linopy=0.1.3
- locket=1.0.0
- lxml=4.9.2
- lxml=4.9.3
- lz4=4.3.2
- lz4-c=1.9.4
- lzo=2.10
- mapclassify=2.5.0
- markupsafe=2.1.2
- markupsafe=2.1.3
- matplotlib=3.5.3
- matplotlib-base=3.5.3
- matplotlib-inline=0.1.6
- memory_profiler=0.61.0
- metis=5.1.0
- mpg123=1.31.2
- msgpack-python=1.0.4
- metis=5.1.1
- mistune=3.0.0
- mpg123=1.31.3
- msgpack-python=1.0.5
- mumps-include=5.2.1
- mumps-seq=5.2.1
- munch=2.5.0
- munch=4.0.0
- munkres=1.1.4
- mysql-common=8.0.32
- mysql-libs=8.0.32
- nbformat=5.7.3
- ncurses=6.3
- netcdf4=1.6.2
- networkx=3.0
- mysql-common=8.0.33
- mysql-libs=8.0.33
- nbclient=0.8.0
- nbconvert=7.7.2
- nbconvert-core=7.7.2
- nbconvert-pandoc=7.7.2
- nbformat=5.9.1
- ncurses=6.4
- nest-asyncio=1.5.6
- netcdf4=1.6.4
- networkx=3.1
- nomkl=1.0
- notebook=7.0.0
- notebook-shim=0.2.3
- nspr=4.35
- nss=3.88
- numexpr=2.8.3
- numpy=1.24
- nss=3.89
- numexpr=2.8.4
- numpy=1.25.1
- openjdk=17.0.3
- openjpeg=2.5.0
- openpyxl=3.1.0
- openssl=3.0.8
- packaging=23.0
- pandas=1.5.3
- openpyxl=3.1.2
- openssl=3.1.1
- orc=1.9.0
- overrides=7.3.1
- packaging=23.1
- pandas=2.0.3
- pandoc=3.1.3
- pandocfilters=1.5.0
- pango=1.50.14
- parso=0.8.3
- partd=1.3.0
- partd=1.4.0
- patsy=0.5.3
- pcre2=10.40
- pexpect=4.8.0
- pickleshare=0.7.5
- pillow=9.4.0
- pip=23.0
- pillow=10.0.0
- pip=23.2.1
- pixman=0.40.0
- pkgutil-resolve-name=1.3.10
- plac=1.3.5
- platformdirs=3.0.0
- pluggy=1.0.0
- platformdirs=3.9.1
- pluggy=1.2.0
- ply=3.11
- pooch=1.6.0
- poppler=22.12.0
- pooch=1.7.0
- poppler=23.05.0
- poppler-data=0.4.12
- postgresql=15.2
- powerplantmatching=0.5.6
- postgresql=15.3
- powerplantmatching=0.5.7
- progressbar2=4.2.0
- proj=9.1.0
- prompt-toolkit=3.0.36
- psutil=5.9.4
- proj=9.2.1
- prometheus_client=0.17.1
- prompt-toolkit=3.0.39
- prompt_toolkit=3.0.39
- psutil=5.9.5
- pthread-stubs=0.4
- ptyprocess=0.7.0
- pulp=2.7.0
- pulseaudio=16.1
- pulseaudio-client=16.1
- pure_eval=0.2.2
- py-cpuinfo=9.0.0
- pyarrow=12.0.1
- pycountry=22.3.5
- pycparser=2.21
- pygments=2.14.0
- pyomo=6.4.4
- pyopenssl=23.0.0
- pyparsing=3.0.9
- pyproj=3.4.1
- pypsa=0.22.1
- pygments=2.15.1
- pyomo=6.6.1
- pyparsing=3.1.0
- pyproj=3.6.0
- pyqt=5.15.7
- pyqt5-sip=12.11.0
- pyrsistent=0.19.3
- pyshp=2.3.1
- pysocks=1.7.1
- pytables=3.7.0
- pytest=7.2.1
- python=3.10.9
- pytables=3.8.0
- pytest=7.4.0
- python=3.10.12
- python-dateutil=2.8.2
- python-fastjsonschema=2.16.2
- python-utils=3.5.2
- python-fastjsonschema=2.18.0
- python-json-logger=2.0.7
- python-tzdata=2023.3
- python-utils=3.7.0
- python_abi=3.10
- pytz=2022.7.1
- pytz=2023.3
- pyxlsb=1.0.10
- pyyaml=6.0
- pyzmq=25.1.0
- qt-main=5.15.8
- rasterio=1.3.4
- readline=8.1.2
- requests=2.28.1
- retry=0.9.2
- rich=12.5.1
- rioxarray=0.13.3
- rtree=1.0.0
- s2n=1.0.10
- scikit-learn=1.1.1
- scipy=1.8.1
- qtconsole=5.4.3
- qtconsole-base=5.4.3
- qtpy=2.3.1
- rasterio=1.3.8
- rdma-core=28.9
- re2=2023.03.02
- readline=8.2
- referencing=0.30.0
- requests=2.31.0
- reretry=0.11.8
- rfc3339-validator=0.1.4
- rfc3986-validator=0.1.1
- rioxarray=0.14.1
- rpds-py=0.9.2
- rtree=1.0.1
- s2n=1.3.46
- scikit-learn=1.3.0
- scipy=1.11.1
- scotch=6.0.9
- seaborn=0.12.2
- seaborn-base=0.12.2
- setuptools=67.3.2
- send2trash=1.8.2
- setuptools=68.0.0
- setuptools-scm=7.1.0
- setuptools_scm=7.1.0
- shapely=2.0.1
- sip=6.7.7
- sip=6.7.10
- six=1.16.0
- smart_open=6.3.0
- smmap=3.0.5
- snakemake-minimal=7.22.0
- snappy=1.1.9
- snakemake-minimal=7.30.2
- snappy=1.1.10
- sniffio=1.3.0
- snuggs=1.4.7
- sortedcontainers=2.4.0
- soupsieve=2.3.2.post1
- sqlite=3.40.0
- sqlite=3.42.0
- stack_data=0.6.2
- statsmodels=0.13.5
- statsmodels=0.14.0
- stopit=1.1.2
- tabula-py=2.6.0
- tabulate=0.9.0
- tblib=1.7.0
- threadpoolctl=3.1.0
- terminado=0.17.1
- threadpoolctl=3.2.0
- throttler=1.2.1
- tiledb=2.13.2
- tinycss2=1.2.1
- tk=8.6.12
- toml=0.10.2
- tomli=2.0.1
- toolz=0.12.0
- toposort=1.9
- tornado=6.2
- tqdm=4.64.1
- toposort=1.10
- tornado=6.3.2
- tqdm=4.65.0
- traitlets=5.9.0
- typing-extensions=4.4.0
- typing_extensions=4.4.0
- tzcode=2022g
- tzdata=2022g
- typing-extensions=4.7.1
- typing_extensions=4.7.1
- typing_utils=0.1.0
- tzcode=2023c
- tzdata=2023c
- ucx=1.14.1
- unicodedata2=15.0.0
- unidecode=1.3.6
- unixodbc=2.3.10
- urllib3=1.26.14
- urllib3=2.0.4
- wcwidth=0.2.6
- wheel=0.38.4
- wrapt=1.14.1
- xarray=2023.2.0
- webencodings=0.5.1
- websocket-client=1.6.1
- wheel=0.41.0
- widgetsnbextension=4.0.8
- wrapt=1.15.0
- xarray=2023.7.0
- xcb-util=0.4.0
- xcb-util-image=0.4.0
- xcb-util-keysyms=0.4.0
- xcb-util-renderutil=0.3.9
- xcb-util-wm=0.4.1
- xerces-c=3.2.4
- xkeyboard-config=2.39
- xlrd=2.0.1
- xorg-fixesproto=5.0
- xorg-inputproto=2.3.2
- xorg-kbproto=1.0.7
- xorg-libice=1.0.10
- xorg-libsm=1.2.3
- xorg-libx11=1.7.2
- xorg-libxau=1.0.9
- xorg-libice=1.1.1
- xorg-libsm=1.2.4
- xorg-libx11=1.8.6
- xorg-libxau=1.0.11
- xorg-libxdmcp=1.1.3
- xorg-libxext=1.3.4
- xorg-libxfixes=5.0.3
- xorg-libxi=1.7.10
- xorg-libxrender=0.9.10
- xorg-libxrender=0.9.11
- xorg-libxtst=1.2.3
- xorg-recordproto=1.14.2
- xorg-renderproto=0.11.1
- xorg-xextproto=7.3.0
- xorg-xf86vidmodeproto=2.3.1
- xorg-xproto=7.0.31
- xyzservices=2022.9.0
- xyzservices=2023.7.0
- xz=5.2.6
- yaml=0.2.5
- yte=1.5.1
- zict=2.2.0
- zipp=3.13.0
- zeromq=4.3.4
- zict=3.0.0
- zipp=3.16.2
- zlib=1.2.13
- zlib-ng=2.0.7
- zstd=1.5.2
- pip:
- countrycode==0.2
- highspy==1.5.0.dev0
- pybind11==2.10.3
- tsam==2.2.2
- gurobipy==10.0.2
- linopy==0.2.2
- pypsa==0.25.1
- tsam==2.3.0
- validators==0.20.0

View File

@ -53,6 +53,7 @@ dependencies:
- descartes
- rasterio!=1.2.10
- pip:
- tsam>=1.1.0
- git+https://github.com/PyPSA/PyPSA.git@master
- git+https://github.com/fneum/tsam.git@performance
- pypsa>=0.25.2

Binary file not shown (image added, 99 KiB).

Binary file not shown (image added, 801 KiB).

View File

@ -4,3 +4,4 @@
font.family: sans-serif
font.sans-serif: Ubuntu, DejaVu Sans
image.cmap: viridis
figure.autolayout : True

View File

@ -81,6 +81,9 @@ rule base_network:
params:
countries=config["countries"],
snapshots=config["snapshots"],
lines=config["lines"],
links=config["links"],
transformers=config["transformers"],
input:
eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv",
@ -292,6 +295,24 @@ rule build_renewable_profiles:
"../scripts/build_renewable_profiles.py"
rule build_monthly_prices:
input:
co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx",
output:
co2_price=RESOURCES + "co2_price.csv",
fuel_price=RESOURCES + "monthly_fuel_price.csv",
log:
LOGS + "build_monthly_prices.log",
threads: 1
resources:
mem_mb=5000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_monthly_prices.py"
rule build_hydro_profile:
params:
hydro=config["renewable"]["hydro"],
@ -315,6 +336,30 @@ rule build_hydro_profile:
"../scripts/build_hydro_profile.py"
if config["lines"]["dynamic_line_rating"]["activate"]:
rule build_line_rating:
input:
base_network=RESOURCES + "networks/base.nc",
cutout="cutouts/"
+ CDIR
+ config["lines"]["dynamic_line_rating"]["cutout"]
+ ".nc",
output:
output=RESOURCES + "networks/line_rating.nc",
log:
LOGS + "build_line_rating.log",
benchmark:
BENCHMARKS + "build_line_rating"
threads: ATLITE_NPROCESSES
resources:
mem_mb=ATLITE_NPROCESSES * 1000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_line_rating.py"
rule add_electricity:
params:
length_factor=config["lines"]["length_factor"],
@ -322,7 +367,7 @@ rule add_electricity:
countries=config["countries"],
renewable=config["renewable"],
electricity=config["electricity"],
conventional=config.get("conventional", {}),
conventional=config["conventional"],
costs=config["costs"],
input:
**{
@ -332,15 +377,23 @@ rule add_electricity:
**{
f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get("conventional", {None: {}}).items()
if carrier in config["electricity"]["conventional_carriers"]
for attr, fn in d.items()
if str(fn).startswith("data/")
},
base_network=RESOURCES + "networks/base.nc",
line_rating=RESOURCES + "networks/line_rating.nc"
if config["lines"]["dynamic_line_rating"]["activate"]
else RESOURCES + "networks/base.nc",
tech_costs=COSTS,
regions=RESOURCES + "regions_onshore.geojson",
powerplants=RESOURCES + "powerplants.csv",
hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
unit_commitment="data/unit_commitment.csv",
fuel_price=RESOURCES + "monthly_fuel_price.csv"
if config["conventional"]["dynamic_fuel_price"]
else [],
load=RESOURCES + "load{weather_year}.csv",
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
output:
@ -351,7 +404,7 @@ rule add_electricity:
BENCHMARKS + "add_electricity{weather_year}"
threads: 1
resources:
mem_mb=5000,
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
@ -387,7 +440,7 @@ rule simplify_network:
BENCHMARKS + "simplify_network/elec{weather_year}_s{simpl}"
threads: 1
resources:
mem_mb=4000,
mem_mb=12000,
conda:
"../envs/environment.yaml"
script:
@ -432,7 +485,7 @@ rule cluster_network:
BENCHMARKS + "cluster_network/elec{weather_year}_s{simpl}_{clusters}"
threads: 1
resources:
mem_mb=6000,
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
@ -455,7 +508,7 @@ rule add_extra_components:
BENCHMARKS + "add_extra_components/elec{weather_year}_s{simpl}_{clusters}_ec"
threads: 1
resources:
mem_mb=3000,
mem_mb=4000,
conda:
"../envs/environment.yaml"
script:
@ -474,6 +527,7 @@ rule prepare_network:
input:
RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec.nc",
tech_costs=COSTS,
co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [],
output:
RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:

View File

@ -86,7 +86,7 @@ if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]:
rule build_gas_input_locations:
input:
lng=HTTP.remote(
"https://globalenergymonitor.org/wp-content/uploads/2022/09/Europe-Gas-Tracker-August-2022.xlsx",
"https://globalenergymonitor.org/wp-content/uploads/2023/07/Europe-Gas-Tracker-2023-03-v3.xlsx",
keep_local=True,
),
entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson",
@ -242,9 +242,9 @@ rule build_energy_totals:
energy=config["energy"],
input:
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
co2="data/eea/UNFCCC_v23.csv",
swiss="data/switzerland-sfoe/switzerland-new_format.csv",
idees="data/jrc-idees-2015",
co2="data/bundle-sector/eea/UNFCCC_v23.csv",
swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv",
idees="data/bundle-sector/jrc-idees-2015",
district_heat_share="data/district_heat_share.csv",
eurostat=directory("data/eurostat-energy_balances-june_2021_edition"),
output:
@ -290,7 +290,7 @@ rule build_biomass_potentials:
"https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx",
keep_local=True,
),
nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"),
swiss_cantons=ancient("data/bundle/ch_cantons.csv"),
@ -298,22 +298,23 @@ rule build_biomass_potentials:
country_shapes=RESOURCES + "country_shapes.geojson",
output:
biomass_potentials_all=RESOURCES
+ "biomass_potentials_all{weather_year}_s{simpl}_{clusters}.csv",
biomass_potentials=RESOURCES + "biomass_potentials{weather_year}_s{simpl}_{clusters}.csv",
+ "biomass_potentials_all{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
biomass_potentials=RESOURCES
+ "biomass_potentials{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
threads: 1
resources:
mem_mb=1000,
log:
LOGS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}.log",
LOGS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}_{planning_horizons}.log",
benchmark:
BENCHMARKS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}"
BENCHMARKS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}_{planning_horizons}"
conda:
"../envs/environment.yaml"
script:
"../scripts/build_biomass_potentials.py"
if config["sector"]["biomass_transport"]:
if config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]:
rule build_biomass_transport_costs:
input:
@ -338,9 +339,8 @@ if config["sector"]["biomass_transport"]:
build_biomass_transport_costs_output = rules.build_biomass_transport_costs.output
if not config["sector"]["biomass_transport"]:
if not (config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]):
# this is effectively an `else` statement, which is however not liked by snakefmt
build_biomass_transport_costs_output = {}
@ -385,7 +385,7 @@ if not config["sector"]["regional_co2_sequestration_potential"]["enable"]:
rule build_salt_cavern_potentials:
input:
salt_caverns="data/h2_salt_caverns_GWh_per_sqkm.geojson",
salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson",
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
regions_offshore=RESOURCES + "regions_offshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
output:
@ -407,7 +407,7 @@ rule build_ammonia_production:
params:
countries=config["countries"],
input:
usgs="data/myb1-2017-nitro.xls",
usgs="data/bundle-sector/myb1-2017-nitro.xls",
output:
ammonia_production=RESOURCES + "ammonia_production.csv",
threads: 1
@ -429,7 +429,7 @@ rule build_industry_sector_ratios:
ammonia=config["sector"].get("ammonia", False),
input:
ammonia_production=RESOURCES + "ammonia_production.csv",
idees="data/jrc-idees-2015",
idees="data/bundle-sector/jrc-idees-2015",
output:
industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv",
threads: 1
@ -451,8 +451,8 @@ rule build_industrial_production_per_country:
countries=config["countries"],
input:
ammonia_production=RESOURCES + "ammonia_production.csv",
jrc="data/jrc-idees-2015",
eurostat="data/eurostat-energy_balances-may_2018_edition",
jrc="data/bundle-sector/jrc-idees-2015",
eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition",
output:
industrial_production_per_country=RESOURCES
+ "industrial_production_per_country.csv",
@ -502,7 +502,7 @@ rule build_industrial_distribution_key:
input:
regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
hotmaps_industrial_database="data/Industrial_Database.csv",
hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv",
output:
industrial_distribution_key=RESOURCES
+ "industrial_distribution_key_elec{weather_year}_s{simpl}_{clusters}.csv",
@ -577,7 +577,7 @@ rule build_industrial_energy_demand_per_country_today:
countries=config["countries"],
industry=config["industry"],
input:
jrc="data/jrc-idees-2015",
jrc="data/bundle-sector/jrc-idees-2015",
ammonia_production=RESOURCES + "ammonia_production.csv",
industrial_production_per_country=RESOURCES
+ "industrial_production_per_country.csv",
@ -703,8 +703,8 @@ rule build_transport_demand:
pop_weighted_energy_totals=RESOURCES
+ "pop_weighted_energy_totals{weather_year}_s{simpl}_{clusters}.csv",
transport_data=RESOURCES + "transport_data.csv",
traffic_data_KFZ="data/emobility/KFZ__count",
traffic_data_Pkw="data/emobility/Pkw__count",
traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count",
traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count",
temp_air_total=RESOURCES + "temp_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
output:
transport_demand=RESOURCES + "transport_demand{weather_year}_s{simpl}_{clusters}.csv",
@ -755,8 +755,13 @@ rule prepare_sector_network:
avail_profile=RESOURCES + "avail_profile{weather_year}_s{simpl}_{clusters}.csv",
dsm_profile=RESOURCES + "dsm_profile{weather_year}_s{simpl}_{clusters}.csv",
co2_totals_name=RESOURCES + "co2_totals.csv",
co2="data/eea/UNFCCC_v23.csv",
biomass_potentials=RESOURCES + "biomass_potentials{weather_year}_s{simpl}_{clusters}.csv",
co2="data/bundle-sector/eea/UNFCCC_v23.csv",
biomass_potentials=RESOURCES
+ "biomass_potentials{weather_year}_s{simpl}_{clusters}_"
+ "{}.csv".format(config["biomass"]["year"])
if config["foresight"] == "overnight"
else RESOURCES
+ "biomass_potentials{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
heat_profile="data/heat_load_profile_BDEW.csv",
costs="data/costs_{}.csv".format(config["costs"]["year"])
if config["foresight"] == "overnight"


@ -14,12 +14,6 @@ localrules:
plot_networks,
rule all:
input:
RESULTS + "graphs/costs.pdf",
default_target: True
rule cluster_networks:
input:
expand(
@ -72,6 +66,15 @@ rule solve_sector_networks:
),
rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
**config["scenario"]
),
rule plot_networks:
input:
expand(
@ -79,3 +82,18 @@ rule plot_networks:
+ "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"]
),
rule validate_elec_networks:
input:
expand(
RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"]
),
expand(
RESULTS
+ "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"],
kind=["production", "prices", "cross_border"]
),


@ -15,8 +15,8 @@ def memory(w):
if m is not None:
factor *= int(m.group(1)) / 8760
break
if w.clusters.endswith("m"):
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
if w.clusters.endswith("m") or w.clusters.endswith("c"):
return int(factor * (55000 + 600 * int(w.clusters[:-1])))
elif w.clusters == "all":
return int(factor * (18000 + 180 * 4000))
else:
@ -42,7 +42,7 @@ def has_internet_access(url="www.zenodo.org") -> bool:
def input_eurostat(w):
# 2016 includes BA, 2017 does not
report_year = config["energy"]["eurostat_report_year"]
return f"data/eurostat-energy_balances-june_{report_year}_edition"
return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"
def solved_previous_horizon(wildcards):


@ -8,39 +8,69 @@ localrules:
copy_conda_env,
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES
+ "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
output:
map=RESULTS
+ "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
today=RESULTS
+ "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
threads: 2
resources:
mem_mb=10000,
benchmark:
(
if config["foresight"] != "perfect":
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
output:
map=RESULTS
+ "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
today=RESULTS
+ "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
threads: 2
resources:
mem_mb=10000,
benchmark:
(
BENCHMARKS
+ "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
if config["foresight"] == "perfect":
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
output:
**{
f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf"
for year in config["scenario"]["planning_horizons"]
},
threads: 2
resources:
mem_mb=10000,
benchmark:
BENCHMARKS
+ "plot_network/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
+"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
rule copy_config:
params:
RDIR=RDIR,
output:
RESULTS + "config/config.yaml",
RESULTS + "config.yaml",
threads: 1
resources:
mem_mb=1000,
@ -52,22 +82,6 @@ rule copy_config:
"../scripts/copy_config.py"
rule copy_conda_env:
output:
RESULTS + "config/environment.yaml",
threads: 1
resources:
mem_mb=500,
log:
LOGS + "copy_conda_env.log",
benchmark:
BENCHMARKS + "copy_conda_env"
conda:
"../envs/environment.yaml"
shell:
"conda env export -f {output} --no-builds"
rule make_summary:
params:
foresight=config["foresight"],
@ -123,6 +137,8 @@ rule plot_summary:
countries=config["countries"],
planning_horizons=config["scenario"]["planning_horizons"],
sector_opts=config["scenario"]["sector_opts"],
emissions_scope=config["energy"]["emissions"],
eurostat_report_year=config["energy"]["eurostat_report_year"],
plotting=config["plotting"],
RDIR=RDIR,
input:
@ -130,6 +146,7 @@ rule plot_summary:
energy=RESULTS + "csvs/energy.csv",
balances=RESULTS + "csvs/supply_energy.csv",
eurostat=input_eurostat,
co2="data/bundle-sector/eea/UNFCCC_v23.csv",
output:
costs=RESULTS + "graphs/costs.pdf",
energy=RESULTS + "graphs/energy.pdf",
@ -145,3 +162,34 @@ rule plot_summary:
"../envs/environment.yaml"
script:
"../scripts/plot_summary.py"
STATISTICS_BARPLOTS = [
"capacity_factor",
"installed_capacity",
"optimal_capacity",
"capital_expenditure",
"operational_expenditure",
"curtailment",
"supply",
"withdrawal",
"market_value",
]
rule plot_elec_statistics:
params:
plotting=config["plotting"],
barplots=STATISTICS_BARPLOTS,
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
**{
f"{plot}_bar": RESULTS
+ f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in STATISTICS_BARPLOTS
},
barplots_touch=RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_statistics.py"


@ -27,7 +27,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",
rule retrieve_databundle:
output:
expand("data/bundle/{file}", file=datafiles),
protected(expand("data/bundle/{file}", file=datafiles)),
log:
LOGS + "retrieve_databundle.log",
resources:
@ -92,7 +92,7 @@ if config["enable"]["retrieve"] and config["enable"].get(
static=True,
),
output:
RESOURCES + "natura.tiff",
protected(RESOURCES + "natura.tiff"),
log:
LOGS + "retrieve_natura_raster.log",
resources:
@ -106,21 +106,34 @@ if config["enable"]["retrieve"] and config["enable"].get(
"retrieve_sector_databundle", True
):
datafiles = [
"data/eea/UNFCCC_v23.csv",
"data/switzerland-sfoe/switzerland-new_format.csv",
"data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
"data/myb1-2017-nitro.xls",
"data/Industrial_Database.csv",
"data/emobility/KFZ__count",
"data/emobility/Pkw__count",
"data/h2_salt_caverns_GWh_per_sqkm.geojson",
directory("data/eurostat-energy_balances-june_2021_edition"),
directory("data/jrc-idees-2015"),
"eea/UNFCCC_v23.csv",
"switzerland-sfoe/switzerland-new_format.csv",
"nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
"myb1-2017-nitro.xls",
"Industrial_Database.csv",
"emobility/KFZ__count",
"emobility/Pkw__count",
"h2_salt_caverns_GWh_per_sqkm.geojson",
]
# TODO: check which versions of eurostat to keep
datafolders = [
protected(
directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition")
),
protected(
directory("data/bundle-sector/eurostat-energy_balances-june_2021_edition")
),
protected(
directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition")
),
protected(directory("data/bundle-sector/jrc-idees-2015")),
]
rule retrieve_sector_databundle:
output:
*datafiles,
protected(expand("data/bundle-sector/{files}", files=datafiles)),
*datafolders,
log:
LOGS + "retrieve_sector_databundle.log",
retries: 2
@ -142,7 +155,9 @@ if config["enable"]["retrieve"] and (
rule retrieve_gas_infrastructure_data:
output:
expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles),
protected(
expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
),
log:
LOGS + "retrieve_gas_infrastructure_data.log",
retries: 2
@ -156,7 +171,11 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_opsd_load_dat
rule retrieve_electricity_demand:
input:
HTTP.remote(
"data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
"data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
version="2019-06-05"
if config["snapshots"]["end"] < "2019"
else "2020-10-06"
),
keep_local=True,
static=True,
),
@ -192,7 +211,7 @@ if config["enable"]["retrieve"]:
static=True,
),
output:
"data/shipdensity_global.zip",
protected("data/shipdensity_global.zip"),
log:
LOGS + "retrieve_ship_raster.log",
resources:
@ -200,3 +219,39 @@ if config["enable"]["retrieve"]:
retries: 2
run:
move(input[0], output[0])
if config["enable"]["retrieve"]:
rule retrieve_monthly_co2_prices:
input:
HTTP.remote(
"https://www.eex.com/fileadmin/EEX/Downloads/EUA_Emission_Spot_Primary_Market_Auction_Report/Archive_Reports/emission-spot-primary-market-auction-report-2019-data.xls",
keep_local=True,
static=True,
),
output:
"data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
log:
LOGS + "retrieve_monthly_co2_prices.log",
resources:
mem_mb=5000,
retries: 2
run:
move(input[0], output[0])
if config["enable"]["retrieve"]:
rule retrieve_monthly_fuel_prices:
output:
"data/validation/energy-price-trends-xlsx-5619002.xlsx",
log:
LOGS + "retrieve_monthly_fuel_prices.log",
resources:
mem_mb=5000,
retries: 2
conda:
"../envs/environment.yaml"
script:
"../scripts/retrieve_monthly_fuel_prices.py"


@ -14,6 +14,7 @@ rule solve_network:
input:
network=RESOURCES
+ "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
config=RESULTS + "config.yaml",
output:
network=RESULTS
+ "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
@ -32,6 +33,7 @@ rule solve_network:
threads: 4
resources:
mem_mb=memory,
walltime=config["solving"].get("walltime", "12:00:00"),
shadow:
"minimal"
conda:
@ -63,7 +65,8 @@ rule solve_operations_network:
)
threads: 4
resources:
mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
mem_mb=(lambda w: 10000 + 372 * int(w.clusters)),
walltime=config["solving"].get("walltime", "12:00:00"),
shadow:
"minimal"
conda:


@ -97,7 +97,7 @@ rule solve_sector_network_myopic:
network=RESULTS
+ "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{planning_horizons}.csv",
config=RESULTS + "config/config.yaml",
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@ -111,6 +111,7 @@ rule solve_sector_network_myopic:
threads: 4
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
benchmark:
(
BENCHMARKS


@ -14,9 +14,7 @@ rule solve_sector_network:
input:
network=RESULTS
+ "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs="data/costs_{}.csv".format(config["costs"]["year"]),
config=RESULTS + "config/config.yaml",
#env=RDIR + 'config/environment.yaml',
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@ -30,6 +28,7 @@ rule solve_sector_network:
threads: config["solving"]["solver"].get("threads", 4)
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
benchmark:
(
RESULTS

rules/solve_perfect.smk Normal file

@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_existing_baseyear.py"
rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv",
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 4
resources:
mem_mb=10000,
log:
LOGS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_brownfield.py"
rule prepare_perfect_foresight:
input:
**{
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ f"{year}.nc"
for year in config["scenario"]["planning_horizons"][1:]
},
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 2
resources:
mem_mb=10000,
log:
LOGS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
benchmark:
(
BENCHMARKS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/prepare_perfect_foresight.py"
rule solve_sector_network_perfect:
params:
solving=config["solving"],
foresight=config["foresight"],
sector=config["sector"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv",
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 4
resources:
mem_mb=config["solving"]["mem"],
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/solve_network.py"
rule make_summary_perfect:
input:
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"]
for clusters in config["scenario"]["clusters"]
for opts in config["scenario"]["opts"]
for sector_opts in config["scenario"]["sector_opts"]
for ll in config["scenario"]["ll"]
},
costs="data/costs_2020.csv",
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
nodal_cfs=RESULTS + "csvs/nodal_cfs.csv",
cfs=RESULTS + "csvs/cfs.csv",
costs=RESULTS + "csvs/costs.csv",
capacities=RESULTS + "csvs/capacities.csv",
curtailment=RESULTS + "csvs/curtailment.csv",
energy=RESULTS + "csvs/energy.csv",
supply=RESULTS + "csvs/supply.csv",
supply_energy=RESULTS + "csvs/supply_energy.csv",
prices=RESULTS + "csvs/prices.csv",
weighted_prices=RESULTS + "csvs/weighted_prices.csv",
market_values=RESULTS + "csvs/market_values.csv",
price_statistics=RESULTS + "csvs/price_statistics.csv",
metrics=RESULTS + "csvs/metrics.csv",
co2_emissions=RESULTS + "csvs/co2_emissions.csv",
threads: 2
resources:
mem_mb=10000,
log:
LOGS + "make_summary_perfect.log",
benchmark:
(BENCHMARKS + "make_summary_perfect")
conda:
"../envs/environment.yaml"
script:
"../scripts/make_summary_perfect.py"
ruleorder: add_existing_baseyear > add_brownfield

rules/validate.smk Normal file

@ -0,0 +1,117 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
PRODUCTION_PLOTS = [
"production_bar",
"production_deviation_bar",
"seasonal_operation_area",
]
CROSS_BORDER_PLOTS = ["trade_time_series", "cross_border_bar"]
PRICES_PLOTS = ["price_bar", "price_line"]
rule build_electricity_production:
"""
This rule builds the electricity production for each country and technology from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
output:
RESOURCES + "historical_electricity_production.csv",
log:
LOGS + "build_electricity_production.log",
resources:
mem_mb=5000,
script:
"../scripts/build_electricity_production.py"
rule build_cross_border_flows:
"""
This rule builds the cross-border flows from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
input:
network=RESOURCES + "networks/base.nc",
output:
RESOURCES + "historical_cross_border_flows.csv",
log:
LOGS + "build_cross_border_flows.log",
resources:
mem_mb=5000,
script:
"../scripts/build_cross_border_flows.py"
rule build_electricity_prices:
"""
This rule builds the electricity prices from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
output:
RESOURCES + "historical_electricity_prices.csv",
log:
LOGS + "build_electricity_prices.log",
resources:
mem_mb=5000,
script:
"../scripts/build_electricity_prices.py"
rule plot_validation_electricity_production:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_production=RESOURCES + "historical_electricity_production.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in PRODUCTION_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_production_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_production.py"
rule plot_validation_cross_border_flows:
params:
countries=config["countries"],
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
cross_border_flows=RESOURCES + "historical_cross_border_flows.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in CROSS_BORDER_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_cross_border_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_cross_border_flows.py"
rule plot_validation_electricity_prices:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_prices=RESOURCES + "historical_electricity_prices.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in PRICES_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_prices_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_prices.py"

scripts/_benchmark.py Normal file

@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
"""
from __future__ import absolute_import, print_function
import logging
import os
import sys
import time
logger = logging.getLogger(__name__)
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Pipe, Process
except ImportError:
from multiprocessing.dummy import Process, Pipe
from memory_profiler import _get_memory, choose_backend
# The memory logging facilities have been adapted from memory_profiler
class MemTimer(Process):
"""
Write memory consumption over a time interval to file until signaled to
stop on the pipe.
"""
def __init__(
self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw
):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.filename = filename
self.max_usage = max_usage
self.backend = backend
self.timestamps = kw.pop("timestamps", True)
self.include_children = kw.pop("include_children", True)
super(MemTimer, self).__init__(*args, **kw)
def run(self):
# get baseline memory usage
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
n_measurements = 1
mem_usage = cur_mem if self.max_usage else [cur_mem]
if self.filename is not None:
stream = open(self.filename, "w")
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
else:
stream = None
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
n_measurements += 1
if not self.max_usage:
mem_usage.append(cur_mem)
else:
mem_usage = max(cur_mem, mem_usage)
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
if stream is not None:
stream.close()
self.pipe.send(mem_usage)
self.pipe.send(n_measurements)
class memory_logger(object):
"""
Context manager for taking and reporting memory measurements at fixed
intervals from a separate process, for the duration of a context.
Parameters
----------
filename : None|str
Name of the text file to log memory measurements, if None no log is
created (defaults to None)
interval : float
Interval between measurements (defaults to 1.)
max_usage : bool
If True, only store and report the maximum value (defaults to True)
timestamps : bool
Whether to record tuples of memory usage and timestamps; if logging to
a file timestamps are always kept (defaults to True)
include_children : bool
Whether the memory of subprocesses is to be included (default: True)
Attributes
----------
n_measurements : int
Number of measurements that have been taken
mem_usage : (float, float)|[(float, float)]
All memory measurements and timestamps (if timestamps was True) or only
the maximum memory usage and its timestamp
Note
----
The attributes are only set after all the measurements, i.e. outside of the
with statement.
Example
-------
with memory_logger(filename="memory.log", max_usage=True) as mem:
# Do a lot of long running memory intensive stuff
hard_memory_bound_stuff()
max_mem, timestamp = mem.mem_usage
"""
def __init__(
self,
filename=None,
interval=1.0,
max_usage=True,
timestamps=True,
include_children=True,
):
if filename is not None:
timestamps = True
self.filename = filename
self.interval = interval
self.max_usage = max_usage
self.timestamps = timestamps
self.include_children = include_children
def __enter__(self):
backend = choose_backend()
self.child_conn, self.parent_conn = Pipe() # this will store MemTimer's results
self.p = MemTimer(
os.getpid(),
self.interval,
self.child_conn,
self.filename,
backend=backend,
timestamps=self.timestamps,
max_usage=self.max_usage,
include_children=self.include_children,
)
self.p.start()
self.parent_conn.recv() # wait until memory logging in subprocess is ready
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.parent_conn.send(0) # finish timing
self.mem_usage = self.parent_conn.recv()
self.n_measurements = self.parent_conn.recv()
else:
self.p.terminate()
return False
class timer(object):
level = 0
opened = False
def __init__(self, name="", verbose=True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.verbose:
if self.opened:
sys.stdout.write("\n")
if len(self.name) > 0:
sys.stdout.write((".. " * self.level) + self.name + ": ")
sys.stdout.flush()
self.__class__.opened = True
self.__class__.level += 1
self.start = time.time()
return self
def print_usec(self, usec):
if usec < 1000:
print("%.1f usec" % usec)
else:
msec = usec / 1000
if msec < 1000:
print("%.1f msec" % msec)
else:
sec = msec / 1000
print("%.1f sec" % sec)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.opened and self.verbose:
sys.stdout.write(".. " * self.level)
if exc_type is None:
stop = time.time()
self.usec = usec = (stop - self.start) * 1e6
if self.verbose:
self.print_usec(usec)
elif self.verbose:
print("failed")
sys.stdout.flush()
self.__class__.level -= 1
if self.verbose:
self.__class__.opened = False
return False
class optional(object):
def __init__(self, variable, contextman):
self.variable = variable
self.contextman = contextman
def __enter__(self):
if self.variable:
return self.contextman.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.variable:
return self.contextman.__exit__(exc_type, exc_val, exc_tb)
return False
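A minimal usage sketch of the three helpers above: time a block, and attach the memory logger only when a log file is actually requested.

import time

filename = "memory.log"  # or None to skip memory logging

with optional(filename, memory_logger(filename=filename, max_usage=True)) as mem:
    with timer("heavy step"):
        time.sleep(0.5)  # stand-in for real work

if filename:
    max_mem, timestamp = mem.mem_usage  # only set after the with block exits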


@ -2,8 +2,6 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Adds electrical generators and existing hydro storage units to a base network.
@ -167,7 +165,7 @@ def sanitize_carriers(n, config):
nice_names = (
pd.Series(config["plotting"]["nice_names"])
.reindex(carrier_i)
.fillna(carrier_i.to_series().str.title())
.fillna(carrier_i.to_series())
)
n.carriers["nice_name"] = n.carriers.nice_name.where(
n.carriers.nice_name != "", nice_names
@ -206,7 +204,6 @@ def load_costs(tech_costs, config, max_hours, Nyears=1.0):
* costs["investment"]
* Nyears
)
costs.at["OCGT", "fuel"] = costs.at["gas", "fuel"]
costs.at["CCGT", "fuel"] = costs.at["gas", "fuel"]
@ -362,7 +359,6 @@ def attach_wind_and_solar(
n, costs, input_profiles, carriers, extendable_carriers, line_length_factor=1
):
add_missing_carriers(n, carriers)
for car in carriers:
if car == "hydro":
continue
@ -410,6 +406,7 @@ def attach_wind_and_solar(
capital_cost=capital_cost,
efficiency=costs.at[supcar, "efficiency"],
p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
lifetime=costs.at[supcar, "lifetime"],
)
@ -421,6 +418,8 @@ def attach_conventional_generators(
extendable_carriers,
conventional_params,
conventional_inputs,
unit_commitment=None,
fuel_price=None,
):
carriers = list(set(conventional_carriers) | set(extendable_carriers["Generator"]))
add_missing_carriers(n, carriers)
@ -439,15 +438,34 @@ def attach_conventional_generators(
.rename(index=lambda s: "C" + str(s))
)
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
ppl["marginal_cost"] = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
logger.info(
"Adding {} generators with capacities [GW] \n{}".format(
len(ppl), ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
if unit_commitment is not None:
committable_attrs = ppl.carrier.isin(unit_commitment).to_frame("committable")
for attr in unit_commitment.index:
default = pypsa.components.component_attrs["Generator"].default[attr]
committable_attrs[attr] = ppl.carrier.map(unit_commitment.loc[attr]).fillna(
default
)
else:
committable_attrs = {}
if fuel_price is not None:
fuel_price = fuel_price.assign(
OCGT=fuel_price["gas"], CCGT=fuel_price["gas"]
).drop("gas", axis=1)
missing_carriers = list(set(carriers) - set(fuel_price))
fuel_price = fuel_price.assign(**costs.fuel[missing_carriers])
fuel_price = fuel_price.reindex(ppl.carrier, axis=1)
fuel_price.columns = ppl.index
marginal_cost = fuel_price.div(ppl.efficiency).add(ppl.carrier.map(costs.VOM))
else:
marginal_cost = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
)
# Define generators using modified ppl DataFrame
caps = ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
logger.info(f"Adding {len(ppl)} generators with capacities [GW] \n{caps}")
n.madd(
"Generator",
@ -458,13 +476,14 @@ def attach_conventional_generators(
p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"]),
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
marginal_cost=marginal_cost,
capital_cost=ppl.capital_cost,
build_year=ppl.datein.fillna(0).astype(int),
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
**committable_attrs,
)
for carrier in conventional_params:
for carrier in set(conventional_params) & set(carriers):
# Generators with technology affected
idx = n.generators.query("carrier == @carrier").index
@ -598,6 +617,14 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
hydro.max_hours > 0, hydro.country.map(max_hours_country)
).fillna(6)
flatten_dispatch = params.get("flatten_dispatch", False)
if flatten_dispatch:
buffer = params.get("flatten_dispatch_buffer", 0.2)
average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
else:
p_max_pu = 1
n.madd(
"StorageUnit",
hydro.index,
@ -607,7 +634,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
max_hours=hydro_max_hours,
capital_cost=costs.at["hydro", "capital_cost"],
marginal_cost=costs.at["hydro", "marginal_cost"],
p_max_pu=1.0, # dispatch
p_max_pu=p_max_pu, # dispatch
p_min_pu=0.0, # store
efficiency_dispatch=costs.at["hydro", "efficiency"],
efficiency_store=0.0,
@ -697,13 +724,14 @@ def attach_OPSD_renewables(n, tech_map):
{"Solar": "PV"}
)
df = df.query("Fueltype in @tech_map").powerplant.convert_country_to_alpha2()
df = df.dropna(subset=["lat", "lon"])
for fueltype, carriers in tech_map.items():
gens = n.generators[lambda df: df.carrier.isin(carriers)]
buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby("bus").p_nom.count()
caps = map_country_bus(df.query("Fueltype == @fueltype and lat == lat"), buses)
caps = map_country_bus(df.query("Fueltype == @fueltype"), buses)
caps = caps.groupby(["bus"]).Capacity.sum()
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
@ -761,6 +789,30 @@ def drop_leap_day(n):
logger.info("Dropped February 29 from leap year.")
def attach_line_rating(
n, rating, s_max_pu, correction_factor, max_voltage_difference, max_line_rating
):
# TODO: Only considers overhead lines
n.lines_t.s_max_pu = (rating / n.lines.s_nom[rating.columns]) * correction_factor
if max_voltage_difference:
x_pu = (
n.lines.type.map(n.line_types["x_per_length"])
* n.lines.length
/ (n.lines.v_nom**2)
)
# need to clip here as cap values might be below 1
# -> this would mean the line could not even be operated at the static, pessimistic ampacity
s_max_pu_cap = (
np.deg2rad(max_voltage_difference) / (x_pu * n.lines.s_nom)
).clip(lower=1)
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(
lower=1, upper=s_max_pu_cap, axis=1
)
if max_line_rating:
n.lines_t.s_max_pu = n.lines_t.s_max_pu.clip(upper=max_line_rating)
n.lines_t.s_max_pu *= s_max_pu
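A worked example of the voltage-angle cap above, with illustrative line parameters that are not taken from the dataset:

import numpy as np

# 380 kV line with X = 30 Ohm -> x_pu = 30 / 380**2 ~ 2.1e-4, s_nom = 1700 MVA
x_pu, s_nom, max_voltage_difference = 30 / 380**2, 1700.0, 30.0
cap = np.deg2rad(max_voltage_difference) / (x_pu * s_nom)  # ~1.48
s_max_pu_cap = max(cap, 1.0)  # the .clip(lower=1) above
# the dynamic rating is thus allowed to reach at most ~1.48x the static rating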
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -808,6 +860,20 @@ if __name__ == "__main__":
conventional_inputs = {
k: v for k, v in snakemake.input.items() if k.startswith("conventional_")
}
if params.conventional["unit_commitment"]:
unit_commitment = pd.read_csv(snakemake.input.unit_commitment, index_col=0)
else:
unit_commitment = None
if params.conventional["dynamic_fuel_price"]:
fuel_price = pd.read_csv(
snakemake.input.fuel_price, index_col=0, header=0, parse_dates=True
)
fuel_price = fuel_price.reindex(n.snapshots).fillna(method="ffill")
else:
fuel_price = None
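For reference, a hypothetical shape of the unit_commitment.csv consumed here: one row per PyPSA Generator attribute, one column per committable carrier. The numbers are invented.

import io

import pandas as pd

unit_commitment = pd.read_csv(
    io.StringIO("attribute,CCGT,coal\nmin_up_time,2,5\nstart_up_cost,50,100\n"),
    index_col=0,
)
# attach_conventional_generators then maps each attribute row onto the plants:
# committable_attrs[attr] = ppl.carrier.map(unit_commitment.loc[attr]).fillna(default)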
attach_conventional_generators(
n,
costs,
@ -816,6 +882,8 @@ if __name__ == "__main__":
extendable_carriers,
params.conventional,
conventional_inputs,
unit_commitment=unit_commitment,
fuel_price=fuel_price,
)
attach_wind_and_solar(
@ -828,15 +896,16 @@ if __name__ == "__main__":
)
if "hydro" in renewable_carriers:
para = params.renewable["hydro"]
p = params.renewable["hydro"]
carriers = p.pop("carriers", [])
attach_hydro(
n,
costs,
ppl,
snakemake.input.profile_hydro,
snakemake.input.hydro_capacities,
para.pop("carriers", []),
**para,
carriers,
**p,
)
estimate_renewable_caps = params.electricity["estimate_renewable_capacities"]
@ -853,6 +922,23 @@ if __name__ == "__main__":
update_p_nom_max(n)
line_rating_config = snakemake.config["lines"]["dynamic_line_rating"]
if line_rating_config["activate"]:
rating = xr.open_dataarray(snakemake.input.line_rating).to_pandas().transpose()
s_max_pu = snakemake.config["lines"]["s_max_pu"]
correction_factor = line_rating_config["correction_factor"]
max_voltage_difference = line_rating_config["max_voltage_difference"]
max_line_rating = line_rating_config["max_line_rating"]
attach_line_rating(
n,
rating,
s_max_pu,
correction_factor,
max_voltage_difference,
max_line_rating,
)
sanitize_carriers(n, snakemake.config)
if snakemake.config["enable"].get("drop_leap_days", True):


@ -305,6 +305,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "EU" not in vars(spatial)[carrier[generator]].locations:
bus0 = bus0.intersection(capacity.index + " gas")
# check for missing bus
missing_bus = pd.Index(bus0).difference(n.buses.index)
if not missing_bus.empty:
logger.info(f"add buses {bus0}")
n.madd(
"Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el",
)
already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index)
lifetime_assets = lifetime.loc[grouping_year, generator].dropna()
@ -435,15 +447,23 @@ def add_heating_capacities_installed_before_baseyear(
# split existing capacities between residential and services
# proportional to energy demand
p_set_sum = n.loads_t.p_set.sum()
ratio_residential = pd.Series(
[
(
n.loads_t.p_set.sum()[f"{node} residential rural heat"]
p_set_sum[f"{node} residential rural heat"]
/ (
n.loads_t.p_set.sum()[f"{node} residential rural heat"]
+ n.loads_t.p_set.sum()[f"{node} services rural heat"]
p_set_sum[f"{node} residential rural heat"]
+ p_set_sum[f"{node} services rural heat"]
)
)
# if rural heating demand for one of the nodes doesn't exist,
# then columns were dropped before and heating demand share should be 0.0
if all(
f"{node} {service} rural heat" in p_set_sum.index
for service in ["residential", "services"]
)
else 0.0
for node in nodal_df.index
],
index=nodal_df.index,
@ -597,6 +617,10 @@ def add_heating_capacities_installed_before_baseyear(
],
)
# drop assets which are at the end of their lifetime
links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
n.mremove("Link", links_i)
# %%
if __name__ == "__main__":
@ -608,11 +632,11 @@ if __name__ == "__main__":
weather_year="",
configfiles="config/test/config.myopic.yaml",
simpl="",
clusters="5",
ll="v1.5",
clusters="37",
ll="v1.0",
opts="",
sector_opts="24H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2030,
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2020,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])


@ -335,7 +335,7 @@ def _load_lines_from_eg(buses, eg_lines):
)
lines["length"] /= 1e3
lines["carrier"] = "AC"
lines = _remove_dangling_branches(lines, buses)
return lines


@ -7,9 +7,15 @@ Compute biogas and solid biomass potentials for each clustered model region
using data from JRC ENSPRESO.
"""
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import numpy as np
import pandas as pd
AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
def build_nuts_population_data(year=2013):
pop = pd.read_csv(
@ -209,14 +215,41 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_biomass_potentials", weather_year="", simpl="", clusters="5"
"build_biomass_potentials",
weather_year="",
simpl="",
clusters="5",
planning_horizons=2050,
)
overnight = snakemake.config["foresight"] == "overnight"
params = snakemake.params.biomass
year = params["year"]
investment_year = int(snakemake.wildcards.planning_horizons)
year = params["year"] if overnight else investment_year
scenario = params["scenario"]
enspreso = enspreso_biomass_potentials(year, scenario)
if year > 2050:
logger.info("No biomass potentials for years after 2050, using 2050.")
max_year = max(AVAILABLE_BIOMASS_YEARS)
enspreso = enspreso_biomass_potentials(max_year, scenario)
elif year not in AVAILABLE_BIOMASS_YEARS:
before = int(np.floor(year / 10) * 10)
after = int(np.ceil(year / 10) * 10)
logger.info(
f"No biomass potentials for {year}, interpolating linearly between {before} and {after}."
)
enspreso_before = enspreso_biomass_potentials(before, scenario)
enspreso_after = enspreso_biomass_potentials(after, scenario)
fraction = (year - before) / (after - before)
enspreso = enspreso_before + fraction * (enspreso_after - enspreso_before)
else:
logger.info(f"Using biomass potentials for {year}.")
enspreso = enspreso_biomass_potentials(year, scenario)
enspreso = disaggregate_nuts0(enspreso)
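A worked example of the interpolation branch above for planning horizon 2035, with toy potentials:

import pandas as pd

p2030 = pd.Series({"biogas": 100.0, "solid biomass": 400.0})
p2040 = pd.Series({"biogas": 140.0, "solid biomass": 360.0})
fraction = (2035 - 2030) / (2040 - 2030)  # 0.5
print(p2030 + fraction * (p2040 - p2030))  # biogas 120.0, solid biomass 380.0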


@ -0,0 +1,65 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
import pypsa
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError
from requests import HTTPError
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
n = pypsa.Network(snakemake.input.network)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
branches = n.branches().query("carrier in ['AC', 'DC']")
c = n.buses.country
branch_countries = pd.concat([branches.bus0.map(c), branches.bus1.map(c)], axis=1)
branch_countries = branch_countries.query("bus0 != bus1")
branch_countries = branch_countries.apply(sorted, axis=1, result_type="broadcast")
country_pairs = branch_countries.drop_duplicates().reset_index(drop=True)
flows = []
unavailable_borders = []
for from_country, to_country in country_pairs.values:
try:
flow_directed = client.query_crossborder_flows(
from_country, to_country, start=start, end=end
)
flow_reverse = client.query_crossborder_flows(
to_country, from_country, start=start, end=end
)
flow = (flow_directed - flow_reverse).rename(
f"{from_country} - {to_country}"
)
flow = flow.tz_localize(None).resample("1h").mean()
flow = flow.loc[start.tz_localize(None) : end.tz_localize(None)]
flows.append(flow)
except (HTTPError, NoMatchingDataError, InvalidBusinessParameterError):
unavailable_borders.append(f"{from_country}-{to_country}")
if unavailable_borders:
logger.warning(
"Historical electricity cross-border flows for countries"
f" {', '.join(unavailable_borders)} not available."
)
flows = pd.concat(flows, axis=1)
flows.to_csv(snakemake.output[0])


@ -80,11 +80,9 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
def rename(s):
return s[: -len(pattern)]
def date_parser(x):
return dateutil.parser.parse(x, ignoretz=True)
return (
pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
pd.read_csv(fn, index_col=0, parse_dates=[0])
.tz_localize(None)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
@ -168,6 +166,7 @@ def manual_adjustment(load, fn_load, powerstatistics):
by the corresponding ratio of total energy consumptions reported by
IEA Data browser [0] for the year 2013.
2. For the ENTSOE transparency load data (if powerstatistics is False)
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
@ -176,6 +175,9 @@ def manual_adjustment(load, fn_load, powerstatistics):
[0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons
Bosnia and Herzegovina (BA) does not exist in the data set for 2019. It gets the
electricity consumption data from Croatia (HR) for the year 2019, scaled by the
factors derived from https://energy.at-site.be/eurostat-2021/
Parameters
----------
@ -264,9 +266,17 @@ def manual_adjustment(load, fn_load, powerstatistics):
load["AL"] = load.ME * (5.7 / 2.9)
if "MK" not in load and "MK" in countries:
load["MK"] = load.ME * (6.7 / 2.9)
if "BA" not in load and "BA" in countries:
load["BA"] = load.HR * (11.0 / 16.2)
copy_timeslice(
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
)
copy_timeslice(
load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)
)
copy_timeslice(
load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)
)
return load
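A self-contained sketch of the copy_timeslice idiom used above; the helper is defined elsewhere in this module and its semantics are inferred from the call sites. A corrupted window of one country's series is overwritten with the same window shifted by the given offset, here minus one week.

import pandas as pd

idx = pd.date_range("2019-01-01", "2019-02-28 23:00", freq="h")
load = pd.DataFrame({"LU": range(len(idx))}, index=idx, dtype=float)

start, stop = pd.Timestamp("2019-02-05 20:00"), pd.Timestamp("2019-02-06 19:00")
delta = pd.Timedelta(weeks=-1)
load.loc[start:stop, "LU"] = load.loc[start + delta : stop + delta, "LU"].values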
@ -308,6 +318,9 @@ if __name__ == "__main__":
if snakemake.params.load["manual_adjustments"]:
load = manual_adjustment(load, snakemake.input[0], powerstatistics)
if load.empty:
logger.warning("Build electricity demand time series is empty.")
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method="linear", limit=interpolate_limit)


@ -0,0 +1,52 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
countries = snakemake.params.countries
prices = []
unavailable_countries = []
for country in countries:
country_code = country
try:
gen = client.query_day_ahead_prices(country, start=start, end=end)
gen = gen.tz_localize(None).resample("1h").mean()
gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
prices.append(gen)
except NoMatchingDataError:
unavailable_countries.append(country)
if unavailable_countries:
logger.warning(
f"Historical electricity prices for countries {', '.join(unavailable_countries)} not available."
)
keys = [c for c in countries if c not in unavailable_countries]
prices = pd.concat(prices, keys=keys, axis=1)
prices.to_csv(snakemake.output[0])


@ -0,0 +1,73 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
logger = logging.getLogger(__name__)
carrier_grouper = {
"Waste": "Biomass",
"Hydro Pumped Storage": "Hydro",
"Hydro Water Reservoir": "Hydro",
"Hydro Run-of-river and poundage": "Run of River",
"Fossil Coal-derived gas": "Gas",
"Fossil Gas": "Gas",
"Fossil Oil": "Oil",
"Fossil Oil shale": "Oil",
"Fossil Brown coal/Lignite": "Lignite",
"Fossil Peat": "Lignite",
"Fossil Hard coal": "Coal",
"Wind Onshore": "Onshore Wind",
"Wind Offshore": "Offshore Wind",
"Other renewable": "Other",
"Marine": "Other",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_electricity_production")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
countries = snakemake.params.countries
generation = []
unavailable_countries = []
for country in countries:
country_code = country
try:
gen = client.query_generation(country, start=start, end=end, nett=True)
gen = gen.tz_localize(None).resample("1h").mean()
gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
generation.append(gen)
except NoMatchingDataError:
unavailable_countries.append(country)
if unavailable_countries:
logger.warning(
f"Historical electricity production for countries {', '.join(unavailable_countries)} not available."
)
keys = [c for c in countries if c not in unavailable_countries]
generation = pd.concat(generation, keys=keys, axis=1)
generation.to_csv(snakemake.output[0])


@ -13,10 +13,13 @@ logger = logging.getLogger(__name__)
import uuid
from itertools import product
import country_converter as coco
import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse
cc = coco.CountryConverter()
def locate_missing_industrial_sites(df):
"""
@ -93,6 +96,34 @@ def prepare_hotmaps_database(regions):
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]
# the .sjoin can lead to duplicates if a geom is in two regions
if gdf.index.duplicated().any():
import pycountry
# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
s = df.loc[duplicated_i, "Country"].apply(
lambda x: pycountry.countries.lookup(x).alpha_2
)
# Get a boolean mask where gdf's country column matches s's values for the same index
mask = gdf["country"] == gdf.index.map(s)
# Filter gdf using the mask
gdf_filtered = gdf[mask]
# concat not duplicated and filtered gdf
gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered]).sort_index()
# the .sjoin can lead to duplicates if a geom is in two overlapping regions
if gdf.index.duplicated().any():
# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
# screen out malformed country allocation
gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
# concat not duplicated and filtered gdf
gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered])
return gdf
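A toy illustration of the deduplication step above: a facility whose geometry lies in two overlapping regions appears twice after the spatial join, and only the row whose assigned bus country matches the facility's own country is kept.

import pandas as pd

gdf = pd.DataFrame(
    {"bus": ["DE0 1", "FR0 2"], "country": ["DE", "FR"], "Country": 2 * ["Germany"]},
    index=[42, 42],  # the same facility id twice
)
code = "DE"  # cc.convert("Germany", to="iso2")
duplicated_i = gdf.index[gdf.index.duplicated()]
gdf = pd.concat([gdf.drop(duplicated_i), gdf.loc[duplicated_i].query("country == @code")])
print(gdf)  # only the DE assignment survives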
@ -115,7 +146,9 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
facilities = hotmaps.query("country == @country and Subsector == @sector")
if not facilities.empty:
emissions = facilities["Emissions_ETS_2014"]
emissions = facilities["Emissions_ETS_2014"].fillna(
hotmaps["Emissions_EPRTR_2014"]
)
if emissions.sum() == 0:
key = pd.Series(1 / len(facilities), facilities.index)
else:
@ -131,6 +164,7 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
return keys
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -139,7 +173,7 @@ if __name__ == "__main__":
"build_industrial_distribution_key",
weather_year="",
simpl="",
clusters=48,
clusters=128,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])

scripts/build_line_rating.py Executable file

@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Adds dynamic line rating timeseries to the base network.
Relevant Settings
-----------------
.. code:: yaml
lines:
cutout:
line_rating:
.. seealso::
Documentation of the configuration file ``config.yaml``
Inputs
------
- ``data/cutouts``:
- ``networks/base.nc``: confer :ref:`base`
Outputs
-------
- ``resources/line_rating.nc``
Description
-----------
The rule :mod:`build_line_rating` calculates the line rating for transmission lines.
The line rating provides the maximal capacity of a transmission line considering the heat exchange with the environment.
The following heat gains and losses are considered:
- heat gain through resistive losses
- heat gain through solar radiation
- heat loss through radiation of the transmission line
- heat loss through forced convection with wind
- heat loss through natural convection
With a heat balance considering the maximum temperature threshold of the transmission line,
the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated.
"""
import logging
import re
import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import configure_logging
from shapely.geometry import LineString as Line
from shapely.geometry import Point
def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
"""
Calculates the resistance at other temperatures than the reference
temperature.
Parameters
----------
T : Temperature at which resistance is calculated in [°C] or [K]
R_ref : Resistance at reference temperature in [Ohm] or [Ohm/Per Length Unit]
T_ref : Reference temperature in [°C] or [K]
alpha: Temperature coefficient in [1/K]
Defaults are:
* T_ref : 20 °C
* alpha : 0.00403 1/K
Returns
-------
Resistance of at given temperature.
"""
R = R_ref * (1 + alpha * (T - T_ref))
return R
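A worked example: a conductor with R_ref = 0.06 Ohm/km at the default T_ref = 293 K, operated at the 353 K design temperature used below, shows roughly 24 % higher resistance.

R = calculate_resistance(T=353, R_ref=0.06)
# R = 0.06 * (1 + 0.00403 * (353 - 293)) ~ 0.0745 Ohm/km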
def calculate_line_rating(n, cutout):
"""
Calculates the maximal allowed power flow in each line for each time step
considering the maximal temperature.
Parameters
----------
n : pypsa.Network object containing information on grid
Returns
-------
xarray DataArray object with maximal power.
"""
relevant_lines = n.lines[(n.lines["underground"] == False)]
buses = relevant_lines[["bus0", "bus1"]].values
x = n.buses.x
y = n.buses.y
shapes = [Line([Point(x[b0], y[b0]), Point(x[b1], y[b1])]) for (b0, b1) in buses]
shapes = gpd.GeoSeries(shapes, index=relevant_lines.index)
if relevant_lines.r_pu.eq(0).all():
# Overwrite standard line resistance with line resistance obtained from line type
r_per_length = n.line_types["r_per_length"]
R = (
relevant_lines.join(r_per_length, on=["type"])["r_per_length"] / 1000
) # in meters
# If line type with bundles is given retrieve number of conductors per bundle
relevant_lines["n_bundle"] = (
relevant_lines["type"]
.where(relevant_lines["type"].str.contains("bundle"))
.dropna()
.apply(lambda x: int(re.findall(r"(\d+)-bundle", x)[0]))
)
# Set default number of bundles per line
relevant_lines["n_bundle"].fillna(1, inplace=True)
R *= relevant_lines["n_bundle"]
R = calculate_resistance(T=353, R_ref=R)
Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8)
line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3  # in MW
da = xr.DataArray(
data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1),
attrs=dict(
description="Maximal possible power in MW for given line considering line rating"
),
)
return da
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"build_line_rating",
network="elec",
simpl="",
clusters="5",
ll="v1.0",
opts="Co2L-4H",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.base_network)
time = pd.date_range(freq="h", **snakemake.config["snapshots"])
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
da = calculate_line_rating(n, cutout)
da.to_netcdf(snakemake.output[0])


@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This script extracts monthly fuel prices of oil, gas, coal and lignite, as
well as CO2 prices.
Inputs
------
- ``data/energy-price-trends-xlsx-5619002.xlsx``: energy price index of fossil fuels
- ``emission-spot-primary-market-auction-report-2019-data.xls``: CO2 Prices spot primary auction
Outputs
-------
- ``data/validation/monthly_fuel_price.csv``
- ``data/validation/CO2_price_2019.csv``
Description
-----------
The rule :mod:`build_monthly_prices` collects monthly fuel prices and CO2 prices
and translates them from different input sources to pypsa syntax.
Data sources:
[1] Fuel price index. Destatis
https://www.destatis.de/EN/Home/_node.html
[2] average annual fuel price lignite, ENTSO-E
https://2020.entsos-tyndp-scenarios.eu/fuel-commodities-and-carbon-prices/
[3] CO2 Prices, Emission spot primary auction, EEX
https://www.eex.com/en/market-data/environmental-markets/eua-primary-auction-spot-download
Data accessed on 16.5.2023.
"""
import logging
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
# keywords in datasheet
keywords = {
"coal": " GP09-051 Hard coal",
"lignite": " GP09-052 Lignite and lignite briquettes",
"oil": " GP09-0610 10 Mineral oil, crude",
"gas": "GP09-062 Natural gas",
}
# sheet names to pypsa syntax
sheet_name_map = {
"coal": "5.1 Hard coal and lignite",
"lignite": "5.1 Hard coal and lignite",
"oil": "5.2 Mineral oil",
"gas": "5.3.1 Natural gas - indices",
}
# fuel prices for 2020 in Eur/MWh, ENTSO-E [2]
# (input values in Eur/GJ, converted to Eur/MWh with factor 3.6)
price_2020 = (
pd.Series({"coal": 3.0, "oil": 10.6, "gas": 5.6, "lignite": 1.1}) * 3.6
) # Eur/MWh
# manual adjustment of coal price
price_2020["coal"] = 2.4 * 3.6
price_2020["lignite"] = 1.6 * 3.6
def get_fuel_price():
price = {}
for carrier, keyword in keywords.items():
sheet_name = sheet_name_map[carrier]
df = pd.read_excel(
snakemake.input.fuel_price_raw,
sheet_name=sheet_name,
index_col=0,
skiprows=6,
nrows=18,
)
df = df.dropna(axis=0).iloc[:, :12]
start, end = df.index[0], str(int(df.index[-1][:4]) + 1)
df = df.stack()
df.index = pd.date_range(start=start, end=end, freq="MS", inclusive="left")
scale = price_2020[carrier] / df["2020"].mean() # scale to 2020 price
df = df.mul(scale)
price[carrier] = df
return pd.concat(price, axis=1)
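# Scaling sketch (hypothetical numbers): if the monthly gas index averages
# 104.0 over 2020 and the target 2020 gas price is 5.6 * 3.6 = 20.16 Eur/MWh,
# every monthly index value is multiplied by 20.16 / 104.0 ≈ 0.194.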
def get_co2_price():
# emission price
co2_price = pd.read_excel(snakemake.input.co2_price_raw, index_col=1, header=5)
return co2_price["Auction Price €/tCO2"]
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_monthly_prices")
configure_logging(snakemake)
fuel_price = get_fuel_price()
fuel_price.to_csv(snakemake.output.fuel_price)
co2_price = get_co2_price()
co2_price.to_csv(snakemake.output.co2_price)


@ -54,6 +54,23 @@ logger = logging.getLogger(__name__)
def determine_cutout_xXyY(cutout_name):
"""
Determine the full extent of a cutout.
Since the coordinates of the cutout data are given as the
center of the grid cells, the extent of the cutout is
calculated by adding/subtracting half of the grid cell size.
Parameters
----------
cutout_name : str
Path to the cutout.
Returns
-------
A list of extent coordinates in the order [x, X, y, Y].
"""
cutout = atlite.Cutout(cutout_name)
assert cutout.crs.to_epsg() == 4326
x, X, y, Y = cutout.extent
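# A minimal sketch of the half-cell padding described in the docstring,
# assuming dx and dy are the cutout's grid resolutions:
# dx, dy = cutout.dx, cutout.dy
# return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0]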


@ -89,7 +89,7 @@ logger = logging.getLogger(__name__)
def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
if not custom_ppl_query:
return ppl
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
add_ppls = pd.read_csv(custom_powerplants, dtype={"bus": "str"})
if isinstance(custom_ppl_query, str):
add_ppls.query(custom_ppl_query, inplace=True)
return pd.concat(


@ -186,6 +186,7 @@ import time
import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
from _helpers import configure_logging
from dask.distributed import Client
@ -224,7 +225,8 @@ if __name__ == "__main__":
else:
client = None
cutout = atlite.Cutout(snakemake.input.cutout)
sns = pd.date_range(freq="h", **snakemake.config["snapshots"])
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=sns)
regions = gpd.read_file(snakemake.input.regions)
assert not regions.empty, (
f"List of regions in {snakemake.input.regions} is empty, please "
@ -374,4 +376,6 @@ if __name__ == "__main__":
ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0)
ds.to_netcdf(snakemake.output.profile)
client.shutdown()
if client is not None:
client.shutdown()


@ -28,9 +28,7 @@ def allocate_sequestration_potential(
overlay["share"] = area(overlay) / overlay["area_sqkm"]
adjust_cols = overlay.columns.difference({"name", "area_sqkm", "geometry", "share"})
overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)
gdf_regions = overlay.groupby("name").sum()
gdf_regions.drop(["area_sqkm", "share"], axis=1, inplace=True)
return gdf_regions.squeeze()
return overlay.dissolve("name", aggfunc="sum")[attr]
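# Note: geopandas' dissolve("name", aggfunc="sum") merges geometries sharing a
# region name and sums the numeric columns, so only the share-weighted
# attribute columns in `attr` are returned per region.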
if __name__ == "__main__":


@ -485,6 +485,23 @@ if __name__ == "__main__":
else:
n_clusters = int(snakemake.wildcards.clusters)
if params.cluster_network.get("consider_efficiency_classes", False):
carriers = []
for c in aggregate_carriers:
gens = n.generators.query("carrier == @c")
low = gens.efficiency.quantile(0.10)
high = gens.efficiency.quantile(0.90)
if low >= high:
carriers += [c]
else:
labels = ["low", "medium", "high"]
suffix = pd.cut(
gens.efficiency, bins=[0, low, high, 1], labels=labels
).astype(str)
carriers += [f"{c} {label} efficiency" for label in labels]
n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
aggregate_carriers = carriers
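# Illustration (hypothetical numbers): for CCGT efficiencies between 0.35 and
# 0.60 with quantiles low = 0.40 and high = 0.55, pd.cut labels a 0.38 unit
# "CCGT low efficiency", a 0.50 unit "CCGT medium efficiency" and a 0.58 unit
# "CCGT high efficiency" before clustering.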
if n_clusters == len(n.buses):
# Fast-path if no clustering is necessary
busmap = n.buses.index.to_series()
@ -526,6 +543,11 @@ if __name__ == "__main__":
update_p_nom_max(clustering.network)
if params.cluster_network.get("consider_efficiency_classes"):
labels = [f" {label} efficiency" for label in ["low", "medium", "high"]]
nc = clustering.network
nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True)
clustering.network.meta = dict(
snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
)


@ -11,25 +11,13 @@ from shutil import copy
import yaml
files = {
"config/config.yaml": "config.yaml",
"Snakefile": "Snakefile",
"scripts/solve_network.py": "solve_network.py",
"scripts/prepare_sector_network.py": "prepare_sector_network.py",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("copy_config")
basepath = Path(f"results/{snakemake.params.RDIR}config/")
for f, name in files.items():
copy(f, basepath / name)
with open(basepath / "config.snakemake.yaml", "w") as yaml_file:
with open(snakemake.output[0], "w") as yaml_file:
yaml.dump(
snakemake.config,
yaml_file,


@ -713,5 +713,5 @@ if __name__ == "__main__":
if snakemake.params.foresight == "myopic":
cumulative_cost = calculate_cumulative_cost()
cumulative_cost.to_csv(
"results/" + snakemake.params.RDIR + "/csvs/cumulative_cost.csv"
"results/" + snakemake.params.RDIR + "csvs/cumulative_cost.csv"
)


@ -0,0 +1,745 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""
import numpy as np
import pandas as pd
import pypsa
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
from six import iteritems
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
costs.columns.levels[0],
costs.columns.levels[1],
costs.columns.levels[2],
investments,
],
names=costs.columns.names[:3] + ["year"],
)
costs = costs.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
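# `active` is a 0/1 matrix (asset x investment period) flagging the periods in
# which an asset exists, i.e. build_year <= period < build_year + lifetime, so
# costs are only counted while the asset is alive.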
capital_costs = active.mul(capital_costs, axis=0)
discount = (
n.investment_period_weightings["objective"]
/ n.investment_period_weightings["years"]
)
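# `discount` is the average per-year social discount factor of each investment
# period: the "objective" weighting sums the yearly discount factors over the
# period (cf. get_investment_weighting in prepare_perfect_foresight.py) and
# dividing by "years" averages it.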
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values
if c.name == "Link":
p = (
c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
p_all[p_all < 0.0] = 0.0
p = p_all.groupby(level=0).sum()
else:
p = (
round(c.pnl.p, ndigits=2)
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# correct sequestration cost
if c.name == "Store":
items = c.df.index[
(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
]
c.df.loc[items, "marginal_cost"] = -20.0
marginal_costs = p.mul(c.df.marginal_cost).T
# marginal_costs = active.mul(marginal_costs, axis=0)
marginal_costs_grouped = (
marginal_costs.groupby(c.df.carrier).sum().mul(discount)
)
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values
# add back in all hydro
# costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
# costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
# costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()
return costs
def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
cumulative_cost = pd.DataFrame(
index=df["costs"].sum().index,
columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
)
# discount cost and express them in money value of planning_horizons[0]
for r in cumulative_cost.columns:
cumulative_cost[r] = [
df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
for index in cumulative_cost.index
]
# integrate cost throughout the transition path
for r in cumulative_cost.columns:
for cluster in cumulative_cost.index.get_level_values(level=0).unique():
for lv in cumulative_cost.index.get_level_values(level=1).unique():
for sector_opts in cumulative_cost.index.get_level_values(
level=2
).unique():
cumulative_cost.loc[
(cluster, lv, sector_opts, "cumulative cost"), r
] = np.trapz(
cumulative_cost.loc[
idx[cluster, lv, sector_opts, planning_horizons], r
].values,
x=planning_horizons,
)
return cumulative_cost
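# Worked example (hypothetical): with planning_horizons = [2030, 2040, 2050]
# and discounted costs c30, c40, c50, np.trapz integrates piecewise linearly:
# 10 * (c30 + c40) / 2 + 10 * (c40 + c50) / 2.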
def calculate_nodal_capacities(n, label, nodal_capacities):
# Beware: this also includes extraneous locations for country-level (e.g. biomass) or continent-wide (e.g. fossil gas/oil) assets
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
nodal_capacities_c = c.df.groupby(["location", "carrier"])[
opt_name.get(c.name, "p") + "_nom_opt"
].sum()
index = pd.MultiIndex.from_tuples(
[(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
)
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index, label] = nodal_capacities_c.values
return nodal_capacities
def calculate_capacities(n, label, capacities):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
capacities.columns.levels[0],
capacities.columns.levels[1],
capacities.columns.levels[2],
investments,
],
names=capacities.columns.names[:3] + ["year"],
)
capacities = capacities.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
caps = active.mul(caps, axis=0)
capacities_grouped = (
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
)
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(
capacities_grouped.index.union(capacities.index)
)
capacities.loc[capacities_grouped.index, label] = capacities_grouped.values
return capacities
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
.groupby(n.generators.carrier)
.sum()
)
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
curtailment[label] = (((avail - used) / avail) * 100).round(3)
return curtailment
def calculate_energy(n, label, energy):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
energy.columns.levels[0],
energy.columns.levels[1],
energy.columns.levels[2],
investments,
],
names=energy.columns.names[:3] + ["year"],
)
energy = energy.reindex(cols, axis=1)
for c in n.iterate_components(n.one_port_components | n.branch_components):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier, axis=1)
.sum()
)
else:
c_energies = pd.DataFrame(
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
)
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = (
c.pnl["p" + port]
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""]
totals[no_bus] = float(
n.component_attrs[c.name].loc["p" + port, "default"]
)
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
c_energies = pd.concat([c_energies.T], keys=[c.list_name])
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index, label] = c_energies.values
return energy
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses aggregated by
carrier.
"""
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
c.pnl.p[items]
.max()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
continue
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
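# e.g. end = "0" gives s = -max(p0), the peak withdrawal at bus0 counted
# negatively; end = "1" gives s = max(-p1), the peak injection at bus1
# counted positively.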
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
return supply
def calculate_supply_energy(n, label, supply_energy):
"""
Calculate the total energy supply/consumption of each component at the buses
aggregated by carrier.
"""
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
supply_energy.columns.levels[0],
supply_energy.columns.levels[1],
supply_energy.columns.levels[2],
investments,
],
names=supply_energy.columns.names[:3] + ["year"],
)
supply_energy = supply_energy.reindex(cols, axis=1)
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
if c.name == "Generator":
weightings = n.snapshot_weightings.generators
else:
weightings = n.snapshot_weightings.stores
if i in ["oil", "co2", "H2"]:
if c.name == "Load":
c.df.loc[items, "carrier"] = [
load.split("-202")[0] for load in items
]
if i == "oil" and c.name == "Generator":
c.df.loc[items, "carrier"] = "imported oil"
s = (
c.pnl.p[items]
.multiply(weightings, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
.T
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
(-1)
* c.pnl["p" + end]
.reindex(items, axis=1)
.multiply(n.snapshot_weightings.objective, axis=0)
.groupby(level=0)
.sum()
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
).T
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
return supply_energy
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
).union(metrics.index)
)
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
].sum()
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume", label] = metrics.loc[
["line_volume_AC", "line_volume_DC"], label
].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
return prices
def calculate_weighted_prices(n, label, weighted_prices):
# Warning: doesn't include storage units as loads
weighted_prices = weighted_prices.reindex(
pd.Index(
[
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2",
]
)
)
link_loads = {
"electricity": [
"heat pump",
"resistive heater",
"battery charger",
"H2 Electrolysis",
],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier in link_loads:
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
suffix = carrier[5:]
else:
suffix = " " + carrier
buses = n.buses.index[n.buses.index.str[2:] == suffix]
if buses.empty:
continue
if carrier in ["H2", "gas"]:
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
else:
load = n.loads_t.p_set.reindex(buses, axis=1)
for tech in link_loads[carrier]:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += (
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
# Add H2 Store when charging
# if carrier == "H2":
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
carrier = "AC"
buses = n.buses.index[n.buses.carrier == carrier]
## First do market value of generators ##
generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]
techs = n.generators.loc[generators, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
gens = generators[n.generators.loc[generators, "carrier"] == tech]
dispatch = (
n.generators_t.p[gens]
.groupby(n.generators.loc[gens, "bus"], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
## Now do market value of links ##
for i in ["0", "1"]:
all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]
techs = n.links.loc[all_links, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
links = all_links[n.links.loc[all_links, "carrier"] == tech]
dispatch = (
n.links_t["p" + i][links]
.groupby(n.links.loc[links, "bus" + i], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
return market_values
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(
price_statistics.index.union(
pd.Index(["zero_hours", "mean", "standard_deviation"])
)
)
buses = n.buses.index[n.buses.carrier == "AC"]
threshold = 0.1 # higher than phoney marginal_cost of wind/solar
df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)
df[n.buses_t.marginal_price[buses] < threshold] = 1.0
price_statistics.at["zero_hours", label] = df.sum().sum() / (
df.shape[0] * df.shape[1]
)
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()
price_statistics.at["standard_deviation", label] = (
n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
)
return price_statistics
def calculate_co2_emissions(n, label, df):
carattr = "co2_emissions"
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
return
weightings = n.snapshot_weightings.generators.mul(
n.investment_period_weightings["years"]
.reindex(n.snapshots)
.fillna(method="bfill")
.fillna(1.0),
axis=0,
)
# generators
gens = n.generators.query("carrier in @emissions.index")
if not gens.empty:
em_pu = gens.carrier.map(emissions) / gens.efficiency
em_pu = (
weightings["generators"].to_frame("weightings")
@ em_pu.to_frame("weightings").T
)
emitted = n.generators_t.p[gens.index].mul(em_pu)
emitted_grouped = (
emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
)
df = df.reindex(emitted_grouped.index.union(df.index))
df.loc[emitted_grouped.index, label] = emitted_grouped.values
if any(n.stores.carrier == "co2"):
co2_i = n.stores[n.stores.carrier == "co2"].index
df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0]
return df
outputs = [
"nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
"co2_emissions",
]
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["cluster", "lv", "opt"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in iteritems(networks_dict):
print(label, filename)
try:
n = pypsa.Network(filename)
except OSError:
print(label, " not solved yet.")
continue
# del networks_dict[label]
if not hasattr(n, "objective"):
print(label, " not solved correctly. Check log if infeasible or unbounded.")
continue
assign_carriers(n)
assign_locations(n)
for output in outputs:
df[output] = globals()["calculate_" + output](n, label, df[output])
return df
def to_csv(df):
for key in df:
df[key] = df[key].apply(lambda x: pd.to_numeric(x))
df[key].to_csv(snakemake.output[key])
# %%
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("make_summary_perfect")
run = snakemake.config["run"]["name"]
if run != "":
run += "/"
networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
+ run
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]
for lv in snakemake.config["scenario"]["ll"]
}
print(networks_dict)
nyears = 1
costs_db = prepare_costs(
snakemake.input.costs,
snakemake.config["costs"],
nyears,
)
df = make_summaries(networks_dict)
df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum()
to_csv(df)


@ -24,7 +24,7 @@ from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
plt.style.use(["ggplot", "matplotlibrc"])
plt.style.use(["ggplot"])
def rename_techs_tyndp(tech):
@ -913,6 +913,159 @@ def plot_series(network, carrier="AC", name="test"):
)
def plot_map_perfect(
network,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10,
):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
# investment periods
investments = n.snapshots.levels[0]
costs = {}
for comp in components:
df_c = n.df(comp)
if df_c.empty:
continue
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
active = pd.concat(
[n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
axis=1,
).astype(int)
capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
capital_cost_t = (
(active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location, n.df(comp).nice_group])
.sum()
)
capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
costs[comp] = capital_cost_t
costs = pd.concat(costs).groupby(level=[1, 2]).sum()
costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
new_columns = preferred_order.intersection(costs.index.levels[1]).append(
costs.index.levels[1].difference(preferred_order)
)
costs = costs.reindex(new_columns, level=1)
for item in new_columns:
if item not in snakemake.config["plotting"]["tech_colors"]:
print(
"Warning!",
item,
"not in config/plotting/tech_colors, assigning pink",
)
snakemake.config["plotting"]["tech_colors"][item] = "pink"
n.links.drop(
n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
inplace=True,
)
# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
# PDF has minimum width, so set these to zero
line_lower_threshold = 0.0
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
title = "Today's transmission"
line_widths[line_widths < line_lower_threshold] = 0.0
link_widths[link_widths < line_lower_threshold] = 0.0
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
for year in costs.columns:
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig.suptitle(year)
n.plot(
bus_sizes=costs[year] / bus_size_factor,
bus_colors=snakemake.config["plotting"]["tech_colors"],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax,
**map_opts,
)
sizes = [20, 10, 5]
labels = [f"{s} bEUR/a" for s in sizes]
sizes = [s / bus_size_factor * 1e9 for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.01, 1.06),
labelspacing=0.8,
frameon=False,
handletextpad=0,
title="system cost",
)
add_legend_circles(
ax,
sizes,
labels,
srid=n.srid,
patch_kw=dict(facecolor="lightgrey"),
legend_kw=legend_kw,
)
sizes = [10, 5]
labels = [f"{s} GW" for s in sizes]
scale = 1e3 / linewidth_factor
sizes = [s * scale for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.27, 1.06),
frameon=False,
labelspacing=0.8,
handletextpad=1,
title=title,
)
add_legend_lines(
ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
)
legend_kw = dict(
bbox_to_anchor=(1.52, 1.04),
frameon=False,
)
fig.savefig(
snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
)
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -922,10 +1075,9 @@ if __name__ == "__main__":
weather_year="",
simpl="",
opts="",
clusters="5",
ll="v1.5",
sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
clusters="37",
ll="v1.0",
sector_opts="4380H-T-H-B-I-A-solar+p3-dist1",
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -939,16 +1091,23 @@ if __name__ == "__main__":
if map_opts["boundaries"] is None:
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
plot_map(
n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=2e10,
transmission=False,
)
if snakemake.params["foresight"] == "perfect":
plot_map_perfect(
n,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=2e10,
)
else:
plot_map(
n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=2e10,
transmission=False,
)
plot_h2_map(n, regions)
plot_ch4_map(n)
plot_map_without(n)
plot_h2_map(n, regions)
plot_ch4_map(n)
plot_map_without(n)
# plot_series(n, carrier="AC", name=suffix)
# plot_series(n, carrier="heat", name=suffix)

116 scripts/plot_statistics.py Normal file

@ -0,0 +1,116 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pypsa
import seaborn as sns
from _helpers import configure_logging
sns.set_theme("paper", style="whitegrid")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_elec_statistics",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
n.carriers.loc["load", ["nice_name", "color"]] = "Load", "darkred"
colors = n.carriers.set_index("nice_name").color.where(
lambda s: s != "", "lightgrey"
)
# %%
def rename_index(ds):
specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})")
generic = ds.index.get_level_values("carrier")
duplicated = generic.duplicated(keep=False)
index = specific.where(duplicated, generic)
return ds.set_axis(index)
def plot_static_per_carrier(ds, ax, drop_zero=True):
if drop_zero:
ds = ds[ds != 0]
ds = ds.dropna()
c = colors[ds.index.get_level_values("carrier")]
ds = ds.pipe(rename_index)
label = f"{ds.attrs['name']} [{ds.attrs['unit']}]"
ds.plot.barh(color=c.values, xlabel=label, ax=ax)
ax.grid(axis="y")
fig, ax = plt.subplots()
ds = n.statistics.capacity_factor().dropna()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.capacity_factor_bar)
fig, ax = plt.subplots()
ds = n.statistics.installed_capacity().dropna()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.installed_capacity_bar)
fig, ax = plt.subplots()
ds = n.statistics.optimal_capacity()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.optimal_capacity_bar)
fig, ax = plt.subplots()
ds = n.statistics.capex()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.capital_expenditure_bar)
fig, ax = plt.subplots()
ds = n.statistics.opex()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.operational_expenditure_bar)
fig, ax = plt.subplots()
ds = n.statistics.curtailment()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.curtailment_bar)
fig, ax = plt.subplots()
ds = n.statistics.supply()
ds = ds.drop("Line")
ds = ds / 1e6
ds.attrs["unit"] = "TWh"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.supply_bar)
fig, ax = plt.subplots()
ds = n.statistics.withdrawal()
ds = ds.drop("Line")
ds = ds / -1e6
ds.attrs["unit"] = "TWh"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.withdrawal_bar)
fig, ax = plt.subplots()
ds = n.statistics.market_value()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.market_value_bar)
# touch file
with open(snakemake.output.barplots_touch, "a"):
pass


@ -49,6 +49,10 @@ def rename_techs(label):
# "H2 Fuel Cell": "hydrogen storage",
# "H2 pipeline": "hydrogen storage",
"battery": "battery storage",
"H2 for industry": "H2 for industry",
"land transport fuel cell": "land transport fuel cell",
"land transport oil": "land transport oil",
"oil shipping": "shipping oil",
# "CC": "CC"
}
@ -157,11 +161,11 @@ def plot_costs():
df.index.difference(preferred_order)
)
new_columns = df.sum().sort_values().index
# new_columns = df.sum().sort_values().index
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot(
df.loc[new_index].T.plot(
kind="bar",
ax=ax,
stacked=True,
@ -213,17 +217,22 @@ def plot_energy():
logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
if df.empty:
fig, ax = plt.subplots(figsize=(12, 8))
fig.savefig(snakemake.output.energy, bbox_inches="tight")
return
new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order)
)
new_columns = df.columns.sort_values()
# new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
logger.debug(df.loc[new_index, new_columns])
logger.debug(df.loc[new_index])
df.loc[new_index, new_columns].T.plot(
df.loc[new_index].T.plot(
kind="bar",
ax=ax,
stacked=True,
@ -267,8 +276,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers
]
fig, ax = plt.subplots(figsize=(12, 8))
for k, v in balances.items():
df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()
@ -279,7 +286,7 @@ def plot_balances():
# remove trailing link ports
df.index = [
i[:-1]
if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"]))
if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"]))
else i
for i in df.index
]
@ -313,6 +320,8 @@ def plot_balances():
new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
@ -345,8 +354,6 @@ def plot_balances():
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
plt.cla()
def historical_emissions(countries):
"""
@ -354,8 +361,7 @@ def historical_emissions(countries):
"""
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221)
fn = "data/eea/UNFCCC_v23.csv"
df = pd.read_csv(fn, encoding="latin-1")
df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False)
df.loc[df["Year"] == "1985-1987", "Year"] = 1986
df["Year"] = df["Year"].astype(int)
df = df.set_index(
@ -379,15 +385,21 @@ def historical_emissions(countries):
e["waste management"] = "5 - Waste management"
e["other"] = "6 - Other Sector"
e["indirect"] = "ind_CO2 - Indirect CO2"
e["total wL"] = "Total (with LULUCF)"
e["total woL"] = "Total (without LULUCF)"
e["other LULUCF"] = "4.H - Other LULUCF"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in countries:
countries.remove("GB")
countries.append("UK")
year = np.arange(1990, 2018).tolist()
year = df.index.levels[0][df.index.levels[0] >= 1990]
missing = pd.Index(countries).difference(df.index.levels[2])
if not missing.empty:
logger.warning(
f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
)
countries = pd.Index(df.index.levels[2]).intersection(countries)
idx = pd.IndexSlice
co2_totals = (
@ -450,25 +462,52 @@ def plot_carbon_budget_distribution(input_eurostat):
plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20
emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2
# historic emissions
countries = snakemake.params.countries
e_1990 = co2_emissions_year(
countries,
input_eurostat,
opts,
emissions_scope,
report_year,
input_co2,
year=1990,
)
emissions = historical_emissions(countries)
# add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
emissions.loc[2019] = 2.971372
emissions.loc[2020] = 2.691958
emissions.loc[2021] = 2.869355
if snakemake.config["foresight"] == "myopic":
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
["cb"]
]
co2_cap *= e_1990
else:
supply_energy = pd.read_csv(
snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
)
co2_cap = (
supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
)
co2_cap.rename(index=lambda x: int(x), inplace=True)
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5])
ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22)
# ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
countries = snakemake.params.countries
e_1990 = co2_emissions_year(countries, input_eurostat, opts, year=1990)
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
emissions = historical_emissions(countries)
ax1.plot(emissions, color="black", linewidth=3, label=None)
# plot committed and uder-discussion targets
# plot committed and under-discussion targets
# (notice that historical emissions include all countries in the
# network, but targets refer to EU)
ax1.plot(
@ -485,7 +524,7 @@ def plot_carbon_budget_distribution(input_eurostat):
[0.45 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
markerfacecolor="black",
markeredgecolor="black",
)
@ -509,21 +548,7 @@ def plot_carbon_budget_distribution(input_eurostat):
ax1.plot(
[2050],
[0.01 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
linewidth=0,
markeredgecolor="black",
label="EU under-discussion target",
zorder=10,
clip_on=False,
)
ax1.plot(
[2050],
[0.125 * emissions[1990]],
"ro",
[0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
@ -531,14 +556,19 @@ def plot_carbon_budget_distribution(input_eurostat):
label="EU committed target",
)
for col in co2_cap.columns:
ax1.plot(co2_cap[col], linewidth=3, label=col)
ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
)
path_cb_plot = "results/" + snakemake.params.RDIR + "/graphs/"
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)
plt.grid(axis="y")
path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
plt.savefig(path, bbox_inches="tight")
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -557,6 +587,7 @@ if __name__ == "__main__":
for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-")
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution(snakemake.input.eurostat)
if any(["cb" in o for o in opts]) or (
snakemake.config["foresight"] == "perfect"
):
plot_carbon_budget_distribution(snakemake.input.eurostat)


@ -0,0 +1,242 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import country_converter as coco
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
sns.set_theme("paper", style="whitegrid")
cc = coco.CountryConverter()
color_country = {
"AL": "#440154",
"AT": "#482677",
"BA": "#43398e",
"BE": "#3953a4",
"BG": "#2c728e",
"CH": "#228b8d",
"CZ": "#1f9d8a",
"DE": "#29af7f",
"DK": "#3fbc73",
"EE": "#5ec962",
"ES": "#84d44b",
"FI": "#addc30",
"FR": "#d8e219",
"GB": "#fde725",
"GR": "#f0f921",
"HR": "#f1c25e",
"HU": "#f4a784",
"IE": "#f78f98",
"IT": "#f87ea0",
"LT": "#f87a9a",
"LU": "#f57694",
"LV": "#f3758d",
"ME": "#f37685",
"MK": "#f37b7c",
"NL": "#FF6666",
"NO": "#FF3333",
"PL": "#eb0000",
"PT": "#d70000",
"RO": "#c00000",
"RS": "#a50000",
"SE": "#8a0000",
"SI": "#6f0000",
"SK": "#550000",
}
def sort_one_country(country, df):
indices = [link for link in df.columns if country in link]
df_country = df[indices].copy()
for link in df_country.columns:
if country in link[5:]:
df_country[link] = -df_country[link]
link_reverse = str(link[5:] + " - " + link[:2])
df_country = df_country.rename(columns={link: link_reverse})
return df_country.reindex(sorted(df_country.columns), axis=1)
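# Example (assuming column labels like "DE - FR"): for country "FR" the flow
# on "DE - FR" is negated and relabelled "FR - DE", so positive values always
# denote exports of the inspected country.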
def cross_border_time_series(countries, data):
fig, ax = plt.subplots(2 * len(countries), 1, figsize=(15, 10 * len(countries)))
axis = 0
for country in countries:
ymin = 0
ymax = 0
for df in data:
df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
color = [color_country[link[5:]] for link in df_country.columns]
df_pos.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
)
df_neg.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
)
if (axis % 2) == 0:
title = "Historic"
else:
title = "Optimized"
ax[axis].set_title(
title + " Import / Export for " + cc.convert(country, to="name_short")
)
# Custom legend elements
legend_elements = []
for link in df_country.columns:
legend_elements = legend_elements + [
plt.fill_between(
[],
[],
color=color_country[link[5:]],
label=cc.convert(link[5:], to="name_short"),
)
]
# Create the legend
ax[axis].legend(handles=legend_elements, loc="upper right")
# rescale the y axis
neg_min = df_neg.sum(axis=1).min() * 1.2
if neg_min < ymin:
ymin = neg_min
pos_max = df_pos.sum(axis=1).max() * 1.2
if pos_max > ymax:
ymax = pos_max
axis = axis + 1
for x in range(axis - 2, axis):
ax[x].set_ylim([ymin, ymax])
fig.savefig(snakemake.output.trade_time_series, bbox_inches="tight")
def cross_border_bar(countries, data):
df_positive = pd.DataFrame()
df_negative = pd.DataFrame()
color = []
for country in countries:
order = 0
for df in data:
df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
if (order % 2) == 0:
title = "Historic"
else:
title = "Optimized"
df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
)
df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
)
df_positive = pd.concat([df_positive_new, df_positive])
df_negative = pd.concat([df_negative_new, df_negative])
order = order + 1
color = [color_country[link[5:]] for link in df_positive.columns]
fig, ax = plt.subplots(figsize=(15, 60))
df_positive.plot.barh(ax=ax, stacked=True, color=color, zorder=2)
df_negative.plot.barh(ax=ax, stacked=True, color=color, zorder=2)
plt.grid(axis="x", zorder=0)
plt.grid(axis="y", zorder=0)
# Custom legend elements
legend_elements = []
for country in list(color_country.keys()):
legend_elements = legend_elements + [
plt.fill_between(
[],
[],
color=color_country[country],
label=cc.convert(country, to="name_short"),
)
]
# Create the legend
plt.legend(handles=legend_elements, loc="upper right")
fig.savefig(snakemake.output.cross_border_bar, bbox_inches="tight")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
countries = snakemake.params.countries
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.cross_border_flows,
index_col=0,
header=0,
parse_dates=True,
)
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
# Prepare network data shaped similarly to the ENTSO-E data structure
optimized_links = n.links_t.p0.rename(
columns=dict(n.links.bus0.str[:2] + " - " + n.links.bus1.str[:2])
)
optimized_lines = n.lines_t.p0.rename(
columns=dict(n.lines.bus0.str[:2] + " - " + n.lines.bus1.str[:2])
)
optimized = pd.concat([optimized_links, optimized_lines], axis=1)
# Drop internal country connections
optimized.drop(
[c for c in optimized.columns if c[:2] == c[5:]], axis=1, inplace=True
)
# align column names
for c1 in optimized.columns:
for c2 in optimized.columns:
if c1[:2] == c2[5:] and c2[:2] == c1[5:]:
optimized = optimized.rename(columns={c1: c2})
optimized = optimized.groupby(lambda x: x, axis=1).sum()
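# e.g. the reversed duplicate of a border ("FR - DE" vs "DE - FR") is given a
# common label above, and the groupby then sums the duplicates into a single
# column per border.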
cross_border_bar(countries, [historic, optimized])
cross_border_time_series(countries, [historic, optimized])
# touch file
with open(snakemake.output.plots_touch, "a"):
pass


@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.electricity_prices,
index_col=0,
header=0,
parse_dates=True,
)
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
optimized = n.buses_t.marginal_price.groupby(n.buses.country, axis=1).mean()
data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
data.columns.names = ["Kind", "Country"]
fig, ax = plt.subplots(figsize=(6, 6))
df = data.mean().unstack().T
df.plot.barh(ax=ax, xlabel="Electricity Price [€/MWh]", ylabel="")
ax.grid(axis="y")
fig.savefig(snakemake.output.price_bar, bbox_inches="tight")
fig, ax = plt.subplots()
df = data.groupby(level="Kind", axis=1).mean()
df.plot(ax=ax, xlabel="", ylabel="Electricity Price [€/MWh]", alpha=0.8)
ax.grid(axis="x")
fig.savefig(snakemake.output.price_line, bbox_inches="tight")
# touch file
with open(snakemake.output.plots_touch, "a"):
pass


@ -0,0 +1,144 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")
carrier_groups = {
"Offshore Wind (AC)": "Offshore Wind",
"Offshore Wind (DC)": "Offshore Wind",
"Open-Cycle Gas": "Gas",
"Combined-Cycle Gas": "Gas",
"Reservoir & Dam": "Hydro",
"Pumped Hydro Storage": "Hydro",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_validation_electricity_production",
simpl="",
opts="Ept",
clusters="37c",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.electricity_production,
index_col=0,
header=[0, 1],
parse_dates=True,
)
colors = n.carriers.set_index("nice_name").color.where(
lambda s: s != "", "lightgrey"
)
colors["Offshore Wind"] = colors["Offshore Wind (AC)"]
colors["Gas"] = colors["Combined-Cycle Gas"]
colors["Hydro"] = colors["Reservoir & Dam"]
colors["Other"] = "lightgray"
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
optimized = n.statistics.dispatch(
groupby=get_bus_and_carrier, aggregate_time=False
).T
optimized = optimized[["Generator", "StorageUnit"]].droplevel(0, axis=1)
optimized = optimized.rename(columns=n.buses.country, level=0)
optimized = optimized.rename(columns=carrier_groups, level=1)
optimized = optimized.groupby(axis=1, level=[0, 1]).sum()
data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
data.columns.names = ["Kind", "Country", "Carrier"]
data = data.mul(n.snapshot_weightings.generators, axis=0)
# total production per carrier
fig, ax = plt.subplots(figsize=(6, 6))
df = data.groupby(level=["Kind", "Carrier"], axis=1).sum().sum().unstack().T
df = df / 1e6 # TWh
df.plot.barh(ax=ax, xlabel="Electricity Production [TWh]", ylabel="")
ax.grid(axis="y")
fig.savefig(snakemake.output.production_bar, bbox_inches="tight")
# highest diffs
fig, ax = plt.subplots(figsize=(6, 10))
df = data.sum() / 1e6 # TWh
df = df["Optimized"] - df["Historic"]
df = df.dropna().sort_values()
df = pd.concat([df.iloc[:5], df.iloc[-5:]])
c = colors[df.index.get_level_values(1)]
df.plot.barh(
xlabel="Optimized Production - Historic Production [TWh]", ax=ax, color=c.values
)
ax.set_title("Strongest Deviations")
ax.grid(axis="y")
fig.savefig(snakemake.output.production_deviation_bar, bbox_inches="tight")
# seasonal operation
fig, axes = plt.subplots(3, 1, figsize=(9, 9))
df = (
data.groupby(level=["Kind", "Carrier"], axis=1)
.sum()
.resample("1W")
.mean()
.clip(lower=0)
)
df = df / 1e3
order = (
(df["Historic"].diff().abs().sum() / df["Historic"].sum()).sort_values().index
)
c = colors[order]
optimized = df["Optimized"].reindex(order, axis=1, level=1)
historical = df["Historic"].reindex(order, axis=1, level=1)
kwargs = dict(color=c, legend=False, ylabel="Production [GW]", xlabel="")
optimized.plot.area(ax=axes[0], **kwargs, title="Optimized")
historical.plot.area(ax=axes[1], **kwargs, title="Historic")
diff = optimized - historical
diff.clip(lower=0).plot.area(
ax=axes[2], **kwargs, title=r"$\Delta$ (Optimized - Historic)"
)
lim = axes[2].get_ylim()[1]
diff.clip(upper=0).plot.area(ax=axes[2], **kwargs)
axes[2].set_ylim(bottom=-lim, top=lim)
h, l = axes[0].get_legend_handles_labels()
fig.legend(
h[::-1],
l[::-1],
loc="center left",
bbox_to_anchor=(1, 0.5),
ncol=1,
frameon=False,
labelspacing=1,
)
fig.savefig(snakemake.output.seasonal_operation_area, bbox_inches="tight")
# touch file
with open(snakemake.output.plots_touch, "a"):
pass


@ -65,6 +65,7 @@ import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs
from pypsa.descriptors import expand_series
idx = pd.IndexSlice
@ -103,10 +104,30 @@ def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
).sum(axis=1)
gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
n.generators["marginal_cost"] += gen_ep
n.generators_t["marginal_cost"] += gen_ep[n.generators_t["marginal_cost"].columns]
su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
n.storage_units["marginal_cost"] += su_ep
def add_dynamic_emission_prices(n):
co2_price = pd.read_csv(snakemake.input.co2_price, index_col=0, parse_dates=True)
co2_price = co2_price[~co2_price.index.duplicated()]
co2_price = (
co2_price.reindex(n.snapshots).fillna(method="ffill").fillna(method="bfill")
)
emissions = (
n.generators.carrier.map(n.carriers.co2_emissions) / n.generators.efficiency
)
co2_cost = expand_series(emissions, n.snapshots).T.mul(co2_price.iloc[:, 0], axis=0)
static = n.generators.marginal_cost
dynamic = n.get_switchable_as_dense("Generator", "marginal_cost")
marginal_cost = dynamic + co2_cost.reindex(columns=dynamic.columns, fill_value=0)
n.generators_t.marginal_cost = marginal_cost.loc[:, marginal_cost.ne(static).any()]
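# Example (hypothetical): a generator emitting 0.2 tCO2 per MWh_el
# (co2_emissions / efficiency) under a spot CO2 price of 80 Eur/t gets
# 0.2 * 80 = 16 Eur/MWh added to its static marginal cost in that snapshot.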
def set_line_s_max_pu(n, s_max_pu=0.7):
n.lines["s_max_pu"] = s_max_pu
logger.info(f"N-1 security margin of lines set to {s_max_pu}")
@ -253,6 +274,7 @@ def set_line_nom_max(
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -337,7 +359,12 @@ if __name__ == "__main__":
c.df.loc[sel, attr] *= factor
for o in opts:
if "Ep" in o:
if "Ept" in o:
logger.info(
"Setting time dependent emission prices according spot market price"
)
add_dynamic_emission_prices(n)
elif "Ep" in o:
m = re.findall(r"[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
logger.info("Setting emission prices according to wildcard value.")


@ -0,0 +1,553 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Concatenates the PyPSA networks of the single investment periods into one
multi-period network.
"""
import logging
import re
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
from six import iterkeys
logger = logging.getLogger(__name__)
# helper functions ---------------------------------------------------
def get_missing(df, n, c):
"""
Get the assets in df that are still missing from network n for component c.
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
df_final = getattr(n, c)
missing_i = df.index.difference(df_final.index)
return df.loc[missing_i]
def get_social_discount(t, r=0.01):
"""
Calculate for a given time t and social discount rate r [per unit] the
social discount.
"""
return 1 / (1 + r) ** t
def get_investment_weighting(time_weighting, r=0.01):
"""
Define cost weighting.
Returns cost weightings depending on the time_weighting
(pd.Series) and the social discount rate r.
"""
end = time_weighting.cumsum()
start = time_weighting.cumsum().shift().fillna(0)
return pd.concat([start, end], axis=1).apply(
lambda x: sum([get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))]),
axis=1,
)
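# Example: with 10-year investment periods and r = 0.01, the first period is
# weighted sum(1 / 1.01**t for t in range(0, 10)) ≈ 9.57 and the second
# sum(1 / 1.01**t for t in range(10, 20)) ≈ 8.66.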
def add_year_to_constraints(n, baseyear):
"""
Add investment period to global constraints and rename index.
Parameters
----------
n : pypsa.Network
baseyear : int
year in which optimized assets are built
"""
for c in n.iterate_components(["GlobalConstraint"]):
c.df["investment_period"] = baseyear
c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)
def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with line
expansion.
Losses of DC links are assumed to be 3% per 1000 km.
"""
logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
n.madd(
"Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost,
)
# Remove AC lines
logger.info("Removing AC lines")
lines_rm = n.lines.index
n.mremove("Line", lines_rm)
# Set efficiency of all DC links to include losses depending on length
n.links.loc[n.links.carrier == "DC", "efficiency"] = (
1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
)
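# e.g. a 500 km DC link receives efficiency 1 - 0.03 * 500 / 1000 = 0.985.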
def adjust_electricity_grid(n, year, years):
"""
Add carrier to lines. Replace AC lines with DC links in case of line
expansion. Add lifetime to DC links in case of line expansion.
Parameters
----------
n : pypsa.Network
year : int
year in which optimized assets are built
years: list
investment periods
"""
n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier == "DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
hvdc_transport_model(n)
links_i = n.links[n.links.carrier == "DC"].index
n.links.loc[links_i, "lifetime"] = 100
if year != years[0]:
n.links.loc[links_i, "p_nom_min"] = 0
n.links.loc[links_i, "p_nom"] = 0
# --------------------------------------------------------------------
def concat_networks(years):
"""
Concatenate the given PyPSA networks and add build_year.
Return:
n : pypsa.Network for the whole planning horizon
"""
# input paths of sector coupling networks
network_paths = [snakemake.input.brownfield_network] + [
snakemake.input[f"network_{year}"] for year in years[1:]
]
# final concatenated network
n = pypsa.Network()
# iterate over single year networks and concat to perfect foresight network
for i, network_path in enumerate(network_paths):
year = years[i]
network = pypsa.Network(network_path)
adjust_electricity_grid(network, year, years)
add_build_year_to_new_assets(network, year)
# static ----------------------------------
# (1) add buses and carriers
for component in network.iterate_components(["Bus", "Carrier"]):
df_year = component.df
# get missing assets
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# (2) add generators, links, stores and loads
for component in network.iterate_components(
["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
):
df_year = component.df.copy()
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# time variant --------------------------------------------------
network_sns = pd.MultiIndex.from_product([[year], network.snapshots])
snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns)
n.set_snapshots(snapshots)
for component in network.iterate_components():
pnl = getattr(n, component.list_name + "_t")
for k in component.pnl.keys():
pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
continue
if component.name == "Load":
static_load = network.loads.loc[network.loads.p_set != 0]
static_load_t = expand_series(static_load.p_set, network_sns).T
pnl_year = pd.concat(
[pnl_year.reindex(network_sns), static_load_t], axis=1
)
columns = (pnl[k].columns.union(pnl_year.columns)).unique()
pnl[k] = pnl[k].reindex(columns=columns)
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
else:
# this is to avoid adding assets with infinite
# lifetime (e.g. ror) multiple times
cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values
# (3) global constraints
for component in network.iterate_components(["GlobalConstraint"]):
add_year_to_constraints(network, year)
import_components_from_dataframe(n, component.df, component.name)
# set investment periods
n.investment_periods = n.snapshots.levels[0]
# weighting of the investment periods -> assume the last period has the same weighting as the period before
time_w = n.investment_periods.to_series().diff().shift(-1).ffill()
n.investment_period_weightings["years"] = time_w
# set objective weightings
objective_w = get_investment_weighting(
n.investment_period_weightings["years"], social_discountrate
)
n.investment_period_weightings["objective"] = objective_w
# all former static loads are now time-dependent -> set static = 0
n.loads["p_set"] = 0
n.loads_t.p_set.fillna(0, inplace=True)
return n
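# Illustrative outcome, assuming planning horizons [2030, 2040, 2050]:
# n.snapshots becomes a pd.MultiIndex of (investment period, timestep)
# pairs and n.investment_period_weightings["years"] equals [10, 10, 10],
# since the last period is assumed to span as long as the one before.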
def adjust_stores(n):
"""
Make sure that stores still behave cyclically over one year and not over
the whole modelling horizon.
"""
# cyclic constraint
cyclic_i = n.stores[n.stores.e_cyclic].index
n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
n.stores.loc[cyclic_i, "e_cyclic"] = False
# non cyclic store assumptions
non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"]
co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index
n.stores.loc[co2_i, "e_cyclic_per_period"] = False
n.stores.loc[co2_i, "e_cyclic"] = False
# e_initial at beginning of each investment period
e_initial_store = ["solid biomass", "biogas"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
# n.stores.loc[co2_i, "e_initial"] *= 10
# n.stores.loc[co2_i, "e_nom"] *= 10
e_initial_store = ["co2 stored"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
return n
def set_phase_out(n, carrier, ct, phase_out_year):
"""
Set planned phase-outs for a given carrier, country (ct) and planned year
of phase-out (phase_out_year).
"""
df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
# assets which are going to be phased out before end of their lifetime
assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
build_year = n.links.loc[assets_i, "build_year"]
# adjust lifetime
n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)
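# Illustrative example with assumed numbers: a German nuclear plant with
# build_year=1990 and lifetime=60 exceeds the 2022 phase-out
# (1990 + 60 = 2050 > 2022), so its lifetime is cut to 2022 - 1990 = 32.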
def set_all_phase_outs(n):
# TODO move this to a csv or to the config
planned = [
(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2030),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025),
(["coal", "lignite"], "DK", 2030),
(["coal", "lignite"], "FI", 2030),
(["coal", "lignite"], "HU", 2030),
(["coal", "lignite"], "SK", 2030),
(["coal", "lignite"], "GR", 2030),
(["coal", "lignite"], "IE", 2030),
(["coal", "lignite"], "NL", 2030),
(["coal", "lignite"], "RS", 2030),
]
for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier, ct, phase_out_year)
# remove assets which are already phased out
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
logger.info("add carbon budget of {}".format(budget))
n.add(
"GlobalConstraint",
"Budget",
type="Co2Budget",
carrier_attribute="co2_emissions",
sense="<=",
constant=budget,
investment_period=n.investment_periods[-1],
)
# drop other CO2 limits
drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
n.mremove("GlobalConstraint", drop_i)
n.add(
"GlobalConstraint",
"carbon_neutral",
type="co2_limit",
carrier_attribute="co2_emissions",
sense="<=",
constant=0,
investment_period=n.investment_periods[-1],
)
# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990
annual_reduction = (emissions_2019 - target_2030) / 11
first_year = n.snapshots.levels[0][0]
time_weightings = n.investment_period_weightings.loc[first_year, "years"]
co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
logger.info(
"add minimum emissions for {} of {} Gt CO2/a".format(first_year, co2min)
)
n.add(
"GlobalConstraint",
f"Co2Min-{first_year}",
type="Co2min",
carrier_attribute="co2_emissions",
sense=">=",
investment_period=first_year,
constant=co2min * 1e9 * time_weightings,
)
return n
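# Illustrative example: a sector_opts element such as "1p7" matches the
# budget regex above and selects snakemake.config["co2_budget"]["1p7"]
# (in Gt CO2), scaled by 1e9 to tonnes for the "Co2Budget" constraint.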
def adjust_lvlimit(n):
"""
Convert global constraints for single investment periods to one uniform
constraint if all attributes stay the same.
"""
c = "GlobalConstraint"
cols = ["carrier_attribute", "sense", "constant", "type"]
glc_type = "transmission_volume_expansion_limit"
if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
glc.index = pd.Index(["lv_limit"])
remove_i = n.df(c)[n.df(c).type == glc_type].index
n.mremove(c, remove_i)
import_components_from_dataframe(n, glc, c)
return n
def adjust_CO2_glc(n):
c = "GlobalConstraint"
glc_name = "CO2Limit"
glc_type = "primary_energy"
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
n.df(c).loc[mask, "type"] = "co2_limit"
return n
def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2.
Add H2 boilers for heating for all existing gas boilers.
"""
c = "Link"
logger.info("Add H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
df = n.links.loc[gas_i]
# adjust bus 0
df["bus0"] = df.bus1.map(n.buses.location) + " H2"
# rename carrier and index
df["carrier"] = df.carrier.apply(
lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
)
df.rename(
index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
)
# TODO: costs for retrofitting
df["capital_cost"] = 100
# set existing capacity to zero
df["p_nom"] = 0
df["p_nom_extendable"] = True
# add H2 boilers to network
import_components_from_dataframe(n, df, c)
def apply_time_segmentation_perfect(
n, segments, solver_name="cbc", overwrite_time_dependent=True
):
"""
Aggregate time series into segments of varying length.
Input:
n: pypsa Network
segments: (int) number of segments in which the typical period should be
subdivided
solver_name: (str) name of solver
overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network
with typical time series created by tsam
"""
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found. Install via 'pip install tsam'."
)
# get all time-dependent data
columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"])
raw = pd.DataFrame(index=n.snapshots, columns=columns)
for c in n.iterate_components():
for attr, pnl in c.pnl.items():
# exclude e_min_pu which is used for SOC of EVs in the morning
if not pnl.empty and attr != "e_min_pu":
df = pnl.copy()
df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
raw = pd.concat([raw, df], axis=1)
raw = raw.dropna(axis=1)
sn_weightings = {}
for year in raw.index.levels[0]:
logger.info(f"Find representative snapshots for {year}.")
raw_t = raw.loc[year]
# normalise all time-dependent data
annual_max = raw_t.max().replace(0, 1)
raw_t = raw_t.div(annual_max, level=0)
# get representative segments
agg = tsam.TimeSeriesAggregation(
raw_t,
hoursPerPeriod=len(raw_t),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
snapshots = pd.DatetimeIndex(timesteps)
sn_weightings[year] = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
sn_weightings = pd.concat(sn_weightings)
n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
return n
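# Minimal usage sketch, assuming a multi-period network n and an
# installed CBC solver:
#   n = apply_time_segmentation_perfect(n, segments=100, solver_name="cbc")
# Afterwards n.snapshot_weightings carries the segment durations, so
# weighted annual sums remain consistent with the hourly resolution.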
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
logger.info(
"Concat networks of investment period {} with social discount rate of {}%".format(
years, social_discountrate * 100
)
)
# concat prenetworks of planning horizon to single network ------------
n = concat_networks(years)
# temporal aggregation
opts = snakemake.wildcards.sector_opts.split("-")
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name)
# adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n)
# adjust global constraints CO2 limit
n = adjust_CO2_glc(n)
# adjust stores to multi period investment
n = adjust_stores(n)
# set phase outs
set_all_phase_outs(n)
# add H2 boiler
add_H2_boilers(n)
# set carbon constraints
opts = snakemake.wildcards.sector_opts.split("-")
n = set_carbon_constraints(n, opts)
# export network
n.export_to_netcdf(snakemake.output[0])

View File

@ -191,17 +191,15 @@ def get(item, investment_year=None):
def co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, year
countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year
):
"""
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
"""
emissions_scope = snakemake.params.energy["emissions"]
eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope)
eea_co2 = build_eea_co2(input_co2, year, emissions_scope)
# TODO: read Eurostat data from year > 2014
# this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
report_year = snakemake.params.energy["eurostat_report_year"]
if year > 2014:
eurostat_co2 = build_eurostat_co2(
input_eurostat, countries, report_year, year=2014
@ -240,12 +238,24 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
countries = snakemake.params.countries
e_1990 = co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, year=1990
countries,
input_eurostat,
opts,
emissions_scope,
report_year,
input_co2,
year=1990,
)
# emissions at the beginning of the path (last year available 2018)
e_0 = co2_emissions_year(
countries, input_eurostat, opts, emissions_scope, report_year, year=2018
countries,
input_eurostat,
opts,
emissions_scope,
report_year,
input_co2,
year=2018,
)
planning_horizons = snakemake.params.planning_horizons
@ -567,6 +577,7 @@ def add_co2_tracking(n, options):
capital_cost=options["co2_sequestration_cost"],
carrier="co2 stored",
bus=spatial.co2.nodes,
lifetime=options["co2_sequestration_lifetime"],
)
n.add("Carrier", "co2 stored")
@ -2160,12 +2171,11 @@ def add_biomass(n, costs):
)
if options["biomass_transport"]:
transport_costs = pd.read_csv(
snakemake.input.biomass_transport_costs,
index_col=0,
).squeeze()
# add biomass transport
transport_costs = pd.read_csv(
snakemake.input.biomass_transport_costs, index_col=0
)
transport_costs = transport_costs.squeeze()
biomass_transport = create_network_topology(
n, "biomass transport ", bidirectional=False
)
@ -2189,6 +2199,27 @@ def add_biomass(n, costs):
carrier="solid biomass transport",
)
elif options["biomass_spatial"]:
# add artificial biomass generators at nodes which include transport costs
transport_costs = pd.read_csv(
snakemake.input.biomass_transport_costs, index_col=0
)
transport_costs = transport_costs.squeeze()
bus_transport_costs = spatial.biomass.nodes.to_series().apply(
lambda x: transport_costs[x[:2]]
)
average_distance = 200 # km #TODO: validate this assumption
n.madd(
"Generator",
spatial.biomass.nodes,
bus=spatial.biomass.nodes,
carrier="solid biomass",
p_nom=10000,
marginal_cost=costs.at["solid biomass", "fuel"]
+ bus_transport_costs * average_distance,
)
# AC buses with district heating
urban_central = n.buses.index[n.buses.carrier == "urban central heat"]
if not urban_central.empty and options["chp"]:
@ -3304,7 +3335,7 @@ if __name__ == "__main__":
spatial = define_spatial(pop_layout.index, options)
if snakemake.params.foresight == "myopic":
if snakemake.params.foresight in ["myopic", "perfect"]:
add_lifetime_wind_solar(n, costs)
conventional = snakemake.params.conventional_carriers
@ -3386,8 +3417,14 @@ if __name__ == "__main__":
if not os.path.exists(fn):
emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2
build_carbon_budget(
o, snakemake.input.eurostat, fn, emissions_scope, report_year
o,
snakemake.input.eurostat,
fn,
emissions_scope,
report_year,
input_co2,
)
co2_cap = pd.read_csv(fn, index_col=0).squeeze()
limit = co2_cap.loc[investment_year]
@ -3420,7 +3457,7 @@ if __name__ == "__main__":
if options["electricity_grid_connection"]:
add_electricity_grid_connection(n, costs)
first_year_myopic = (snakemake.params.foresight == "myopic") and (
first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and (
snakemake.params.planning_horizons[0] == investment_year
)

View File

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve monthly fuel prices from Destatis.
"""
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from _helpers import configure_logging, progress_retrieve
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_monthly_fuel_prices")
rootpath = ".."
else:
rootpath = "."
configure_logging(snakemake)
url = "https://www.destatis.de/EN/Themes/Economy/Prices/Publications/Downloads-Energy-Price-Trends/energy-price-trends-xlsx-5619002.xlsx?__blob=publicationFile"
to_fn = Path(rootpath) / Path(snakemake.output[0])
logger.info(f"Downloading monthly fuel prices from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False)
progress_retrieve(url, to_fn, disable=disable_progress)
logger.info(f"Monthly fuel prices available at {to_fn}")

View File

@ -613,6 +613,7 @@ if __name__ == "__main__":
"substation_lv",
"substation_off",
"geometry",
"underground",
]
n.buses.drop(remove, axis=1, inplace=True, errors="ignore")
n.lines.drop(remove, axis=1, errors="ignore", inplace=True)

View File

@ -33,7 +33,9 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _benchmark import memory_logger
from _helpers import configure_logging, update_config_with_sector_opts
from pypsa.descriptors import get_activity_mask
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
@ -47,10 +49,76 @@ def add_land_use_constraint(n, planning_horizons, config):
_add_land_use_constraint(n)
def add_land_use_constraint_perfect(n):
"""
Add global constraints for tech capacity limit.
"""
logger.info("Add land-use constraint for perfect foresight")
def compress_series(s):
def process_group(group):
if group.nunique() == 1:
return pd.Series(group.iloc[0], index=[None])
else:
return group
return s.groupby(level=[0, 1]).apply(process_group)
def new_index_name(t):
# Convert all elements to string and filter out None values
parts = [str(x) for x in t if x is not None]
# Join with space, but use a dash for the last item if not None
return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
def check_p_min_p_max(p_nom_max):
p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
p_nom_min = p_nom_min.reindex(p_nom_max.index)
check = (
p_nom_min.groupby(level=[0, 1]).sum()
> p_nom_max.groupby(level=[0, 1]).min()
)
if check.sum():
logger.warning(
f"summed p_min_pu values at node larger than technical potential {check[check].index}"
)
grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
ext_i = n.generators.p_nom_extendable
# get technical limit per node and investment period
p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max
# drop carriers without tech limit
p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
# carrier
carriers = p_nom_max.index.get_level_values(0).unique()
gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
n.generators.loc[gen_i, "p_nom_min"] = 0
# check minimum capacities
check_p_min_p_max(p_nom_max)
# drop multi entries in case p_nom_max stays constant in different periods
# p_nom_max = compress_series(p_nom_max)
# adjust name to fit syntax of nominal constraint per bus
df = p_nom_max.reset_index()
df["name"] = df.apply(
lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row["build_year"] is not None else ""),
axis=1,
)
for name in df.name.unique():
df_carrier = df[df.name == name]
bus = df_carrier.bus
n.buses.loc[bus, name] = df_carrier.p_nom_max.values
return n
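# Illustrative outcome: n.buses gains per-carrier columns such as
# "nom_max_solar_2030" holding the technical potential, a sketch of the
# intended mechanism assuming PyPSA reads nom_max_* bus columns as
# nominal capacity limits per bus and investment period.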
def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable
n.generators.loc[extendable_i, "p_nom_min"] = 0
ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
existing = (
n.generators.loc[ext_i, "p_nom"]
@ -67,7 +135,7 @@ def _add_land_use_constraint(n):
if len(existing_large):
logger.warning(
f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities"
adjust technical potential to existing capacities"
)
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
existing_large, "p_nom_min"
@ -79,7 +147,6 @@ def _add_land_use_constraint(n):
def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters
grouping_years = config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons
@ -113,7 +180,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
n.generators.p_nom_max.clip(lower=0, inplace=True)
def add_co2_sequestration_limit(n, limit=200):
def add_co2_sequestration_limit(n, config, limit=200):
"""
Add a global constraint on the amount of Mt CO2 that can be sequestered.
"""
@ -127,16 +194,146 @@ def add_co2_sequestration_limit(n, limit=200):
limit = float(o[o.find("seq") + 3 :]) * 1e6
break
n.add(
if not n.investment_periods.empty:
periods = n.investment_periods
names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
else:
periods = [np.nan]
names = pd.Index(["co2_sequestration_limit"])
n.madd(
"GlobalConstraint",
"co2_sequestration_limit",
names,
sense="<=",
constant=limit,
type="primary_energy",
carrier_attribute="co2_absorptions",
investment_period=periods,
)
def add_carbon_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "co2_limit"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
rhs = glc.constant
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
time_valid = int(glc.loc["investment_period"])
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
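# Illustrative reading: for an emission store (e.g. "co2 atmosphere"),
# the constraint bounds the difference between the store level at the end
# of the valid investment period and the level at the end of the previous
# period, i.e. the emissions added within that period, by the constant.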
def add_carbon_budget_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "Co2Budget"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
rhs = glc.constant
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
time_valid = int(glc.loc["investment_period"])
weighting = n.investment_period_weightings.loc[time_valid, "years"]
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] * weighting
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
def add_max_growth(n, config):
"""
Add maximum growth rates for different carriers.
"""
opts = snakemake.params["sector"]["limit_max_growth"]
# take maximum yearly difference between investment periods since historic growth is per year
factor = n.investment_period_weightings.years.max() * opts["factor"]
for carrier in opts["max_growth"].keys():
max_per_period = opts["max_growth"][carrier] * factor
logger.info(
f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
)
n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
for carrier in opts["max_relative_growth"].keys():
max_r_per_period = opts["max_relative_growth"][carrier]
logger.info(
f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
)
n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
return n
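# Illustrative example with assumed numbers: for 10-year investment
# periods, opts["factor"] = 1.3 and a historic growth of 16 GW/a for
# onwind, the cap is 10 * 1.3 * 16 = 208 GW per period, stored as
# 208e3 MW in n.carriers.max_growth.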
def add_retrofit_gas_boiler_constraint(n, snapshots):
"""
Allow retrofitting of existing gas boilers to H2 boilers.
"""
c = "Link"
logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
mask = n.links.carrier.str.contains("retrofitted H2 boiler")
h2_i = n.links[mask].index
n.links.loc[gas_i, "p_nom_extendable"] = True
p_nom = n.links.loc[gas_i, "p_nom"]
n.links.loc[gas_i, "p_nom"] = 0
# heat profile
cols = n.loads_t.p_set.columns[
n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")
]
profile = n.loads_t.p_set[cols].div(
n.loads_t.p_set[cols].groupby(level=0).max(), level=0
)
# handle the case where the max value is zero
profile.fillna(0, inplace=True)
profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
profile.columns = gas_i
rhs = profile.mul(p_nom)
dispatch = n.model["Link-p"]
active = get_activity_mask(n, c, snapshots, gas_i)
rhs = rhs[active]
p_gas = dispatch.sel(Link=gas_i)
p_h2 = dispatch.sel(Link=h2_i)
lhs = p_gas + p_h2
n.model.add_constraints(lhs == rhs, name="gas_retrofit")
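# Illustrative reading with assumed numbers: for an existing gas boiler
# with p_nom = 100 MW and a normalised heat profile value of 0.6 in a
# snapshot, the constraint fixes p_gas + p_h2 to 60 MW, so capacity can
# shift towards H2 while total heat output follows the demand profile.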
def prepare_network(
n,
solve_opts=None,
@ -197,9 +394,14 @@ def prepare_network(
if foresight == "myopic":
add_land_use_constraint(n, planning_horizons, config)
if foresight == "perfect":
n = add_land_use_constraint_perfect(n)
if snakemake.params["sector"]["limit_max_growth"]["enable"]:
n = add_max_growth(n, config)
if n.stores.carrier.eq("co2 stored").any():
limit = co2_sequestration_potential
add_co2_sequestration_limit(n, limit=limit)
add_co2_sequestration_limit(n, config, limit=limit)
return n
@ -591,48 +793,56 @@ def extra_functionality(n, snapshots):
add_EQ_constraints(n, o)
add_battery_constraints(n)
add_pipe_retrofit_constraint(n)
if n._multi_invest:
add_carbon_constraint(n, snapshots)
add_carbon_budget_constraint(n, snapshots)
add_retrofit_gas_boiler_constraint(n, snapshots)
def solve_network(n, config, solving, opts="", **kwargs):
set_of_options = solving["solver"]["options"]
solver_options = solving["solver_options"][set_of_options] if set_of_options else {}
solver_name = solving["solver"]["name"]
cf_solving = solving["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
max_iterations = cf_solving.get("max_iterations", 6)
transmission_losses = cf_solving.get("transmission_losses", 0)
kwargs["multi_investment_periods"] = (
True if config["foresight"] == "perfect" else False
)
kwargs["solver_options"] = (
solving["solver_options"][set_of_options] if set_of_options else {}
)
kwargs["solver_name"] = solving["solver"]["name"]
kwargs["extra_functionality"] = extra_functionality
kwargs["transmission_losses"] = cf_solving.get("transmission_losses", False)
kwargs["linearized_unit_commitment"] = cf_solving.get(
"linearized_unit_commitment", False
)
kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False)
rolling_horizon = cf_solving.pop("rolling_horizon", False)
skip_iterations = cf_solving.pop("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
# add to network for extra_functionality
n.config = config
n.opts = opts
skip_iterations = cf_solving.get("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
if skip_iterations:
status, condition = n.optimize(
solver_name=solver_name,
transmission_losses=transmission_losses,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
if rolling_horizon:
kwargs["horizon"] = cf_solving.get("horizon", 365)
kwargs["overlap"] = cf_solving.get("overlap", 0)
n.optimize.optimize_with_rolling_horizon(**kwargs)
status, condition = "", ""
elif skip_iterations:
status, condition = n.optimize(**kwargs)
else:
kwargs["track_iterations"] = (cf_solving.get("track_iterations", False),)
kwargs["min_iterations"] = (cf_solving.get("min_iterations", 4),)
kwargs["max_iterations"] = (cf_solving.get("max_iterations", 6),)
status, condition = n.optimize.optimize_transmission_expansion_iteratively(
solver_name=solver_name,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
transmission_losses=transmission_losses,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
**kwargs
)
if status != "ok":
if status != "ok" and not rolling_horizon:
logger.warning(
f"Solving status '{status}' with termination condition '{condition}'"
)
@ -642,6 +852,7 @@ def solve_network(n, config, solving, opts="", **kwargs):
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -654,7 +865,7 @@ if __name__ == "__main__":
opts="",
clusters="5",
ll="v1.5",
sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1",
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
)
configure_logging(snakemake)
@ -682,13 +893,18 @@ if __name__ == "__main__":
co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
)
n = solve_network(
n,
config=snakemake.config,
solving=snakemake.params.solving,
opts=opts,
log_fn=snakemake.log.solver,
)
with memory_logger(
filename=getattr(snakemake.log, "memory", None), interval=30.0
) as mem:
n = solve_network(
n,
config=snakemake.config,
solving=snakemake.params.solving,
opts=opts,
log_fn=snakemake.log.solver,
)
logger.info("Maximum memory usage: {}".format(mem.mem_usage))
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])