Merge branch 'master' into ukraine_hackathon

commit ea99f3c43e
.github/workflows/ci.yaml (vendored, 1 change)
@@ -83,6 +83,7 @@ jobs:
 snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
 snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
 snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
+snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime

 - name: Upload artifacts
   uses: actions/upload-artifact@v3
.gitignore (vendored, 15 changes)
@@ -8,6 +8,7 @@ __pycache__
 *dconf
 gurobi.log
 .vscode
+*.orig

 /bak
 /resources
@@ -28,18 +29,18 @@ dconf
 /data/links_p_nom.csv
 /data/*totals.csv
 /data/biomass*
-/data/emobility/
+/data/bundle-sector/emobility/
-/data/eea*
+/data/bundle-sector/eea*
-/data/jrc*
+/data/bundle-sector/jrc*
 /data/heating/
-/data/eurostat*
+/data/bundle-sector/eurostat*
 /data/odyssee/
 /data/transport_data.csv
-/data/switzerland*
+/data/bundle-sector/switzerland*
 /data/.nfs*
-/data/Industrial_Database.csv
+/data/bundle-sector/Industrial_Database.csv
 /data/retro/tabula-calculator-calcsetbuilding.csv
-/data/nuts*
+/data/bundle-sector/nuts*
 data/gas_network/scigrid-gas/
 data/costs_*.csv

.pre-commit-config.yaml

@@ -30,10 +30,10 @@ repos:

 # Find common spelling mistakes in comments and docstrings
 - repo: https://github.com/codespell-project/codespell
-  rev: v2.2.5
+  rev: v2.2.6
   hooks:
   - id: codespell
-    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
+    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes
     types_or: [python, rst, markdown]
     files: ^(scripts|doc)/

@@ -51,7 +51,7 @@ repos:

 # Formatting with "black" coding style
 - repo: https://github.com/psf/black
-  rev: 23.7.0
+  rev: 23.9.1
   hooks:
   # Format Python files
   - id: black
.readthedocs.yml

@@ -14,4 +14,3 @@ build:
 python:
   install:
   - requirements: doc/requirements.txt
-  system_packages: false
.sync-send (new file, 11 lines)

@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+rules
+scripts
+config
+config/test
+envs
+matplotlibrc
+Snakefile
Deleted file (21 lines)

@@ -1,21 +0,0 @@
-# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
-#
-# SPDX-License-Identifier: CC0-1.0
-
-.snakemake
-.git
-.pytest_cache
-.ipynb_checkpoints
-.vscode
-.DS_Store
-__pycache__
-*.pyc
-*.pyo
-*.ipynb
-notebooks
-doc
-cutouts
-data
-benchmarks
-*.nc
-configs
Deleted file (23 lines)

@@ -1,23 +0,0 @@
-# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
-#
-# SPDX-License-Identifier: CC0-1.0
-
-.snakemake
-.git
-.pytest_cache
-.ipynb_checkpoints
-.vscode
-.DS_Store
-__pycache__
-*.pyc
-*.pyo
-*.ipynb
-notebooks
-benchmarks
-logs
-resources*
-results
-networks*
-cutouts
-data/bundle
-doc
Snakefile (42 changes)
@@ -40,7 +40,7 @@ localrules:

 wildcard_constraints:
     simpl="[a-zA-Z0-9]*",
-    clusters="[0-9]+m?|all",
+    clusters="[0-9]+(m|c)?|all",
     ll="(v|c)([0-9\.]+|opt)",
     opts="[-+a-zA-Z0-9\.]*",
     sector_opts="[-+a-zA-Z0-9\.\s]*",
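The only functional change in this hunk widens the ``{clusters}`` wildcard to also accept a ``c`` suffix besides ``m``. A quick standalone check of the new pattern (the ``^...$`` anchoring is added here purely for illustration; Snakemake applies its own anchoring):

    import re

    # New wildcard constraint from the hunk above.
    pattern = re.compile(r"^([0-9]+(m|c)?|all)$")

    for wildcard in ["37", "37m", "37c", "all", "37x"]:
        print(wildcard, bool(pattern.match(wildcard)))
    # 37 True, 37m True, 37c True, all True, 37x False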
@@ -53,6 +53,7 @@ include: "rules/build_electricity.smk"
 include: "rules/build_sector.smk"
 include: "rules/solve_electricity.smk"
 include: "rules/postprocess.smk"
+include: "rules/validate.smk"


 if config["foresight"] == "overnight":

@@ -65,13 +66,31 @@ if config["foresight"] == "myopic":
     include: "rules/solve_myopic.smk"


+if config["foresight"] == "perfect":
+
+    include: "rules/solve_perfect.smk"
+
+
+rule all:
+    input:
+        RESULTS + "graphs/costs.pdf",
+    default_target: True
+
+
 rule purge:
-    message:
-        "Purging generated resources, results and docs. Downloads are kept."
     run:
-        rmtree("resources/", ignore_errors=True)
-        rmtree("results/", ignore_errors=True)
-        rmtree("doc/_build", ignore_errors=True)
+        import builtins
+
+        do_purge = builtins.input(
+            "Do you really want to delete all generated resources, \nresults and docs (downloads are kept)? [y/N] "
+        )
+        if do_purge == "y":
+            rmtree("resources/", ignore_errors=True)
+            rmtree("results/", ignore_errors=True)
+            rmtree("doc/_build", ignore_errors=True)
+            print("Purging generated resources, results and docs. Downloads are kept.")
+        else:
+            raise Exception(f"Input {do_purge}. Aborting purge.")

@@ -98,3 +117,14 @@ rule doc:
         directory("doc/_build"),
     shell:
         "make -C doc html"
+
+
+rule sync:
+    params:
+        cluster=f"{config['remote']['ssh']}:{config['remote']['path']}",
+    shell:
+        """
+        rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster}
+        rsync -uvarh --no-g {params.cluster}/results . || echo "No results directory, skipping rsync"
+        rsync -uvarh --no-g {params.cluster}/logs . || echo "No logs directory, skipping rsync"
+        """
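The reworked ``purge`` rule replaces unconditional deletion with an interactive confirmation, presumably via ``builtins.input`` because Snakemake shadows the name ``input`` inside ``run:`` blocks. Stripped of Snakemake, the pattern reduces to this minimal sketch (function name and shortened prompt are illustrative only):

    from shutil import rmtree

    def purge(paths=("resources/", "results/", "doc/_build")):
        # Ask once; anything other than an explicit "y" aborts.
        answer = input("Really delete all generated files? [y/N] ")
        if answer == "y":
            for path in paths:
                rmtree(path, ignore_errors=True)
            print("Purged generated resources, results and docs.")
        else:
            raise Exception(f"Input {answer}. Aborting purge.")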
config/config.default.yaml

@@ -10,6 +10,14 @@ logging:
   level: INFO
   format: '%(levelname)s:%(name)s:%(message)s'

+private:
+  keys:
+    entsoe_api:
+
+remote:
+  ssh: ""
+  path: ""
+
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
 run:
   name: ""

@@ -209,10 +217,14 @@ renewable:
     carriers: [ror, PHS, hydro]
     PHS_max_hours: 6
     hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
+    flatten_dispatch: false
+    flatten_dispatch_buffer: 0.2
     clip_min_inflow: 1.0

 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#conventional
 conventional:
+  unit_commitment: false
+  dynamic_fuel_price: false
   nuclear:
     p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name

@@ -450,6 +462,7 @@ sector:
   years_of_storage: 25
   co2_sequestration_potential: 200
   co2_sequestration_cost: 10
+  co2_sequestration_lifetime: 50
   co2_spatial: false
   co2network: false
   cc_fraction: 0.9

@@ -480,6 +493,20 @@ sector:
     OCGT: gas
   biomass_to_liquid: false
   biosng: false
+  limit_max_growth:
+    enable: false
+    # allowing 30% larger than max historic growth
+    factor: 1.3
+    max_growth: # unit GW
+      onwind: 16 # onshore max grow so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind
+      solar: 28 # solar max grow so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv
+      offwind-ac: 35 # offshore max grow so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/
+      offwind-dc: 35
+    max_relative_growth:
+      onwind: 3
+      solar: 3
+      offwind-ac: 3
+      offwind-dc: 3

 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
 industry:

@@ -532,11 +559,13 @@ industry:
   hotmaps_locate_missing: false
   reference_year: 2015


 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
 costs:
   year: 2030
   version: v0.6.0
   rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
+  social_discountrate: 0.02
   fill_values:
     FOM: 0
     VOM: 0

@@ -575,16 +604,12 @@ clustering:
     algorithm: kmeans
     feature: solar+onwind-time
     exclude_carriers: []
+    consider_efficiency_classes: false
   aggregation_strategies:
     generators:
-      p_nom_max: sum
-      p_nom_min: sum
-      p_min_pu: mean
-      marginal_cost: mean
       committable: any
       ramp_limit_up: max
       ramp_limit_down: max
-      efficiency: mean

 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
 solving:

@@ -592,13 +617,17 @@ solving:
   options:
     clip_p_max_pu: 1.e-2
     load_shedding: false
-    transmission_losses: 0
     noisy_costs: true
     skip_iterations: true
+    rolling_horizon: false
+    seed: 123
+    # options that go into the optimize function
     track_iterations: false
     min_iterations: 4
     max_iterations: 6
-    seed: 123
+    transmission_losses: 0
+    linearized_unit_commitment: true
+    horizon: 365

   solver:
     name: gurobi

@@ -626,7 +655,6 @@ solving:
       AggFill: 0
       PreDual: 0
      GURO_PAR_BARDENSETHRESH: 200
-      seed: 10 # Consistent seed for all plattforms
     gurobi-numeric-focus:
       name: gurobi
       NumericFocus: 3 # Favour numeric stability over speed

@@ -659,6 +687,7 @@ solving:
     glpk-default: {} # Used in CI

   mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2
+  walltime: "12:00:00"

 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#plotting
 plotting:

@@ -690,6 +719,8 @@ plotting:
     lines: "Transmission Lines"
     ror: "Run of River"
     load: "Load Shedding"
+    ac: "AC"
+    dc: "DC"

   tech_colors:
     # wind

@@ -749,6 +780,7 @@ plotting:
     gas pipeline new: '#a87c62'
     # oil
     oil: '#c9c9c9'
+    imported oil: '#a3a3a3'
     oil boiler: '#adadad'
     residential rural oil boiler: '#a9a9a9'
     services rural oil boiler: '#a5a5a5'

@@ -880,6 +912,7 @@ plotting:
     H2 for shipping: "#ebaee0"
     H2: '#bf13a0'
     hydrogen: '#bf13a0'
+    retrofitted H2 boiler: '#e5a0d9'
     SMR: '#870c71'
     SMR CC: '#4f1745'
     H2 liquefaction: '#d647bd'
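The new ``limit_max_growth`` block caps per-carrier expansion relative to historic maximum annual additions. If the ``factor`` simply scales the listed ``max_growth`` values (an assumption for this sketch; the actual constraint is applied in the solve scripts, which are not part of this diff), the effective caps work out as follows:

    # Illustrative only: effective growth caps under the defaults above.
    factor = 1.3  # allow 30% more than the historic maximum
    max_growth_gw = {"onwind": 16, "solar": 28, "offwind-ac": 35, "offwind-dc": 35}

    for carrier, historic_max in max_growth_gw.items():
        print(f"{carrier}: up to {factor * historic_max:.1f} GW per investment period")
    # onwind: up to 20.8 GW, solar: up to 36.4 GW, offwind-ac/dc: up to 45.5 GW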
config/config.perfect.yaml (new file, 43 lines)

@@ -0,0 +1,43 @@
+# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+run:
+  name: "perfect"
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
+foresight: perfect
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
+# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
+scenario:
+  simpl:
+  - ''
+  ll:
+  - v1.0
+  clusters:
+  - 37
+  opts:
+  - ''
+  sector_opts:
+  - 1p5-4380H-T-H-B-I-A-solar+p3-dist1
+  - 1p7-4380H-T-H-B-I-A-solar+p3-dist1
+  - 2p0-4380H-T-H-B-I-A-solar+p3-dist1
+  planning_horizons:
+  - 2020
+  - 2030
+  - 2040
+  - 2050
+
+
+# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
+co2_budget:
+  # update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
+  1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
+  1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
+  1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
+  2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
+
+
+sector:
+  min_part_load_fischer_tropsch: 0
+  min_part_load_methanolisation: 0
config/config.ua.yaml (new file, 24 lines)

@@ -0,0 +1,24 @@
+scenario:
+  simpl:
+  - ''
+  ll:
+  - v1.5
+  clusters:
+  - 6
+  opts:
+  - '24H'
+  sector_opts:
+  - ''
+  planning_horizons:
+  - 2050
+
+countries:
+- UA
+
+enable:
+  retrieve: true
+  retrieve_databundle: true
+  retrieve_sector_databundle: false
+  retrieve_cost_data: true
+  retrieve_cutout: false
+
config/config.validation.yaml (new file, 98 lines)

@@ -0,0 +1,98 @@
+# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+run:
+  name: "validation"
+
+scenario:
+  ll:
+  - v1.0
+  clusters:
+  - 37
+  opts:
+  - 'Ept'
+
+snapshots:
+  start: "2019-01-01"
+  end: "2020-01-01"
+  inclusive: 'left'
+
+enable:
+  retrieve_cutout: false
+
+electricity:
+  co2limit: 1e9
+
+  extendable_carriers:
+    Generator: []
+    StorageUnit: []
+    Store: []
+    Link: []
+
+  powerplants_filter: not (DateOut < 2019)
+
+  conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
+  renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
+
+  estimate_renewable_capacities:
+    year: 2019
+
+atlite:
+  default_cutout: europe-2019-era5
+  cutouts:
+    europe-2019-era5:
+      module: era5
+      x: [-12., 35.]
+      y: [33., 72]
+      dx: 0.3
+      dy: 0.3
+      time: ['2019', '2019']
+
+renewable:
+  onwind:
+    cutout: europe-2019-era5
+  offwind-ac:
+    cutout: europe-2019-era5
+  offwind-dc:
+    cutout: europe-2019-era5
+  solar:
+    cutout: europe-2019-era5
+  hydro:
+    cutout: europe-2019-era5
+    flatten_dispatch: 0.01
+
+conventional:
+  unit_commitment: false
+  dynamic_fuel_price: true
+  nuclear:
+    p_max_pu: "data/nuclear_p_max_pu.csv"
+  biomass:
+    p_max_pu: 0.65
+
+load:
+  power_statistics: false
+
+lines:
+  s_max_pu: 0.23
+  under_construction: 'remove'
+
+links:
+  include_tyndp: false
+
+costs:
+  year: 2020
+  emission_prices:
+    co2: 25
+
+clustering:
+  simplify_network:
+    exclude_carriers: [oil, coal, lignite, OCGT, CCGT]
+  cluster_network:
+    consider_efficiency_classes: true
+
+solving:
+  options:
+    load_shedding: true
+    rolling_horizon: false
+    horizon: 1000
+    overlap: 48
config/test/config.perfect.yaml (new file, 91 lines)

@@ -0,0 +1,91 @@
+# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+tutorial: true
+
+run:
+  name: "test-sector-perfect"
+  disable_progressbar: true
+  shared_resources: true
+  shared_cutouts: true
+
+foresight: perfect
+
+scenario:
+  ll:
+  - v1.0
+  clusters:
+  - 5
+  sector_opts:
+  - 8760H-T-H-B-I-A-solar+p3-dist1
+  planning_horizons:
+  - 2030
+  - 2040
+  - 2050
+
+countries: ['BE']
+
+snapshots:
+  start: "2013-03-01"
+  end: "2013-03-08"
+
+electricity:
+  co2limit: 100.e+6
+
+  extendable_carriers:
+    Generator: [OCGT]
+    StorageUnit: [battery]
+    Store: [H2]
+    Link: [H2 pipeline]
+
+  renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
+
+sector:
+  min_part_load_fischer_tropsch: 0
+  min_part_load_methanolisation: 0
+atlite:
+  default_cutout: be-03-2013-era5
+  cutouts:
+    be-03-2013-era5:
+      module: era5
+      x: [4., 15.]
+      y: [46., 56.]
+      time: ["2013-03-01", "2013-03-08"]
+
+renewable:
+  onwind:
+    cutout: be-03-2013-era5
+  offwind-ac:
+    cutout: be-03-2013-era5
+    max_depth: false
+  offwind-dc:
+    cutout: be-03-2013-era5
+    max_depth: false
+  solar:
+    cutout: be-03-2013-era5
+
+industry:
+  St_primary_fraction:
+    2020: 0.8
+    2030: 0.6
+    2040: 0.5
+    2050: 0.4
+
+solving:
+  solver:
+    name: glpk
+    options: glpk-default
+  mem: 4000
+
+plotting:
+  map:
+    boundaries:
+  eu_node_location:
+    x: -5.5
+    y: 46.
+  costs_max: 1000
+  costs_threshold: 0.0000001
+  energy_max:
+  energy_min:
+  energy_threshold: 0.000001
data/nuclear_p_max_pu.csv

@@ -1,16 +1,16 @@
 country,factor
-BE,0.65
+BE,0.796
-BG,0.89
+BG,0.894
-CZ,0.82
+CZ,0.827
-FI,0.92
+FI,0.936
-FR,0.70
+FR,0.71
-DE,0.88
+DE,0.871
-HU,0.90
+HU,0.913
-NL,0.86
+NL,0.868
-RO,0.92
+RO,0.909
-SK,0.89
+SK,0.9
-SI,0.94
+SI,0.913
-ES,0.89
+ES,0.897
-SE,0.82
+SE,0.851
-CH,0.86
+CH,0.87
-GB,0.67
+GB,0.656
data/unit_commitment.csv (new file, 8 lines)

@@ -0,0 +1,8 @@
+attribute,OCGT,CCGT,coal,lignite,nuclear
+ramp_limit_up,1,1,1,1,0.3
+ramp_limit_start_up,0.2,0.45,0.38,0.4,0.5
+ramp_limit_shut_down,0.2,0.45,0.38,0.4,0.5
+p_min_pu,0.2,0.45,0.325,0.4,0.5
+min_up_time,,3,5,7,6
+min_down_time,,2,6,6,10
+start_up_cost,9.6,34.2,35.64,19.14,16.5
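The table stores one unit-commitment attribute per row and one conventional carrier per column. A hedged sketch of how such a table can be read and applied with pandas (the real wiring lives in the workflow scripts and may differ; the ``generators`` frame below is a made-up stand-in):

    import pandas as pd

    # Rows are generator attributes, columns are carriers.
    uc = pd.read_csv("data/unit_commitment.csv", index_col=0)

    # Example lookup: minimum up time of CCGT plants (hours).
    print(uc.at["min_up_time", "CCGT"])  # 3.0

    # Illustrative application to a hypothetical generators table:
    # for each carrier column, copy over the attributes that are defined.
    generators = pd.DataFrame({"carrier": ["CCGT", "coal"]}, index=["g1", "g2"])
    for carrier in uc.columns:
        idx = generators.index[generators.carrier == carrier]
        for attr, value in uc[carrier].dropna().items():
            generators.loc[idx, attr] = value
    print(generators)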
doc/configtables/clustering.csv

@@ -1,17 +1,18 @@
 ,Unit,Values,Description
 simplify_network,,,
 -- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
 -- algorithm,str,"One of {‘kmeans’, ‘hac’, ‘modularity‘}",
 -- feature,str,"Str in the format ‘carrier1+carrier2+...+carrierN-X’, where CarrierI can be from {‘solar’, ‘onwind’, ‘offwind’, ‘ror’} and X is one of {‘cap’, ‘time’}.",
 -- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empy list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
--- remove stubs,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated. Defaults to true."
+-- remove stubs,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated. Defaults to true.
--- remove_stubs_across_borders,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true."
+-- remove_stubs_across_borders,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true.
 cluster_network,,,
 -- algorithm,str,"One of {‘kmeans’, ‘hac’}",
 -- feature,str,"Str in the format ‘carrier1+carrier2+...+carrierN-X’, where CarrierI can be from {‘solar’, ‘onwind’, ‘offwind’, ‘ror’} and X is one of {‘cap’, ‘time’}.",
 -- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empy list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
+-- consider_efficiency_classes,bool,"{'true','false'}","Aggregated each carriers into the top 10-quantile (high), the bottom 90-quantile (low), and everything in between (medium)."
 aggregation_strategies,,,
 -- generators,,,
 -- -- {key},str,"{key} can be any of the component of the generator (str). It’s value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
 -- buses,,,
 -- -- {key},str,"{key} can be any of the component of the bus (str). It’s value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
doc/configtables/conventional.csv

@@ -1,3 +1,5 @@
 ,Unit,Values,Description
+unit_commitment ,bool,"{true, false}","Allow the overwrite of ramp_limit_up, ramp_limit_start_up, ramp_limit_shut_down, p_min_pu, min_up_time, min_down_time, and start_up_cost of conventional generators. Refer to the CSV file „unit_commitment.csv“."
+dynamic_fuel_price ,bool,"{true, false}","Consider the monthly fluctuating fuel prices for each conventional generator. Refer to the CSV file ""data/validation/monthly_fuel_price.csv""."
-{name},--,"string","For any carrier/technology overwrite attributes as listed below."
+{name},--,string,For any carrier/technology overwrite attributes as listed below.
--- {attribute},--,"string or float","For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."
+-- {attribute},--,string or float,"For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."
doc/configtables/hydro.csv

@@ -1,6 +1,8 @@
 ,Unit,Values,Description
-cutout,--,"Must be 'europe-2013-era5'","Specifies the directory where the relevant weather data ist stored."
+cutout,--,Must be 'europe-2013-era5',Specifies the directory where the relevant weather data ist stored.
 carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams."
-PHS_max_hours,h,float,"Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
+PHS_max_hours,h,float,Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
-hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}","Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
+hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}",Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
-clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero."
+flatten_dispatch,bool,"{true, false}",Consider an upper limit for the hydro dispatch. The limit is given by the average capacity factor plus the buffer given in ``flatten_dispatch_buffer``
+flatten_dispatch_buffer,--,float,"If ``flatten_dispatch`` is true, specify the value added above the average capacity factor."
+clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero."
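The new ``flatten_dispatch`` option, as documented above, caps hydro output at the average capacity factor plus a buffer. A minimal sketch of that cap, assuming a pandas series of per-unit availability (values invented; the workflow implementation may differ in detail):

    import pandas as pd

    # Hypothetical per-unit hydro availability series.
    p_max_pu = pd.Series([0.3, 0.9, 0.5, 0.1, 0.7])

    buffer = 0.2  # flatten_dispatch_buffer
    cap = p_max_pu.mean() + buffer  # average capacity factor plus buffer

    flattened = p_max_pu.clip(upper=cap)
    print(cap)                 # 0.7
    print(flattened.tolist())  # [0.3, 0.7, 0.5, 0.1, 0.7]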
doc/configtables/opts.csv

@@ -3,6 +3,7 @@ Trigger, Description, Definition, Status
 ``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use
 ``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`__, In active use
 ``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L24>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L158>`__, In active use
+``Ept``, Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``, In active use
 ``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use
 ``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use
 ``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use
doc/configtables/solving.csv

@@ -1,17 +1,19 @@
 ,Unit,Values,Description
 options,,,
--- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
--- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
--- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0,011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0,11)`."
--- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
--- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
--- nhours,--,int,"Specifies the :math:`n` first snapshots to take into account. Must be less than the total number of snapshots. Rather recommended only for debugging."
--- clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero."
--- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
--- track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)"
--- seed,--,int,"Random seed for increased deterministic behaviour."
+-- clip_p_max_pu,p.u.,float,To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero.
+-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
+-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0,011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0,11)`."
+-- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
+-- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size `horizon` which are solved consecutively."
+-- seed,--,int,Random seed for increased deterministic behaviour.
+-- track_iterations,bool,"{'true','false'}",Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)
+-- min_iterations,--,int,Minimum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run.
+-- max_iterations,--,int,Maximum number of solving iterations in between which resistance and reactence (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run.
+-- transmission_losses,int,[0-9],"Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
+-- linearized_unit_commitment,bool,"{'true','false'}",Whether to optimise using the linearized unit commitment formulation.
+-- horizon,--,int,Number of snapshots to consider in each iteration. Defaults to 100.
 solver,,,
--- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible","Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow."
+-- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow.
--- options,--,"Key listed under ``solver_options``.","Link to specific parameter settings."
+-- options,--,Key listed under ``solver_options``.,Link to specific parameter settings.
-solver_options,,"dict","Dictionaries with solver-specific parameter settings."
+solver_options,,dict,Dictionaries with solver-specific parameter settings.
-mem,MB,"int","Estimated maximum memory requirement for solving networks."
+mem,MB,int,Estimated maximum memory requirement for solving networks.
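The newly documented ``rolling_horizon`` mode splits the snapshot range into consecutive slices of ``horizon`` snapshots. A minimal sketch of the slicing logic, with an assumed overlap parameter as used in ``config/config.validation.yaml`` (illustrative only; the actual implementation lives in the solve scripts):

    def rolling_horizon_slices(n_snapshots, horizon, overlap=0):
        """Yield (start, end) index pairs covering the snapshot range."""
        step = horizon - overlap
        for start in range(0, n_snapshots, step):
            yield start, min(start + horizon, n_snapshots)
            if start + horizon >= n_snapshots:
                break

    # Example: 8760 hourly snapshots, horizon 1000, overlap 48
    # (the values used in config/config.validation.yaml).
    for start, end in rolling_horizon_slices(8760, 1000, 48):
        print(start, end)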
doc/configtables/toplevel.csv

@@ -1,6 +1,12 @@
 ,Unit,Values,Description
-version,--,0.x.x,"Version of PyPSA-Eur. Descriptive only."
+version,--,0.x.x,Version of PyPSA-Eur. Descriptive only.
-tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set."
+tutorial,bool,"{true, false}",Switch to retrieve the tutorial data set instead of the full data set.
 logging,,,
 -- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only"
--- format,--,"","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes."
+-- format,--,,Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes.
+private,,,
+-- keys,,,
+-- -- entsoe_api,--,,Optionally specify the ENTSO-E API key. See the guidelines to get `ENTSO-E API key <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_
+remote,,,
+-- ssh,--,,Optionally specify the SSH of a remote cluster to be synchronized.
+-- path,--,,Optionally specify the file path within the remote cluster to be synchronized.
doc/configuration.rst

@@ -16,12 +16,13 @@ PyPSA-Eur has several configuration options which are documented in this section
 Top-level configuration
 =======================

+"Private" refers to local, machine-specific settings or data meant for personal use, not to be shared. "Remote" indicates the address of a server used for data exchange, often for clusters and data pushing/pulling.

 .. literalinclude:: ../config/config.default.yaml
    :language: yaml
    :start-at: version:
    :end-before: # docs


 .. csv-table::
    :header-rows: 1
    :widths: 22,7,22,33
doc/foresight.rst

@@ -41,10 +41,10 @@ Perfect foresight scenarios

 .. warning::

-  Perfect foresight is currently under development and not yet implemented.
+  Perfect foresight is currently implemented as a first test version.

-For running perfect foresight scenarios, in future versions you will be able to
-set in the ``config/config.yaml``:
+For running perfect foresight scenarios, you can adjust the
+``config/config.perfect.yaml``:

 .. code:: yaml

doc/index.rst

@@ -280,6 +280,7 @@ The PyPSA-Eur workflow is continuously tested for Linux, macOS and Windows (WSL

    release_notes
    licenses
+   validation
    limitations
    contributing
    support
doc/release_notes.rst

@@ -12,6 +12,29 @@ Upcoming Release

 * Updated Global Energy Monitor LNG terminal data to March 2023 version.

+* For industry distribution, use EPRTR as fallback if ETS data is not available.
+
+* The minimum capacity for renewable generators when using the myopic option has been fixed.
+
+* Files downloaded from zenodo are now write-protected to prevent accidental re-download.
+
+* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.
+
+* New feature multi-decade optimisation with perfect foresight.
+
+* It is now possible to specify years for biomass potentials which do not exist
+  in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated.
+
+* In pathway mode, the biomass potential is linked to the investment year.
+
+* Rule ``purge`` now initiates a dialog to confirm if purge is desired.
+
+
+**Bugs and Compatibility**
+
+* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv`` was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732)
+

 PyPSA-Eur 0.8.1 (27th July 2023)
 ================================

doc/retrieve.rst

@@ -83,7 +83,7 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already
 Rule ``retrieve_electricity_demand``
 ====================================

-This rule downloads hourly electric load data for each country from the `OPSD platform <data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.
+This rule downloads hourly electric load data for each country from the `OPSD platform <https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.

 **Relevant Settings**

doc/tutorial.rst (159 changes)
@@ -133,89 +133,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
     graph[bgcolor=white, margin=0];
     node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
     edge[penwidth=2, color=grey];
-    0[label = "solve_network", color = "0.21 0.6 0.85", style="rounded"];
-    1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.02 0.6 0.85", style="rounded"];
-    2[label = "add_extra_components", color = "0.37 0.6 0.85", style="rounded"];
-    3[label = "cluster_network\nclusters: 6", color = "0.39 0.6 0.85", style="rounded"];
-    4[label = "simplify_network\nsimpl: ", color = "0.11 0.6 0.85", style="rounded"];
-    5[label = "add_electricity", color = "0.23 0.6 0.85", style="rounded"];
-    6[label = "build_renewable_profiles\ntechnology: onwind", color = "0.57 0.6 0.85", style="rounded"];
-    7[label = "base_network", color = "0.09 0.6 0.85", style="rounded"];
-    8[label = "build_shapes", color = "0.41 0.6 0.85", style="rounded"];
-    9[label = "retrieve_databundle", color = "0.28 0.6 0.85", style="rounded"];
-    10[label = "retrieve_natura_raster", color = "0.62 0.6 0.85", style="rounded"];
-    11[label = "build_bus_regions", color = "0.53 0.6 0.85", style="rounded"];
-    12[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.05 0.6 0.85", style="rounded,dashed"];
-    13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.57 0.6 0.85", style="rounded"];
-    14[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"];
-    15[label = "retrieve_ship_raster", color = "0.07 0.6 0.85", style="rounded,dashed"];
-    16[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.05 0.6 0.85", style="rounded,dashed"];
-    17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.57 0.6 0.85", style="rounded"];
-    18[label = "build_renewable_profiles\ntechnology: solar", color = "0.57 0.6 0.85", style="rounded"];
-    19[label = "build_hydro_profile", color = "0.44 0.6 0.85", style="rounded"];
-    20[label = "retrieve_cost_data", color = "0.30 0.6 0.85", style="rounded"];
-    21[label = "build_powerplants", color = "0.16 0.6 0.85", style="rounded"];
-    22[label = "build_electricity_demand", color = "0.00 0.6 0.85", style="rounded"];
-    23[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded,dashed"];
-    1 -> 0
-    2 -> 1
-    20 -> 1
-    3 -> 2
-    20 -> 2
-    4 -> 3
-    20 -> 3
-    5 -> 4
-    20 -> 4
-    11 -> 4
-    6 -> 5
-    13 -> 5
-    17 -> 5
-    18 -> 5
-    19 -> 5
-    7 -> 5
-    20 -> 5
-    11 -> 5
-    21 -> 5
-    9 -> 5
-    22 -> 5
-    8 -> 5
-    7 -> 6
-    9 -> 6
-    10 -> 6
-    8 -> 6
-    11 -> 6
-    12 -> 6
-    8 -> 7
-    9 -> 8
-    8 -> 11
-    7 -> 11
-    7 -> 13
-    9 -> 13
-    10 -> 13
-    14 -> 13
-    8 -> 13
-    11 -> 13
-    12 -> 13
-    15 -> 14
-    12 -> 14
-    16 -> 14
-    7 -> 17
-    9 -> 17
-    10 -> 17
-    14 -> 17
-    8 -> 17
-    11 -> 17
-    12 -> 17
-    7 -> 18
-    9 -> 18
-    10 -> 18
-    8 -> 18
-    11 -> 18
-    16 -> 18
-    8 -> 19
-    12 -> 19
-    7 -> 21
-    23 -> 22
+    0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"];
+    1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"];
+    2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"];
+    3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"];
+    4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"];
+    5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"];
+    6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"];
+    7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"];
+    8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"];
+    9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"];
+    10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"];
+    11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"];
+    12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"];
+    13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"];
+    14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"];
+    15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"];
+    16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"];
+    17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"];
+    18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"];
+    19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"];
+    20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"];
+    21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"];
+    22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"];
+    1 -> 0
+    22 -> 0
+    2 -> 1
+    18 -> 1
+    3 -> 2
+    18 -> 2
+    4 -> 3
+    18 -> 3
+    5 -> 4
+    18 -> 4
+    11 -> 4
+    6 -> 5
+    12 -> 5
+    13 -> 5
+    16 -> 5
+    7 -> 5
+    17 -> 5
+    18 -> 5
+    11 -> 5
+    19 -> 5
+    9 -> 5
+    20 -> 5
+    8 -> 5
+    7 -> 6
+    9 -> 6
+    10 -> 6
+    8 -> 6
+    11 -> 6
+    8 -> 7
+    9 -> 8
+    8 -> 11
+    7 -> 11
+    7 -> 12
+    9 -> 12
+    10 -> 12
+    8 -> 12
+    11 -> 12
+    7 -> 13
+    9 -> 13
+    10 -> 13
+    14 -> 13
+    8 -> 13
+    11 -> 13
+    15 -> 14
+    7 -> 16
+    9 -> 16
+    10 -> 16
+    14 -> 16
+    8 -> 16
+    11 -> 16
+    7 -> 17
+    7 -> 19
+    21 -> 20
     }

doc/tutorial_sector.rst

@@ -59,7 +59,7 @@ To run an overnight / greenfiled scenario with the specifications above, run

 .. code:: bash

-    snakemake -call --configfile config/test/config.overnight.yaml all
+    snakemake -call all --configfile config/test/config.overnight.yaml

 which will result in the following *additional* jobs ``snakemake`` wants to run
 on top of those already included in the electricity-only tutorial:

@@ -318,7 +318,7 @@ To run a myopic foresight scenario with the specifications above, run

 .. code:: bash

-    snakemake -call --configfile config/test/config.myopic.yaml all
+    snakemake -call all --configfile config/test/config.myopic.yaml

 which will result in the following *additional* jobs ``snakemake`` wants to run:
53
doc/validation.rst
Normal file
53
doc/validation.rst
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
..
|
||||||
|
SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors
|
||||||
|
|
||||||
|
SPDX-License-Identifier: CC-BY-4.0
|
||||||
|
|
||||||
|
##########################################
|
||||||
|
Validation
|
||||||
|
##########################################
|
||||||
|
|
||||||
|
The PyPSA-Eur model workflow provides a built-in mechanism for validation. This allows users to contrast the outcomes of network optimization against the historical behaviour of the European power system. The snakemake rule ``validate_elec_networks`` enables this by generating comparative figures that encapsulate key data points such as dispatch carrier, cross-border flows, and market prices per price zone.
|
||||||
|
|
||||||
|
These comparisons utilize data from the 2019 ENTSO-E Transparency Platform. To enable this, an ENTSO-E API key must be inserted into the ``config.yaml`` file. Detailed steps for this process can be found in the user guide `here <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_.
|
||||||
|
|
||||||
|
Once the API key is set, the validation workflow can be triggered by running the following command:
|
||||||
|
|
||||||
|
snakemake validate_elec_networks --configfile config/config.validation.yaml -c8
|
||||||
|
|
||||||
|
|
||||||
|
The configuration file `config/config.validation.yaml` contains the following parameters:
|
||||||
|
|
||||||
|
.. literalinclude:: ../config/config.validation.yaml
|
||||||
|
:language: yaml
|
||||||
|
|
||||||
|
The setup uses monthly varying fuel prices for gas, lignite, coal and oil as well as CO2 prices, which are created by the script ``build_monthly_prices``. Upon completion of the validation process, the resulting network and generated figures will be stored in the ``results/validation`` directory for further analysis.
|
||||||
Results
=======

At the time of writing, the comparison with historical data shows partially accurate, partially improvable results. The following figures show the comparison of the dispatch of the different carriers.

.. image:: ../graphics/validation_seasonal_operation_area_elec_s_37_ec_lv1.0_Ept.png
   :width: 100%
   :align: center

.. image:: ../graphics/validation_production_bar_elec_s_37_ec_lv1.0_Ept.png
   :width: 100%
   :align: center

Issues and possible improvements
--------------------------------

**Overestimated dispatch of wind and solar:** Renewable potentials of wind and solar are slightly overestimated in the model. This leads to a higher dispatch of these carriers than in the historical data. In particular, the solar dispatch during winter is overestimated.

**Coal - lignite fuel switch:** The model shows a fuel switch from coal to lignite. This might result from subsidies for lignite and coal that are not captured in the model. To correct the fuel switch from coal to lignite, a manual cost correction was added to the script ``build_monthly_prices``.

**Planned outages of nuclear power plants:** Planned outages of nuclear power plants are not captured in the model. This leads to an underestimated dispatch of nuclear power plants in winter and an overestimated dispatch in summer. This point is hard to fix, since the planned outages are not published on the ENTSO-E Transparency Platform.

**False classification of run-of-river power plants:** Some run-of-river power plants are classified as hydro power plants in the model. This leads to a general overestimation of the hydro power dispatch. In particular, Swedish hydro power plants are overestimated.

**Load shedding:** Due to constrained NTCs (cross-border capacities), the model has to shed load in some regions. This leads to high market prices in those regions, which drive the average market price up. Further fine-tuning of the NTCs is needed to avoid load shedding.
envs/environment.yaml
@ -57,5 +57,5 @@ dependencies:

   - pip:
-    - tsam>=1.1.0
+    - git+https://github.com/fneum/tsam.git@performance
-    - pypsa>=0.25.1
+    - pypsa>=0.25.2

graphics/validation_production_bar_elec_s_37_ec_lv1.0_Ept.png (new binary file, 99 KiB; not shown)
graphics/validation_seasonal_operation_area_elec_s_37_ec_lv1.0_Ept.png (new binary file, 801 KiB; not shown)

matplotlibrc
@ -4,3 +4,4 @@
 font.family: sans-serif
 font.sans-serif: Ubuntu, DejaVu Sans
 image.cmap: viridis
+figure.autolayout : True
rules/build_electricity.smk
@ -62,6 +62,9 @@ rule base_network:
     params:
         countries=config["countries"],
         snapshots=config["snapshots"],
+        lines=config["lines"],
+        links=config["links"],
+        transformers=config["transformers"],
     input:
         eg_buses="data/entsoegridkit/buses.csv",
         eg_lines="data/entsoegridkit/lines.csv",

@ -298,6 +301,24 @@ rule build_renewable_profiles:
         "../scripts/build_renewable_profiles.py"


+rule build_monthly_prices:
+    input:
+        co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
+        fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx",
+    output:
+        co2_price=RESOURCES + "co2_price.csv",
+        fuel_price=RESOURCES + "monthly_fuel_price.csv",
+    log:
+        LOGS + "build_monthly_prices.log",
+    threads: 1
+    resources:
+        mem_mb=5000,
+    conda:
+        "../envs/environment.yaml"
+    script:
+        "../scripts/build_monthly_prices.py"
+
+
 rule build_hydro_profile:
     params:
         hydro=config["renewable"]["hydro"],

@ -349,7 +370,7 @@ rule add_electricity:
         countries=config["countries"],
         renewable=config["renewable"],
         electricity=config["electricity"],
-        conventional=config.get("conventional", {}),
+        conventional=config["conventional"],
         costs=config["costs"],
     input:
         **{

@ -359,6 +380,7 @@ rule add_electricity:
         **{
             f"conventional_{carrier}_{attr}": fn
             for carrier, d in config.get("conventional", {None: {}}).items()
+            if carrier in config["electricity"]["conventional_carriers"]
             for attr, fn in d.items()
             if str(fn).startswith("data/")
         },

@ -371,6 +393,10 @@ rule add_electricity:
         powerplants=RESOURCES + "powerplants.csv",
         hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
         geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
+        unit_commitment="data/unit_commitment.csv",
+        fuel_price=RESOURCES + "monthly_fuel_price.csv"
+        if config["conventional"]["dynamic_fuel_price"]
+        else [],
         load=RESOURCES + "load.csv",
         nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
         ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",

@ -382,7 +408,7 @@ rule add_electricity:
         BENCHMARKS + "add_electricity"
     threads: 1
     resources:
-        mem_mb=5000,
+        mem_mb=10000,
     conda:
         "../envs/environment.yaml"
     script:

@ -416,7 +442,7 @@ rule simplify_network:
         BENCHMARKS + "simplify_network/elec_s{simpl}"
     threads: 1
     resources:
-        mem_mb=4000,
+        mem_mb=12000,
     conda:
         "../envs/environment.yaml"
     script:

@ -457,7 +483,7 @@ rule cluster_network:
         BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}"
     threads: 1
     resources:
-        mem_mb=6000,
+        mem_mb=10000,
     conda:
         "../envs/environment.yaml"
     script:

@ -480,7 +506,7 @@ rule add_extra_components:
         BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec"
     threads: 1
     resources:
-        mem_mb=3000,
+        mem_mb=4000,
     conda:
         "../envs/environment.yaml"
     script:

@ -499,6 +525,7 @@ rule prepare_network:
     input:
         RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
         tech_costs=COSTS,
+        co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [],
     output:
         RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
     log:
rules/build_sector.smk
@ -242,9 +242,9 @@ rule build_energy_totals:
         energy=config["energy"],
     input:
         nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
-        co2="data/eea/UNFCCC_v23.csv",
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
-        swiss="data/switzerland-sfoe/switzerland-new_format.csv",
+        swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv",
-        idees="data/jrc-idees-2015",
+        idees="data/bundle-sector/jrc-idees-2015",
         district_heat_share="data/district_heat_share.csv",
         eurostat=input_eurostat,
     output:

@ -272,7 +272,7 @@ rule build_biomass_potentials:
             "https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx",
             keep_local=True,
         ),
-        nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",  # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
+        nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",  # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"),
         swiss_cantons=ancient("data/bundle/ch_cantons.csv"),

@ -280,22 +280,23 @@ rule build_biomass_potentials:
         country_shapes=RESOURCES + "country_shapes.geojson",
     output:
         biomass_potentials_all=RESOURCES
-        + "biomass_potentials_all_s{simpl}_{clusters}.csv",
+        + "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv",
-        biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
+        biomass_potentials=RESOURCES
+        + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
     threads: 1
     resources:
         mem_mb=1000,
     log:
-        LOGS + "build_biomass_potentials_s{simpl}_{clusters}.log",
+        LOGS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log",
     benchmark:
-        BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}"
+        BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}"
     conda:
         "../envs/environment.yaml"
     script:
         "../scripts/build_biomass_potentials.py"


-if config["sector"]["biomass_transport"]:
+if config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]:

     rule build_biomass_transport_costs:
         input:

@ -320,9 +321,8 @@ if config["sector"]["biomass_transport"]:
     build_biomass_transport_costs_output = rules.build_biomass_transport_costs.output


-if not config["sector"]["biomass_transport"]:
+if not (config["sector"]["biomass_transport"] or config["sector"]["biomass_spatial"]):
     # this is effectively an `else` statement which is however not liked by snakefmt
     build_biomass_transport_costs_output = {}

@ -367,7 +367,7 @@ if not config["sector"]["regional_co2_sequestration_potential"]["enable"]:

 rule build_salt_cavern_potentials:
     input:
-        salt_caverns="data/h2_salt_caverns_GWh_per_sqkm.geojson",
+        salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson",
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
     output:

@ -389,7 +389,7 @@ rule build_ammonia_production:
     params:
         countries=config["countries"],
     input:
-        usgs="data/myb1-2017-nitro.xls",
+        usgs="data/bundle-sector/myb1-2017-nitro.xls",
     output:
         ammonia_production=RESOURCES + "ammonia_production.csv",
     threads: 1

@ -411,7 +411,7 @@ rule build_industry_sector_ratios:
         ammonia=config["sector"].get("ammonia", False),
     input:
         ammonia_production=RESOURCES + "ammonia_production.csv",
-        idees="data/jrc-idees-2015",
+        idees="data/bundle-sector/jrc-idees-2015",
     output:
         industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv",
     threads: 1

@ -433,8 +433,8 @@ rule build_industrial_production_per_country:
         countries=config["countries"],
     input:
         ammonia_production=RESOURCES + "ammonia_production.csv",
-        jrc="data/jrc-idees-2015",
+        jrc="data/bundle-sector/jrc-idees-2015",
-        eurostat="data/eurostat-energy_balances-may_2018_edition",
+        eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition",
     output:
         industrial_production_per_country=RESOURCES
         + "industrial_production_per_country.csv",

@ -484,7 +484,7 @@ rule build_industrial_distribution_key:
     input:
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
-        hotmaps_industrial_database="data/Industrial_Database.csv",
+        hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv",
     output:
         industrial_distribution_key=RESOURCES
         + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv",

@ -559,7 +559,7 @@ rule build_industrial_energy_demand_per_country_today:
         countries=config["countries"],
         industry=config["industry"],
     input:
-        jrc="data/jrc-idees-2015",
+        jrc="data/bundle-sector/jrc-idees-2015",
         ammonia_production=RESOURCES + "ammonia_production.csv",
         industrial_production_per_country=RESOURCES
         + "industrial_production_per_country.csv",

@ -685,8 +685,8 @@ rule build_transport_demand:
         pop_weighted_energy_totals=RESOURCES
         + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
         transport_data=RESOURCES + "transport_data.csv",
-        traffic_data_KFZ="data/emobility/KFZ__count",
+        traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count",
-        traffic_data_Pkw="data/emobility/Pkw__count",
+        traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count",
         temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
     output:
         transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv",

@ -735,8 +735,13 @@ rule prepare_sector_network:
         avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv",
         dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
         co2_totals_name=RESOURCES + "co2_totals.csv",
-        co2="data/eea/UNFCCC_v23.csv",
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
-        biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
+        biomass_potentials=RESOURCES
+        + "biomass_potentials_s{simpl}_{clusters}_"
+        + "{}.csv".format(config["biomass"]["year"])
+        if config["foresight"] == "overnight"
+        else RESOURCES
+        + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
         heat_profile="data/heat_load_profile_BDEW.csv",
         costs="data/costs_{}.csv".format(config["costs"]["year"])
         if config["foresight"] == "overnight"
rules/collect.smk
@ -14,12 +14,6 @@ localrules:
     plot_networks,


-rule all:
-    input:
-        RESULTS + "graphs/costs.pdf",
-    default_target: True
-
-
 rule cluster_networks:
     input:
         expand(RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", **config["scenario"]),

@ -66,6 +60,15 @@ rule solve_sector_networks:
         ),


+rule solve_sector_networks_perfect:
+    input:
+        expand(
+            RESULTS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+            **config["scenario"]
+        ),
+
+
 rule plot_networks:
     input:
         expand(

@ -73,3 +76,18 @@ rule plot_networks:
             + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
             **config["scenario"]
         ),
+
+
+rule validate_elec_networks:
+    input:
+        expand(
+            RESULTS
+            + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+            **config["scenario"]
+        ),
+        expand(
+            RESULTS
+            + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+            **config["scenario"],
+            kind=["production", "prices", "cross_border"]
+        ),
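The collection rules above rely on Snakemake's ``expand`` helper with the scenario dictionary unpacked as keyword arguments. A minimal plain-Python sketch of what that expansion produces (the wildcard values are hypothetical):

.. code:: python

    # Sketch: expand() enumerates one target per combination of wildcard values.
    from itertools import product

    scenario = {"simpl": [""], "clusters": [5], "ll": ["v1.5"], "opts": ["Co2L-24h"]}
    pattern = "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"

    targets = [
        pattern.format(simpl=s, clusters=c, ll=l, opts=o)
        for s, c, l, o in product(*scenario.values())
    ]
    print(targets)  # ['networks/elec_s_5_ec_lv1.5_Co2L-24h.nc']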
rules/common.smk
@ -15,8 +15,8 @@ def memory(w):
         if m is not None:
             factor *= int(m.group(1)) / 8760
             break
-    if w.clusters.endswith("m"):
+    if w.clusters.endswith("m") or w.clusters.endswith("c"):
-        return int(factor * (18000 + 180 * int(w.clusters[:-1])))
+        return int(factor * (55000 + 600 * int(w.clusters[:-1])))
     elif w.clusters == "all":
         return int(factor * (18000 + 180 * 4000))
     else:

@ -42,7 +42,7 @@ def has_internet_access(url="www.zenodo.org") -> bool:
 def input_eurostat(w):
     # 2016 includes BA, 2017 does not
     report_year = config["energy"]["eurostat_report_year"]
-    return f"data/eurostat-energy_balances-june_{report_year}_edition"
+    return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"


 def solved_previous_horizon(wildcards):
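The revised ``memory`` helper scales the solver memory estimate linearly with the number of clusters and raises both intercept and slope for the ``m``/``c`` suffixed resolutions. A standalone sketch of the heuristic (the wildcard object is mocked, and the final branch's coefficients are an assumption for illustration):

.. code:: python

    # Sketch of the cluster-dependent memory heuristic for solver jobs.
    def estimate_mem_mb(clusters: str, factor: float = 1.0) -> int:
        if clusters.endswith(("m", "c")):
            # networks retaining more sub-cluster detail need far more memory
            return int(factor * (55000 + 600 * int(clusters[:-1])))
        elif clusters == "all":
            return int(factor * (18000 + 180 * 4000))
        else:
            return int(factor * (10000 + 195 * int(clusters)))  # assumed default

    print(estimate_mem_mb("128m"))  # 131800
    print(estimate_mem_mb("37", factor=0.5))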
rules/postprocess.smk
@ -8,38 +8,69 @@ localrules:
     copy_conda_env,


-rule plot_network:
-    params:
-        foresight=config["foresight"],
-        plotting=config["plotting"],
-    input:
-        network=RESULTS
-        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-        regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
-    output:
-        map=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
-        today=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
-    threads: 2
-    resources:
-        mem_mb=10000,
-    benchmark:
-        (
-            BENCHMARKS
-            + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
-        )
+if config["foresight"] != "perfect":
+
+    rule plot_network:
+        params:
+            foresight=config["foresight"],
+            plotting=config["plotting"],
+        input:
+            network=RESULTS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        output:
+            map=RESULTS
+            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+            today=RESULTS
+            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
+        threads: 2
+        resources:
+            mem_mb=10000,
+        benchmark:
+            (
+                BENCHMARKS
+                + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            )
+        conda:
+            "../envs/environment.yaml"
+        script:
+            "../scripts/plot_network.py"
+
+
+if config["foresight"] == "perfect":
+
+    rule plot_network:
+        params:
+            foresight=config["foresight"],
+            plotting=config["plotting"],
+        input:
+            network=RESULTS
+            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
+            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        output:
+            **{
+                f"map_{year}": RESULTS
+                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+                + f"{year}.pdf"
+                for year in config["scenario"]["planning_horizons"]
+            },
+        threads: 2
+        resources:
+            mem_mb=10000,
+        benchmark:
+            (
+                BENCHMARKS
+                + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
+            )
         conda:
             "../envs/environment.yaml"
         script:
             "../scripts/plot_network.py"


 rule copy_config:
     params:
         RDIR=RDIR,
     output:
-        RESULTS + "config/config.yaml",
+        RESULTS + "config.yaml",
     threads: 1
     resources:
         mem_mb=1000,

@ -51,22 +82,6 @@ rule copy_config:
         "../scripts/copy_config.py"


-rule copy_conda_env:
-    output:
-        RESULTS + "config/environment.yaml",
-    threads: 1
-    resources:
-        mem_mb=500,
-    log:
-        LOGS + "copy_conda_env.log",
-    benchmark:
-        BENCHMARKS + "copy_conda_env"
-    conda:
-        "../envs/environment.yaml"
-    shell:
-        "conda env export -f {output} --no-builds"
-
-
 rule make_summary:
     params:
         foresight=config["foresight"],

@ -122,6 +137,8 @@ rule plot_summary:
         countries=config["countries"],
         planning_horizons=config["scenario"]["planning_horizons"],
         sector_opts=config["scenario"]["sector_opts"],
+        emissions_scope=config["energy"]["emissions"],
+        eurostat_report_year=config["energy"]["eurostat_report_year"],
         plotting=config["plotting"],
         RDIR=RDIR,
     input:

@ -129,6 +146,7 @@ rule plot_summary:
         energy=RESULTS + "csvs/energy.csv",
         balances=RESULTS + "csvs/supply_energy.csv",
         eurostat=input_eurostat,
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
     output:
         costs=RESULTS + "graphs/costs.pdf",
         energy=RESULTS + "graphs/energy.pdf",

@ -144,3 +162,34 @@ rule plot_summary:
         "../envs/environment.yaml"
     script:
         "../scripts/plot_summary.py"
+
+
+STATISTICS_BARPLOTS = [
+    "capacity_factor",
+    "installed_capacity",
+    "optimal_capacity",
+    "capital_expenditure",
+    "operational_expenditure",
+    "curtailment",
+    "supply",
+    "withdrawal",
+    "market_value",
+]
+
+
+rule plot_elec_statistics:
+    params:
+        plotting=config["plotting"],
+        barplots=STATISTICS_BARPLOTS,
+    input:
+        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+    output:
+        **{
+            f"{plot}_bar": RESULTS
+            + f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
+            for plot in STATISTICS_BARPLOTS
+        },
+        barplots_touch=RESULTS
+        + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
+    script:
+        "../scripts/plot_statistics.py"
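The ``plot_elec_statistics`` rule generates its named outputs from a dict comprehension unpacked with ``**``. The same pattern works anywhere keyword arguments are accepted; a small sketch with hypothetical paths:

.. code:: python

    # Sketch: build named outputs from a list, as the rule's **{...} block does.
    STATISTICS_BARPLOTS = ["capacity_factor", "curtailment", "market_value"]

    outputs = {
        f"{plot}_bar": f"figures/statistics_{plot}_bar.pdf"
        for plot in STATISTICS_BARPLOTS
    }

    def register(**named_outputs):
        # Snakemake's `output: **{...}` unpacks analogously
        for name, path in named_outputs.items():
            print(f"{name} -> {path}")

    register(**outputs)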
rules/retrieve.smk
@ -27,7 +27,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",

     rule retrieve_databundle:
         output:
-            expand("data/bundle/{file}", file=datafiles),
+            protected(expand("data/bundle/{file}", file=datafiles)),
         log:
             LOGS + "retrieve_databundle.log",
         resources:

@ -92,7 +92,7 @@ if config["enable"]["retrieve"] and config["enable"].get(
                 static=True,
             ),
         output:
-            RESOURCES + "natura.tiff",
+            protected(RESOURCES + "natura.tiff"),
         log:
             LOGS + "retrieve_natura_raster.log",
         resources:

@ -106,22 +106,30 @@ if config["enable"]["retrieve"] and config["enable"].get(
     "retrieve_sector_databundle", True
 ):
     datafiles = [
-        "data/eea/UNFCCC_v23.csv",
-        "data/switzerland-sfoe/switzerland-new_format.csv",
-        "data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
-        "data/myb1-2017-nitro.xls",
-        "data/Industrial_Database.csv",
-        "data/emobility/KFZ__count",
-        "data/emobility/Pkw__count",
-        "data/h2_salt_caverns_GWh_per_sqkm.geojson",
-        directory("data/eurostat-energy_balances-june_2016_edition"),
-        directory("data/eurostat-energy_balances-may_2018_edition"),
-        directory("data/jrc-idees-2015"),
+        "eea/UNFCCC_v23.csv",
+        "switzerland-sfoe/switzerland-new_format.csv",
+        "nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
+        "myb1-2017-nitro.xls",
+        "Industrial_Database.csv",
+        "emobility/KFZ__count",
+        "emobility/Pkw__count",
+        "h2_salt_caverns_GWh_per_sqkm.geojson",
     ]
+
+    datafolders = [
+        protected(
+            directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition")
+        ),
+        protected(
+            directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition")
+        ),
+        protected(directory("data/bundle-sector/jrc-idees-2015")),
+    ]

     rule retrieve_sector_databundle:
         output:
-            *datafiles,
+            protected(expand("data/bundle-sector/{files}", files=datafiles)),
+            *datafolders,
         log:
             LOGS + "retrieve_sector_databundle.log",
         retries: 2

@ -143,7 +151,9 @@ if config["enable"]["retrieve"] and (

     rule retrieve_gas_infrastructure_data:
         output:
-            expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles),
+            protected(
+                expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
+            ),
         log:
             LOGS + "retrieve_gas_infrastructure_data.log",
         retries: 2

@ -158,7 +168,11 @@ if config["enable"]["retrieve"]:

     rule retrieve_electricity_demand:
         input:
             HTTP.remote(
-                "data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
+                "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
+                    version="2019-06-05"
+                    if config["snapshots"]["end"] < "2019"
+                    else "2020-10-06"
+                ),
                 keep_local=True,
                 static=True,
             ),
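The OPSD version switch above exploits the fact that ISO-8601 date strings compare correctly as plain strings. The same selection logic in isolation (config dict mocked):

.. code:: python

    # Sketch: pick the OPSD time-series release that covers the modelled period.
    config = {"snapshots": {"end": "2019-07-01"}}

    version = "2019-06-05" if config["snapshots"]["end"] < "2019" else "2020-10-06"

    url = (
        "data.open-power-system-data.org/time_series/"
        f"{version}/time_series_60min_singleindex.csv"
    )
    print(url)  # 2020-10-06 release, since the snapshots extend into 2019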
@ -183,7 +197,7 @@ if config["enable"]["retrieve"]:
                 static=True,
             ),
         output:
-            "data/shipdensity_global.zip",
+            protected("data/shipdensity_global.zip"),
         log:
             LOGS + "retrieve_ship_raster.log",
         resources:

@ -205,3 +219,38 @@ if config["enable"]["retrieve"]:
         output:
             RESOURCES + "Copernicus_LC100_global_v3.0.1_2019-nrt_Discrete-Classification-map_EPSG-4326.tif",
         run: move(input[0], output[0])
+
+
+if config["enable"]["retrieve"]:
+
+    rule retrieve_monthly_co2_prices:
+        input:
+            HTTP.remote(
+                "https://www.eex.com/fileadmin/EEX/Downloads/EUA_Emission_Spot_Primary_Market_Auction_Report/Archive_Reports/emission-spot-primary-market-auction-report-2019-data.xls",
+                keep_local=True,
+                static=True,
+            ),
+        output:
+            "data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
+        log:
+            LOGS + "retrieve_monthly_co2_prices.log",
+        resources:
+            mem_mb=5000,
+        retries: 2
+        run:
+            move(input[0], output[0])
+
+
+if config["enable"]["retrieve"]:
+
+    rule retrieve_monthly_fuel_prices:
+        output:
+            "data/validation/energy-price-trends-xlsx-5619002.xlsx",
+        log:
+            LOGS + "retrieve_monthly_fuel_prices.log",
+        resources:
+            mem_mb=5000,
+        retries: 2
+        conda:
+            "../envs/environment.yaml"
+        script:
+            "../scripts/retrieve_monthly_fuel_prices.py"
rules/solve_electricity.smk
@ -13,6 +13,7 @@ rule solve_network:
         ),
     input:
         network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+        config=RESULTS + "config.yaml",
     output:
         network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
     log:

@ -26,6 +27,7 @@ rule solve_network:
     threads: 4
     resources:
         mem_mb=memory,
+        walltime=config["solving"].get("walltime", "12:00:00"),
     shadow:
         "minimal"
     conda:

@ -55,7 +57,8 @@ rule solve_operations_network:
         )
     threads: 4
     resources:
-        mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
+        mem_mb=(lambda w: 10000 + 372 * int(w.clusters)),
+        walltime=config["solving"].get("walltime", "12:00:00"),
     shadow:
         "minimal"
     conda:
rules/solve_myopic.smk
@ -92,7 +92,7 @@ rule solve_sector_network_myopic:
         network=RESULTS
         + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
         costs="data/costs_{planning_horizons}.csv",
-        config=RESULTS + "config/config.yaml",
+        config=RESULTS + "config.yaml",
     output:
         RESULTS
         + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

@ -106,6 +106,7 @@ rule solve_sector_network_myopic:
     threads: 4
     resources:
         mem_mb=config["solving"]["mem"],
+        walltime=config["solving"].get("walltime", "12:00:00"),
     benchmark:
         (
             BENCHMARKS
rules/solve_overnight.smk
@ -14,9 +14,7 @@ rule solve_sector_network:
     input:
         network=RESULTS
         + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-        costs="data/costs_{}.csv".format(config["costs"]["year"]),
-        config=RESULTS + "config/config.yaml",
-        #env=RDIR + 'config/environment.yaml',
+        config=RESULTS + "config.yaml",
     output:
         RESULTS
         + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

@ -30,6 +28,7 @@ rule solve_sector_network:
     threads: config["solving"]["solver"].get("threads", 4)
     resources:
         mem_mb=config["solving"]["mem"],
+        walltime=config["solving"].get("walltime", "12:00:00"),
     benchmark:
         (
             RESULTS
rules/solve_perfect.smk (new file, +194 lines)
@ -0,0 +1,194 @@

# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
    params:
        baseyear=config["scenario"]["planning_horizons"][0],
        sector=config["sector"],
        existing_capacities=config["existing_capacities"],
        costs=config["costs"],
    input:
        network=RESULTS
        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        powerplants=RESOURCES + "powerplants.csv",
        busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
        busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
        costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
        existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
        existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
        existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
    output:
        RESULTS
        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    wildcard_constraints:
        planning_horizons=config["scenario"]["planning_horizons"][0],  # only applies to baseyear
    threads: 1
    resources:
        mem_mb=2000,
    log:
        LOGS
        + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
            + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/add_existing_baseyear.py"


rule add_brownfield:
    params:
        H2_retrofit=config["sector"]["H2_retrofit"],
        H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
        threshold_capacity=config["existing_capacities"]["threshold_capacity"],
    input:
        network=RESULTS
        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        network_p=solved_previous_horizon,  # solved network at previous time step
        costs="data/costs_{planning_horizons}.csv",
        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
    output:
        RESULTS
        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    threads: 4
    resources:
        mem_mb=10000,
    log:
        LOGS
        + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
            + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/add_brownfield.py"


rule prepare_perfect_foresight:
    input:
        **{
            f"network_{year}": RESULTS
            + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
            + f"{year}.nc"
            for year in config["scenario"]["planning_horizons"][1:]
        },
        brownfield_network=lambda w: (
            RESULTS
            + "prenetworks-brownfield/"
            + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
            + "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
        ),
    output:
        RESULTS
        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
    threads: 2
    resources:
        mem_mb=10000,
    log:
        LOGS
        + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
    benchmark:
        (
            BENCHMARKS
            + "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/prepare_perfect_foresight.py"


rule solve_sector_network_perfect:
    params:
        solving=config["solving"],
        foresight=config["foresight"],
        sector=config["sector"],
        planning_horizons=config["scenario"]["planning_horizons"],
        co2_sequestration_potential=config["sector"].get(
            "co2_sequestration_potential", 200
        ),
    input:
        network=RESULTS
        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
        costs="data/costs_2030.csv",
        config=RESULTS + "config.yaml",
    output:
        RESULTS
        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
    threads: 4
    resources:
        mem_mb=config["solving"]["mem"],
    shadow:
        "shallow"
    log:
        solver=RESULTS
        + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
        python=RESULTS
        + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
        memory=RESULTS
        + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
    benchmark:
        (
            BENCHMARKS
            + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/solve_network.py"


rule make_summary_perfect:
    input:
        **{
            f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
            + f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
            for simpl in config["scenario"]["simpl"]
            for clusters in config["scenario"]["clusters"]
            for opts in config["scenario"]["opts"]
            for sector_opts in config["scenario"]["sector_opts"]
            for ll in config["scenario"]["ll"]
        },
        costs="data/costs_2020.csv",
    output:
        nodal_costs=RESULTS + "csvs/nodal_costs.csv",
        nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
        nodal_cfs=RESULTS + "csvs/nodal_cfs.csv",
        cfs=RESULTS + "csvs/cfs.csv",
        costs=RESULTS + "csvs/costs.csv",
        capacities=RESULTS + "csvs/capacities.csv",
        curtailment=RESULTS + "csvs/curtailment.csv",
        energy=RESULTS + "csvs/energy.csv",
        supply=RESULTS + "csvs/supply.csv",
        supply_energy=RESULTS + "csvs/supply_energy.csv",
        prices=RESULTS + "csvs/prices.csv",
        weighted_prices=RESULTS + "csvs/weighted_prices.csv",
        market_values=RESULTS + "csvs/market_values.csv",
        price_statistics=RESULTS + "csvs/price_statistics.csv",
        metrics=RESULTS + "csvs/metrics.csv",
        co2_emissions=RESULTS + "csvs/co2_emissions.csv",
    threads: 2
    resources:
        mem_mb=10000,
    log:
        LOGS + "make_summary_perfect.log",
    benchmark:
        (BENCHMARKS + "make_summary_perfect")
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/make_summary_perfect.py"


ruleorder: add_existing_baseyear > add_brownfield
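Both ``add_existing_baseyear`` and ``add_brownfield`` can produce the same ``prenetworks-brownfield`` target, so the ``ruleorder`` directive breaks the tie in favour of the base-year rule. A minimal isolated Snakefile sketch of the mechanism (rules and paths are hypothetical):

.. code:: python

    # Hypothetical Snakefile: both rules match out/{year}.txt for year 2020;
    # ruleorder makes `seed` win there, while other years fall to `carry_forward`.
    rule seed:
        output:
            "out/{year}.txt"
        wildcard_constraints:
            year="2020",  # like add_existing_baseyear, only the base year
        shell:
            "echo seeded > {output}"

    rule carry_forward:
        output:
            "out/{year}.txt"
        shell:
            "echo carried > {output}"

    ruleorder: seed > carry_forward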
rules/validate.smk (new file, +117 lines)
@ -0,0 +1,117 @@

# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

PRODUCTION_PLOTS = [
    "production_bar",
    "production_deviation_bar",
    "seasonal_operation_area",
]
CROSS_BORDER_PLOTS = ["trade_time_series", "cross_border_bar"]
PRICES_PLOTS = ["price_bar", "price_line"]


rule build_electricity_production:
    """
    This rule builds the electricity production for each country and technology from ENTSO-E data.
    The data is used for validation of the optimization results.
    """
    params:
        snapshots=config["snapshots"],
        countries=config["countries"],
    output:
        RESOURCES + "historical_electricity_production.csv",
    log:
        LOGS + "build_electricity_production.log",
    resources:
        mem_mb=5000,
    script:
        "../scripts/build_electricity_production.py"


rule build_cross_border_flows:
    """
    This rule builds the cross-border flows from ENTSO-E data.
    The data is used for validation of the optimization results.
    """
    params:
        snapshots=config["snapshots"],
        countries=config["countries"],
    input:
        network=RESOURCES + "networks/base.nc",
    output:
        RESOURCES + "historical_cross_border_flows.csv",
    log:
        LOGS + "build_cross_border_flows.log",
    resources:
        mem_mb=5000,
    script:
        "../scripts/build_cross_border_flows.py"


rule build_electricity_prices:
    """
    This rule builds the electricity prices from ENTSO-E data.
    The data is used for validation of the optimization results.
    """
    params:
        snapshots=config["snapshots"],
        countries=config["countries"],
    output:
        RESOURCES + "historical_electricity_prices.csv",
    log:
        LOGS + "build_electricity_prices.log",
    resources:
        mem_mb=5000,
    script:
        "../scripts/build_electricity_prices.py"


rule plot_validation_electricity_production:
    input:
        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
        electricity_production=RESOURCES + "historical_electricity_production.csv",
    output:
        **{
            plot: RESULTS
            + f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
            for plot in PRODUCTION_PLOTS
        },
        plots_touch=RESULTS
        + "figures/.validation_production_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
    script:
        "../scripts/plot_validation_electricity_production.py"


rule plot_validation_cross_border_flows:
    params:
        countries=config["countries"],
    input:
        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
        cross_border_flows=RESOURCES + "historical_cross_border_flows.csv",
    output:
        **{
            plot: RESULTS
            + f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
            for plot in CROSS_BORDER_PLOTS
        },
        plots_touch=RESULTS
        + "figures/.validation_cross_border_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
    script:
        "../scripts/plot_validation_cross_border_flows.py"


rule plot_validation_electricity_prices:
    input:
        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
        electricity_prices=RESOURCES + "historical_electricity_prices.csv",
    output:
        **{
            plot: RESULTS
            + f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
            for plot in PRICES_PLOTS
        },
        plots_touch=RESULTS
        + "figures/.validation_prices_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
    script:
        "../scripts/plot_validation_electricity_prices.py"
256
scripts/_benchmark.py
Normal file
256
scripts/_benchmark.py
Normal file
@ -0,0 +1,256 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: MIT
|
||||||
|
"""
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import, print_function
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# TODO: provide alternative when multiprocessing is not available
|
||||||
|
try:
|
||||||
|
from multiprocessing import Pipe, Process
|
||||||
|
except ImportError:
|
||||||
|
from multiprocessing.dummy import Process, Pipe
|
||||||
|
|
||||||
|
from memory_profiler import _get_memory, choose_backend
|
||||||
|
|
||||||
|
|
||||||
|
# The memory logging facilities have been adapted from memory_profiler
|
||||||
|
class MemTimer(Process):
|
||||||
|
"""
|
||||||
|
Write memory consumption over a time interval to file until signaled to
|
||||||
|
stop on the pipe.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw
|
||||||
|
):
|
||||||
|
self.monitor_pid = monitor_pid
|
||||||
|
self.interval = interval
|
||||||
|
self.pipe = pipe
|
||||||
|
self.filename = filename
|
||||||
|
self.max_usage = max_usage
|
||||||
|
self.backend = backend
|
||||||
|
|
||||||
|
self.timestamps = kw.pop("timestamps", True)
|
||||||
|
self.include_children = kw.pop("include_children", True)
|
||||||
|
|
||||||
|
super(MemTimer, self).__init__(*args, **kw)
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
# get baseline memory usage
|
||||||
|
cur_mem = _get_memory(
|
||||||
|
self.monitor_pid,
|
||||||
|
self.backend,
|
||||||
|
timestamps=self.timestamps,
|
||||||
|
include_children=self.include_children,
|
||||||
|
)
|
||||||
|
|
||||||
|
n_measurements = 1
|
||||||
|
mem_usage = cur_mem if self.max_usage else [cur_mem]
|
||||||
|
|
||||||
|
if self.filename is not None:
|
||||||
|
stream = open(self.filename, "w")
|
||||||
|
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
|
||||||
|
stream.flush()
|
||||||
|
else:
|
||||||
|
stream = None
|
||||||
|
|
||||||
|
self.pipe.send(0) # we're ready
|
||||||
|
stop = False
|
||||||
|
while True:
|
||||||
|
cur_mem = _get_memory(
|
||||||
|
self.monitor_pid,
|
||||||
|
self.backend,
|
||||||
|
timestamps=self.timestamps,
|
||||||
|
include_children=self.include_children,
|
||||||
|
)
|
||||||
|
|
||||||
|
if stream is not None:
|
||||||
|
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
|
||||||
|
stream.flush()
|
||||||
|
|
||||||
|
n_measurements += 1
|
||||||
|
if not self.max_usage:
|
||||||
|
mem_usage.append(cur_mem)
|
||||||
|
else:
|
||||||
|
mem_usage = max(cur_mem, mem_usage)
|
||||||
|
|
||||||
|
if stop:
|
||||||
|
break
|
||||||
|
stop = self.pipe.poll(self.interval)
|
||||||
|
# do one more iteration
|
||||||
|
|
||||||
|
if stream is not None:
|
||||||
|
stream.close()
|
||||||
|
|
        self.pipe.send(mem_usage)
        self.pipe.send(n_measurements)


class memory_logger(object):
    """
    Context manager for taking and reporting memory measurements at fixed
    intervals from a separate process, for the duration of a context.

    Parameters
    ----------
    filename : None|str
        Name of the text file to log memory measurements, if None no log is
        created (defaults to None)
    interval : float
        Interval between measurements (defaults to 1.)
    max_usage : bool
        If True, only store and report the maximum value (defaults to True)
    timestamps : bool
        Whether to record tuples of memory usage and timestamps; if logging to
        a file timestamps are always kept (defaults to True)
    include_children : bool
        Whether the memory of subprocesses is to be included (default: True)

    Arguments
    ---------
    n_measurements : int
        Number of measurements that have been taken
    mem_usage : (float, float)|[(float, float)]
        All memory measurements and timestamps (if timestamps was True) or only
        the maximum memory usage and its timestamp

    Note
    ----
    The arguments are only set after all the measurements, i.e. outside of the
    with statement.

    Example
    -------
    with memory_logger(filename="memory.log", max_usage=True) as mem:
        # Do a lot of long running memory intensive stuff
        hard_memory_bound_stuff()

    max_mem, timestamp = mem.mem_usage
    """

    def __init__(
        self,
        filename=None,
        interval=1.0,
        max_usage=True,
        timestamps=True,
        include_children=True,
    ):
        if filename is not None:
            timestamps = True

        self.filename = filename
        self.interval = interval
        self.max_usage = max_usage
        self.timestamps = timestamps
        self.include_children = include_children

    def __enter__(self):
        backend = choose_backend()

        self.child_conn, self.parent_conn = Pipe()  # this will store MemTimer's results
        self.p = MemTimer(
            os.getpid(),
            self.interval,
            self.child_conn,
            self.filename,
            backend=backend,
            timestamps=self.timestamps,
            max_usage=self.max_usage,
            include_children=self.include_children,
        )
        self.p.start()
        self.parent_conn.recv()  # wait until memory logging in subprocess is ready

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.parent_conn.send(0)  # finish timing

            self.mem_usage = self.parent_conn.recv()
            self.n_measurements = self.parent_conn.recv()
        else:
            self.p.terminate()

        return False


class timer(object):
    level = 0
    opened = False

    def __init__(self, name="", verbose=True):
        self.name = name
        self.verbose = verbose

    def __enter__(self):
        if self.verbose:
            if self.opened:
                sys.stdout.write("\n")

            if len(self.name) > 0:
                sys.stdout.write((".. " * self.level) + self.name + ": ")
            sys.stdout.flush()

            self.__class__.opened = True

        self.__class__.level += 1

        self.start = time.time()
        return self

    def print_usec(self, usec):
        if usec < 1000:
            print("%.1f usec" % usec)
        else:
            msec = usec / 1000
            if msec < 1000:
                print("%.1f msec" % msec)
            else:
                sec = msec / 1000
                print("%.1f sec" % sec)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.opened and self.verbose:
            sys.stdout.write(".. " * self.level)

        if exc_type is None:
            stop = time.time()
            self.usec = usec = (stop - self.start) * 1e6
            if self.verbose:
                self.print_usec(usec)
        elif self.verbose:
            print("failed")
        sys.stdout.flush()

        self.__class__.level -= 1
        if self.verbose:
            self.__class__.opened = False
        return False


class optional(object):
    def __init__(self, variable, contextman):
        self.variable = variable
        self.contextman = contextman

    def __enter__(self):
        if self.variable:
            return self.contextman.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.variable:
            return self.contextman.__exit__(exc_type, exc_val, exc_tb)
        return False
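Note on the handshake above: MemTimer pushes its measurements back through a multiprocessing Pipe, and memory_logger blocks on recv() until the sampler signals readiness before running the body of the with-block. A minimal standalone sketch of the same pattern, using only the standard library (the trivial sampler below is hypothetical and not part of this commit):

import os
import time
from multiprocessing import Pipe, Process


def sampler(pid, interval, pipe):
    # signal readiness, mirroring MemTimer's first send
    pipe.send(0)
    samples = []
    # stop as soon as the parent sends anything back
    while not pipe.poll(interval):
        samples.append(time.time())  # a real sampler would measure memory of pid
    pipe.send(samples)


if __name__ == "__main__":
    child_conn, parent_conn = Pipe()
    p = Process(target=sampler, args=(os.getpid(), 0.1, child_conn))
    p.start()
    parent_conn.recv()  # wait until the sampler is ready
    sum(range(10**6))  # stand-in for the monitored workload
    parent_conn.send(0)  # finish timing
    print(len(parent_conn.recv()), "samples taken")
    p.join()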
@ -165,7 +165,7 @@ def sanitize_carriers(n, config):
     nice_names = (
         pd.Series(config["plotting"]["nice_names"])
         .reindex(carrier_i)
-        .fillna(carrier_i.to_series().str.title())
+        .fillna(carrier_i.to_series())
     )
     n.carriers["nice_name"] = n.carriers.nice_name.where(
         n.carriers.nice_name != "", nice_names
@ -204,7 +204,6 @@ def load_costs(tech_costs, config, max_hours, Nyears=1.0):
         * costs["investment"]
         * Nyears
     )
-
     costs.at["OCGT", "fuel"] = costs.at["gas", "fuel"]
     costs.at["CCGT", "fuel"] = costs.at["gas", "fuel"]
@ -368,7 +367,6 @@ def attach_wind_and_solar(
     n, costs, input_profiles, carriers, extendable_carriers, line_length_factor=1
 ):
     add_missing_carriers(n, carriers)
-
     for car in carriers:
         if car == "hydro":
             continue
@ -416,6 +414,7 @@ def attach_wind_and_solar(
             capital_cost=capital_cost,
             efficiency=costs.at[supcar, "efficiency"],
             p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
+            lifetime=costs.at[supcar, "lifetime"],
         )

@ -427,6 +426,8 @@ def attach_conventional_generators(
     extendable_carriers,
     conventional_params,
     conventional_inputs,
+    unit_commitment=None,
+    fuel_price=None,
 ):
     carriers = list(set(conventional_carriers) | set(extendable_carriers["Generator"]))
     add_missing_carriers(n, carriers)
@ -445,15 +446,34 @@ def attach_conventional_generators(
         .rename(index=lambda s: "C" + str(s))
     )
     ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
-    ppl["marginal_cost"] = (
-        ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
-    )

-    logger.info(
-        "Adding {} generators with capacities [GW] \n{}".format(
-            len(ppl), ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
-        )
-    )
+    if unit_commitment is not None:
+        committable_attrs = ppl.carrier.isin(unit_commitment).to_frame("committable")
+        for attr in unit_commitment.index:
+            default = pypsa.components.component_attrs["Generator"].default[attr]
+            committable_attrs[attr] = ppl.carrier.map(unit_commitment.loc[attr]).fillna(
+                default
+            )
+    else:
+        committable_attrs = {}
+
+    if fuel_price is not None:
+        fuel_price = fuel_price.assign(
+            OCGT=fuel_price["gas"], CCGT=fuel_price["gas"]
+        ).drop("gas", axis=1)
+        missing_carriers = list(set(carriers) - set(fuel_price))
+        fuel_price = fuel_price.assign(**costs.fuel[missing_carriers])
+        fuel_price = fuel_price.reindex(ppl.carrier, axis=1)
+        fuel_price.columns = ppl.index
+        marginal_cost = fuel_price.div(ppl.efficiency).add(ppl.carrier.map(costs.VOM))
+    else:
+        marginal_cost = (
+            ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
+        )
+
+    # Define generators using modified ppl DataFrame
+    caps = ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
+    logger.info(f"Adding {len(ppl)} generators with capacities [GW] \n{caps}")

     n.madd(
         "Generator",
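For orientation, the dynamic fuel price branch above broadcasts carrier-level price time series to per-plant columns before converting to electric terms. A minimal sketch with toy data (all names and numbers illustrative, not from the repository):

import pandas as pd

fuel_price = pd.DataFrame(  # EUR/MWh_th per snapshot and carrier
    {"coal": [10.0, 11.0], "gas": [30.0, 32.0]},
    index=pd.date_range("2019-01-01", periods=2, freq="h"),
)
ppl = pd.DataFrame(
    {"carrier": ["coal", "gas"], "efficiency": [0.40, 0.58]}, index=["C1", "C2"]
)
vom = pd.Series({"coal": 3.3, "gas": 4.5})  # EUR/MWh_el

prices = fuel_price.reindex(ppl.carrier, axis=1)  # one column per plant
prices.columns = ppl.index
marginal_cost = prices.div(ppl.efficiency).add(ppl.carrier.map(vom))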
@ -464,13 +484,14 @@ def attach_conventional_generators(
         p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
         p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"]),
         efficiency=ppl.efficiency,
-        marginal_cost=ppl.marginal_cost,
+        marginal_cost=marginal_cost,
         capital_cost=ppl.capital_cost,
         build_year=ppl.datein.fillna(0).astype(int),
         lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
+        **committable_attrs,
     )

-    for carrier in conventional_params:
+    for carrier in set(conventional_params) & set(carriers):
         # Generators with technology affected
         idx = n.generators.query("carrier == @carrier").index

@ -604,6 +625,14 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
         hydro.max_hours > 0, hydro.country.map(max_hours_country)
     ).fillna(6)

+    flatten_dispatch = params.get("flatten_dispatch", False)
+    if flatten_dispatch:
+        buffer = params.get("flatten_dispatch_buffer", 0.2)
+        average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
+        p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
+    else:
+        p_max_pu = 1
+
     n.madd(
         "StorageUnit",
         hydro.index,
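The flatten_dispatch option above caps hydro dispatch near its average capacity factor. The arithmetic on a single toy plant (values illustrative):

inflow_mean = 40.0  # MW, average inflow
p_nom = 100.0       # MW, installed capacity
buffer = 0.2

average_capacity_factor = inflow_mean / p_nom        # 0.4
p_max_pu = min(average_capacity_factor + buffer, 1)  # 0.6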
@ -613,7 +642,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
         max_hours=hydro_max_hours,
         capital_cost=costs.at["hydro", "capital_cost"],
         marginal_cost=costs.at["hydro", "marginal_cost"],
-        p_max_pu=1.0,  # dispatch
+        p_max_pu=p_max_pu,  # dispatch
         p_min_pu=0.0,  # store
         efficiency_dispatch=costs.at["hydro", "efficiency"],
         efficiency_store=0.0,
@ -703,13 +732,14 @@ def attach_OPSD_renewables(n, tech_map):
         {"Solar": "PV"}
     )
     df = df.query("Fueltype in @tech_map").powerplant.convert_country_to_alpha2()
+    df = df.dropna(subset=["lat", "lon"])

     for fueltype, carriers in tech_map.items():
         gens = n.generators[lambda df: df.carrier.isin(carriers)]
         buses = n.buses.loc[gens.bus.unique()]
         gens_per_bus = gens.groupby("bus").p_nom.count()

-        caps = map_country_bus(df.query("Fueltype == @fueltype and lat == lat"), buses)
+        caps = map_country_bus(df.query("Fueltype == @fueltype"), buses)
         caps = caps.groupby(["bus"]).Capacity.sum()
         caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)

@ -820,6 +850,20 @@ if __name__ == "__main__":
     conventional_inputs = {
         k: v for k, v in snakemake.input.items() if k.startswith("conventional_")
     }
+
+    if params.conventional["unit_commitment"]:
+        unit_commitment = pd.read_csv(snakemake.input.unit_commitment, index_col=0)
+    else:
+        unit_commitment = None
+
+    if params.conventional["dynamic_fuel_price"]:
+        fuel_price = pd.read_csv(
+            snakemake.input.fuel_price, index_col=0, header=0, parse_dates=True
+        )
+        fuel_price = fuel_price.reindex(n.snapshots).fillna(method="ffill")
+    else:
+        fuel_price = None
+
     attach_conventional_generators(
         n,
         costs,
@ -828,6 +872,8 @@ if __name__ == "__main__":
         extendable_carriers,
         params.conventional,
         conventional_inputs,
+        unit_commitment=unit_commitment,
+        fuel_price=fuel_price,
     )

     attach_wind_and_solar(
@ -840,15 +886,16 @@ if __name__ == "__main__":
     )

     if "hydro" in renewable_carriers:
-        para = params.renewable["hydro"]
+        p = params.renewable["hydro"]
+        carriers = p.pop("carriers", [])
         attach_hydro(
             n,
             costs,
             ppl,
             snakemake.input.profile_hydro,
             snakemake.input.hydro_capacities,
-            para.pop("carriers", []),
-            **para,
+            carriers,
+            **p,
         )

     estimate_renewable_caps = params.electricity["estimate_renewable_capacities"]
@ -305,6 +305,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
             if "EU" not in vars(spatial)[carrier[generator]].locations:
                 bus0 = bus0.intersection(capacity.index + " gas")

+            # check for missing bus
+            missing_bus = pd.Index(bus0).difference(n.buses.index)
+            if not missing_bus.empty:
+                logger.info(f"add buses {bus0}")
+                n.madd(
+                    "Bus",
+                    bus0,
+                    carrier=generator,
+                    location=vars(spatial)[carrier[generator]].locations,
+                    unit="MWh_el",
+                )
+
             already_build = n.links.index.intersection(asset_i)
             new_build = asset_i.difference(n.links.index)
             lifetime_assets = lifetime.loc[grouping_year, generator].dropna()
@ -435,15 +447,23 @@ def add_heating_capacities_installed_before_baseyear(

     # split existing capacities between residential and services
     # proportional to energy demand
+    p_set_sum = n.loads_t.p_set.sum()
     ratio_residential = pd.Series(
         [
             (
-                n.loads_t.p_set.sum()[f"{node} residential rural heat"]
+                p_set_sum[f"{node} residential rural heat"]
                 / (
-                    n.loads_t.p_set.sum()[f"{node} residential rural heat"]
-                    + n.loads_t.p_set.sum()[f"{node} services rural heat"]
+                    p_set_sum[f"{node} residential rural heat"]
+                    + p_set_sum[f"{node} services rural heat"]
                 )
             )
+            # if rural heating demand for one of the nodes doesn't exist,
+            # then columns were dropped before and heating demand share should be 0.0
+            if all(
+                f"{node} {service} rural heat" in p_set_sum.index
+                for service in ["residential", "services"]
+            )
+            else 0.0
             for node in nodal_df.index
         ],
         index=nodal_df.index,
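The guard added above returns a 0.0 share for nodes whose rural heat columns were dropped earlier. A self-contained sketch of the same comprehension (node names hypothetical):

import pandas as pd

p_set_sum = pd.Series(
    {"DE0 residential rural heat": 30.0, "DE0 services rural heat": 10.0}
)  # no rural heat entries exist for FR0

nodes = ["DE0", "FR0"]
share = pd.Series(
    [
        p_set_sum[f"{n} residential rural heat"]
        / (
            p_set_sum[f"{n} residential rural heat"]
            + p_set_sum[f"{n} services rural heat"]
        )
        if all(
            f"{n} {s} rural heat" in p_set_sum.index
            for s in ["residential", "services"]
        )
        else 0.0
        for n in nodes
    ],
    index=nodes,
)  # DE0 -> 0.75, FR0 -> 0.0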
@ -597,6 +617,10 @@ def add_heating_capacities_installed_before_baseyear(
         ],
     )

+    # drop assets which are at the end of their lifetime
+    links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
+    n.mremove("Link", links_i)
+

 # %%
 if __name__ == "__main__":
@ -605,13 +629,13 @@ if __name__ == "__main__":

         snakemake = mock_snakemake(
             "add_existing_baseyear",
-            configfiles="config/test/config.myopic.yaml",
+            # configfiles="config/test/config.myopic.yaml",
             simpl="",
-            clusters="5",
+            clusters="37",
-            ll="v1.5",
+            ll="v1.0",
             opts="",
-            sector_opts="24H-T-H-B-I-A-solar+p3-dist1",
+            sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
-            planning_horizons=2030,
+            planning_horizons=2020,
         )

     logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -337,7 +337,7 @@ def _load_lines_from_eg(buses, eg_lines):
     )

     lines["length"] /= 1e3
+    lines["carrier"] = "AC"
     lines = _remove_dangling_branches(lines, buses)

     return lines
@ -7,9 +7,15 @@ Compute biogas and solid biomass potentials for each clustered model region
 using data from JRC ENSPRESO.
 """

+import logging
+
+logger = logging.getLogger(__name__)
 import geopandas as gpd
+import numpy as np
 import pandas as pd

+AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
+

 def build_nuts_population_data(year=2013):
     pop = pd.read_csv(
@ -208,13 +214,41 @@ if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

-        snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5")
+        snakemake = mock_snakemake(
+            "build_biomass_potentials",
+            simpl="",
+            clusters="5",
+            planning_horizons=2050,
+        )

+    overnight = snakemake.config["foresight"] == "overnight"
     params = snakemake.params.biomass
-    year = params["year"]
+    investment_year = int(snakemake.wildcards.planning_horizons)
+    year = params["year"] if overnight else investment_year
     scenario = params["scenario"]

-    enspreso = enspreso_biomass_potentials(year, scenario)
+    if year > 2050:
+        logger.info("No biomass potentials for years after 2050, using 2050.")
+        max_year = max(AVAILABLE_BIOMASS_YEARS)
+        enspreso = enspreso_biomass_potentials(max_year, scenario)
+    elif year not in AVAILABLE_BIOMASS_YEARS:
+        before = int(np.floor(year / 10) * 10)
+        after = int(np.ceil(year / 10) * 10)
+        logger.info(
+            f"No biomass potentials for {year}, interpolating linearly between {before} and {after}."
+        )
+        enspreso_before = enspreso_biomass_potentials(before, scenario)
+        enspreso_after = enspreso_biomass_potentials(after, scenario)
+        fraction = (year - before) / (after - before)
+        enspreso = enspreso_before + fraction * (enspreso_after - enspreso_before)
+    else:
+        logger.info(f"Using biomass potentials for {year}.")
+        enspreso = enspreso_biomass_potentials(year, scenario)

     enspreso = disaggregate_nuts0(enspreso)
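The interpolation branch above reduces to simple linear blending between the two neighbouring ENSPRESO years. Worked example (potentials are toy numbers):

import numpy as np

year = 2035
before = int(np.floor(year / 10) * 10)  # 2030
after = int(np.ceil(year / 10) * 10)    # 2040
fraction = (year - before) / (after - before)  # 0.5

potential_before, potential_after = 100.0, 140.0
potential = potential_before + fraction * (potential_after - potential_before)  # 120.0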
scripts/build_cross_border_flows.py (new file, 65 lines)
@ -0,0 +1,65 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import logging

import pandas as pd
import pypsa
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError
from requests import HTTPError

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("build_cross_border_flows")
    configure_logging(snakemake)

    api_key = snakemake.config["private"]["keys"]["entsoe_api"]
    client = EntsoePandasClient(api_key=api_key)

    n = pypsa.Network(snakemake.input.network)
    start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
    end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")

    branches = n.branches().query("carrier in ['AC', 'DC']")
    c = n.buses.country
    branch_countries = pd.concat([branches.bus0.map(c), branches.bus1.map(c)], axis=1)
    branch_countries = branch_countries.query("bus0 != bus1")
    branch_countries = branch_countries.apply(sorted, axis=1, result_type="broadcast")
    country_pairs = branch_countries.drop_duplicates().reset_index(drop=True)

    flows = []
    unavailable_borders = []
    for from_country, to_country in country_pairs.values:
        try:
            flow_directed = client.query_crossborder_flows(
                from_country, to_country, start=start, end=end
            )
            flow_reverse = client.query_crossborder_flows(
                to_country, from_country, start=start, end=end
            )
            flow = (flow_directed - flow_reverse).rename(
                f"{from_country} - {to_country}"
            )
            flow = flow.tz_localize(None).resample("1h").mean()
            flow = flow.loc[start.tz_localize(None) : end.tz_localize(None)]
            flows.append(flow)
        except (HTTPError, NoMatchingDataError, InvalidBusinessParameterError):
            unavailable_borders.append(f"{from_country}-{to_country}")

    if unavailable_borders:
        logger.warning(
            "Historical electricity cross-border flows for countries"
            f" {', '.join(unavailable_borders)} not available."
        )

    flows = pd.concat(flows, axis=1)
    flows.to_csv(snakemake.output[0])
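Since the ENTSO-E API reports each direction of a border separately, the script queries both directions and takes the difference. The sign convention on toy data (illustrative only):

import pandas as pd

ab = pd.Series([100.0, 150.0])  # MW, flow A -> B
ba = pd.Series([20.0, 180.0])   # MW, flow B -> A

net = (ab - ba).rename("A - B")  # positive = net export from A to B
# net: [80.0, -30.0]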
@ -80,11 +80,9 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
     def rename(s):
         return s[: -len(pattern)]

-    def date_parser(x):
-        return dateutil.parser.parse(x, ignoretz=True)
-
     return (
-        pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
+        pd.read_csv(fn, index_col=0, parse_dates=[0])
+        .tz_localize(None)
         .filter(like=pattern)
         .rename(columns=rename)
         .dropna(how="all", axis=0)
@ -168,6 +166,7 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):
     by the corresponding ratio of total energy consumptions reported by
     IEA Data browser [0] for the year 2013.

+
     2. For the ENTSOE transparency load data (if powerstatistics is False)

     Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
@ -176,6 +175,9 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):

     [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons

+    Bosnia and Herzegovina (BA) does not exist in the data set for 2019. It gets the
+    electricity consumption data from Croatia (HR) for the year 2019, scaled by the
+    factors derived from https://energy.at-site.be/eurostat-2021/
+
     Parameters
     ----------
@ -264,9 +266,17 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):
         load["AL"] = load.ME * (5.7 / 2.9)
     if "MK" not in load and "MK" in countries:
         load["MK"] = load.ME * (6.7 / 2.9)
+    if "BA" not in load and "BA" in countries:
+        load["BA"] = load.HR * (11.0 / 16.2)
     copy_timeslice(
         load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
     )
+    copy_timeslice(
+        load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)
+    )
+    copy_timeslice(
+        load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)
+    )

     if "UA" in countries:
         copy_timeslice(
@ -309,6 +319,9 @@ if __name__ == "__main__":
     if snakemake.params.load["manual_adjustments"]:
         load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries)

+    if load.empty:
+        logger.warning("Build electricity demand time series is empty.")
+
     logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
     load = load.interpolate(method="linear", limit=interpolate_limit)
scripts/build_electricity_prices.py (new file, 52 lines)
@ -0,0 +1,52 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import logging

import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("build_electricity_prices")
    configure_logging(snakemake)

    api_key = snakemake.config["private"]["keys"]["entsoe_api"]
    client = EntsoePandasClient(api_key=api_key)

    start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
    end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")

    countries = snakemake.params.countries

    prices = []
    unavailable_countries = []

    for country in countries:
        country_code = country

        try:
            gen = client.query_day_ahead_prices(country, start=start, end=end)
            gen = gen.tz_localize(None).resample("1h").mean()
            gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
            prices.append(gen)
        except NoMatchingDataError:
            unavailable_countries.append(country)

    if unavailable_countries:
        logger.warning(
            f"Historical electricity prices for countries {', '.join(unavailable_countries)} not available."
        )

    keys = [c for c in countries if c not in unavailable_countries]
    prices = pd.concat(prices, keys=keys, axis=1)
    prices.to_csv(snakemake.output[0])
scripts/build_electricity_production.py (new file, 73 lines)
@ -0,0 +1,73 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import logging

import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError

logger = logging.getLogger(__name__)


carrier_grouper = {
    "Waste": "Biomass",
    "Hydro Pumped Storage": "Hydro",
    "Hydro Water Reservoir": "Hydro",
    "Hydro Run-of-river and poundage": "Run of River",
    "Fossil Coal-derived gas": "Gas",
    "Fossil Gas": "Gas",
    "Fossil Oil": "Oil",
    "Fossil Oil shale": "Oil",
    "Fossil Brown coal/Lignite": "Lignite",
    "Fossil Peat": "Lignite",
    "Fossil Hard coal": "Coal",
    "Wind Onshore": "Onshore Wind",
    "Wind Offshore": "Offshore Wind",
    "Other renewable": "Other",
    "Marine": "Other",
}


if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("build_electricity_production")
    configure_logging(snakemake)

    api_key = snakemake.config["private"]["keys"]["entsoe_api"]
    client = EntsoePandasClient(api_key=api_key)

    start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
    end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")

    countries = snakemake.params.countries

    generation = []
    unavailable_countries = []

    for country in countries:
        country_code = country

        try:
            gen = client.query_generation(country, start=start, end=end, nett=True)
            gen = gen.tz_localize(None).resample("1h").mean()
            gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
            gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
            generation.append(gen)
        except NoMatchingDataError:
            unavailable_countries.append(country)

    if unavailable_countries:
        logger.warning(
            f"Historical electricity production for countries {', '.join(unavailable_countries)} not available."
        )

    keys = [c for c in countries if c not in unavailable_countries]
    generation = pd.concat(generation, keys=keys, axis=1)
    generation.to_csv(snakemake.output[0])
@ -13,10 +13,13 @@ logger = logging.getLogger(__name__)
 import uuid
 from itertools import product

+import country_converter as coco
 import geopandas as gpd
 import pandas as pd
 from packaging.version import Version, parse

+cc = coco.CountryConverter()
+

 def locate_missing_industrial_sites(df):
     """
@ -93,6 +96,34 @@ def prepare_hotmaps_database(regions):
     gdf.rename(columns={"index_right": "bus"}, inplace=True)
     gdf["country"] = gdf.bus.str[:2]

+    # the .sjoin can lead to duplicates if a geom is in two regions
+    if gdf.index.duplicated().any():
+        import pycountry
+
+        # get all duplicated entries
+        duplicated_i = gdf.index[gdf.index.duplicated()]
+        # convert from raw data country name to iso-2-code
+        s = df.loc[duplicated_i, "Country"].apply(
+            lambda x: pycountry.countries.lookup(x).alpha_2
+        )
+        # Get a boolean mask where gdf's country column matches s's values for the same index
+        mask = gdf["country"] == gdf.index.map(s)
+        # Filter gdf using the mask
+        gdf_filtered = gdf[mask]
+        # concat not duplicated and filtered gdf
+        gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered]).sort_index()
+
+    # the .sjoin can lead to duplicates if a geom is in two overlapping regions
+    if gdf.index.duplicated().any():
+        # get all duplicated entries
+        duplicated_i = gdf.index[gdf.index.duplicated()]
+        # convert from raw data country name to iso-2-code
+        code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
+        # screen out malformed country allocation
+        gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
+        # concat not duplicated and filtered gdf
+        gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered])
+
     return gdf

@ -115,7 +146,9 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
         facilities = hotmaps.query("country == @country and Subsector == @sector")

         if not facilities.empty:
-            emissions = facilities["Emissions_ETS_2014"]
+            emissions = facilities["Emissions_ETS_2014"].fillna(
+                hotmaps["Emissions_EPRTR_2014"]
+            )
             if emissions.sum() == 0:
                 key = pd.Series(1 / len(facilities), facilities.index)
             else:
@ -131,6 +164,7 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
     return keys


+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@ -138,7 +172,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake(
             "build_industrial_distribution_key",
             simpl="",
-            clusters=48,
+            clusters=128,
         )

     logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -41,7 +41,7 @@ The following heat gains and losses are considered:

 - heat gain through resistive losses
 - heat gain through solar radiation
-- heat loss through radiation of the trasnmission line
+- heat loss through radiation of the transmission line
 - heat loss through forced convection with wind
 - heat loss through natural convection

scripts/build_monthly_prices.py (new file, 122 lines)
@ -0,0 +1,122 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Created on Tue May 16 10:37:35 2023.

This script extracts monthly fuel prices of oil, gas, coal and lignite,
as well as CO2 prices.


Inputs
------
- ``data/energy-price-trends-xlsx-5619002.xlsx``: energy price index of fossil fuels
- ``emission-spot-primary-market-auction-report-2019-data.xls``: CO2 prices, spot primary auction


Outputs
-------

- ``data/validation/monthly_fuel_price.csv``
- ``data/validation/CO2_price_2019.csv``


Description
-----------

The rule :mod:`build_monthly_prices` collects monthly fuel prices and CO2 prices
and translates them from different input sources to pypsa syntax.

Data sources:
    [1] Fuel price index, Destatis
    https://www.destatis.de/EN/Home/_node.html
    [2] Average annual fuel price for lignite, ENTSO-E
    https://2020.entsos-tyndp-scenarios.eu/fuel-commodities-and-carbon-prices/
    [3] CO2 prices, emission spot primary auction, EEX
    https://www.eex.com/en/market-data/environmental-markets/eua-primary-auction-spot-download

Data was accessed on 16.5.2023.
"""

import logging

import pandas as pd
from _helpers import configure_logging

logger = logging.getLogger(__name__)


# keywords in datasheet
keywords = {
    "coal": " GP09-051 Hard coal",
    "lignite": " GP09-052 Lignite and lignite briquettes",
    "oil": " GP09-0610 10 Mineral oil, crude",
    "gas": "GP09-062 Natural gas",
}

# sheet names to pypsa syntax
sheet_name_map = {
    "coal": "5.1 Hard coal and lignite",
    "lignite": "5.1 Hard coal and lignite",
    "oil": "5.2 Mineral oil",
    "gas": "5.3.1 Natural gas - indices",
}


# import fuel price 2015 in Eur/MWh
# source lignite, price for 2020, scaled by price index, ENTSO-E [3]
price_2020 = (
    pd.Series({"coal": 3.0, "oil": 10.6, "gas": 5.6, "lignite": 1.1}) * 3.6
)  # Eur/MWh

# manual adjustment of coal price
price_2020["coal"] = 2.4 * 3.6
price_2020["lignite"] = 1.6 * 3.6


def get_fuel_price():
    price = {}
    for carrier, keyword in keywords.items():
        sheet_name = sheet_name_map[carrier]
        df = pd.read_excel(
            snakemake.input.fuel_price_raw,
            sheet_name=sheet_name,
            index_col=0,
            skiprows=6,
            nrows=18,
        )
        df = df.dropna(axis=0).iloc[:, :12]
        start, end = df.index[0], str(int(df.index[-1][:4]) + 1)
        df = df.stack()
        df.index = pd.date_range(start=start, end=end, freq="MS", inclusive="left")
        scale = price_2020[carrier] / df["2020"].mean()  # scale to 2020 price
        df = df.mul(scale)
        price[carrier] = df

    return pd.concat(price, axis=1)


def get_co2_price():
    # emission price
    co2_price = pd.read_excel(snakemake.input.co2_price_raw, index_col=1, header=5)
    return co2_price["Auction Price €/tCO2"]


if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("build_monthly_prices")

    configure_logging(snakemake)

    fuel_price = get_fuel_price()
    fuel_price.to_csv(snakemake.output.fuel_price)

    co2_price = get_co2_price()
    co2_price.to_csv(snakemake.output.co2_price)
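The scaling in get_fuel_price() anchors a dimensionless Destatis price index to an absolute 2020 price. The idea in isolation (index values are toy numbers):

import pandas as pd

index = pd.Series(  # monthly price index, 2015 = 100
    [100.0, 110.0, 121.0],
    index=pd.to_datetime(["2015-01-01", "2020-01-01", "2021-01-01"]),
)
anchor_2020 = 5.6 * 3.6  # gas, EUR/MWh

scale = anchor_2020 / index.loc["2020"].mean()
absolute = index * scale  # 2020 values now average the anchor price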
@ -54,6 +54,23 @@ logger = logging.getLogger(__name__)


 def determine_cutout_xXyY(cutout_name):
+    """
+    Determine the full extent of a cutout.
+
+    Since the coordinates of the cutout data are given as the
+    center of the grid cells, the extent of the cutout is
+    calculated by adding/subtracting half of the grid cell size.
+
+    Parameters
+    ----------
+    cutout_name : str
+        Path to the cutout.
+
+    Returns
+    -------
+    A list of extent coordinates in the order [x, X, y, Y].
+    """
     cutout = atlite.Cutout(cutout_name)
     assert cutout.crs.to_epsg() == 4326
     x, X, y, Y = cutout.extent
@ -89,7 +89,7 @@ logger = logging.getLogger(__name__)
 def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
     if not custom_ppl_query:
         return ppl
-    add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
+    add_ppls = pd.read_csv(custom_powerplants, dtype={"bus": "str"})
     if isinstance(custom_ppl_query, str):
         add_ppls.query(custom_ppl_query, inplace=True)
     return pd.concat(
@ -28,9 +28,7 @@ def allocate_sequestration_potential(
     overlay["share"] = area(overlay) / overlay["area_sqkm"]
     adjust_cols = overlay.columns.difference({"name", "area_sqkm", "geometry", "share"})
     overlay[adjust_cols] = overlay[adjust_cols].multiply(overlay["share"], axis=0)
-    gdf_regions = overlay.groupby("name").sum()
-    gdf_regions.drop(["area_sqkm", "share"], axis=1, inplace=True)
-    return gdf_regions.squeeze()
+    return overlay.dissolve("name", aggfunc="sum")[attr]


 if __name__ == "__main__":
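The replacement above swaps a manual groupby/drop/squeeze for geopandas' dissolve, which merges geometries per region and aggregates the remaining columns in one step. A minimal sketch (toy geometries):

import geopandas as gpd
from shapely.geometry import box

gdf = gpd.GeoDataFrame(
    {"name": ["a", "a", "b"], "potential": [1.0, 2.0, 4.0]},
    geometry=[box(0, 0, 1, 1), box(1, 0, 2, 1), box(2, 0, 3, 1)],
)
per_region = gdf.dissolve("name", aggfunc="sum")["potential"]
# a -> 3.0, b -> 6.0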
@ -461,7 +461,7 @@ if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

-        snakemake = mock_snakemake("cluster_network", simpl="", clusters="37c")
+        snakemake = mock_snakemake("cluster_network", simpl="", clusters="37")
     configure_logging(snakemake)

     params = snakemake.params
@ -483,6 +483,23 @@ if __name__ == "__main__":
     else:
         n_clusters = int(snakemake.wildcards.clusters)

+    if params.cluster_network.get("consider_efficiency_classes", False):
+        carriers = []
+        for c in aggregate_carriers:
+            gens = n.generators.query("carrier == @c")
+            low = gens.efficiency.quantile(0.10)
+            high = gens.efficiency.quantile(0.90)
+            if low >= high:
+                carriers += [c]
+            else:
+                labels = ["low", "medium", "high"]
+                suffix = pd.cut(
+                    gens.efficiency, bins=[0, low, high, 1], labels=labels
+                ).astype(str)
+                carriers += [f"{c} {label} efficiency" for label in labels]
+                n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
+        aggregate_carriers = carriers
+
     if n_clusters == len(n.buses):
         # Fast-path if no clustering is necessary
         busmap = n.buses.index.to_series()
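The efficiency classes above come from binning each carrier's generators at its 10%/90% efficiency quantiles with pd.cut. In isolation (toy efficiencies):

import pandas as pd

eff = pd.Series([0.33, 0.38, 0.45, 0.52, 0.61])
low, high = eff.quantile(0.10), eff.quantile(0.90)

labels = ["low", "medium", "high"]
suffix = pd.cut(eff, bins=[0, low, high, 1], labels=labels).astype(str)
# 0.33 -> "low", 0.45 -> "medium", 0.61 -> "high"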
@ -524,6 +541,11 @@ if __name__ == "__main__":

     update_p_nom_max(clustering.network)

+    if params.cluster_network.get("consider_efficiency_classes"):
+        labels = [f" {label} efficiency" for label in ["low", "medium", "high"]]
+        nc = clustering.network
+        nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True)
+
     clustering.network.meta = dict(
         snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
     )
@ -11,25 +11,13 @@ from shutil import copy

 import yaml

-files = {
-    "config/config.yaml": "config.yaml",
-    "Snakefile": "Snakefile",
-    "scripts/solve_network.py": "solve_network.py",
-    "scripts/prepare_sector_network.py": "prepare_sector_network.py",
-}
-
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

         snakemake = mock_snakemake("copy_config")

-    basepath = Path(f"results/{snakemake.params.RDIR}config/")
-
-    for f, name in files.items():
-        copy(f, basepath / name)
-
-    with open(basepath / "config.snakemake.yaml", "w") as yaml_file:
+    with open(snakemake.output[0], "w") as yaml_file:
         yaml.dump(
             snakemake.config,
             yaml_file,
@ -711,5 +711,5 @@ if __name__ == "__main__":
     if snakemake.params.foresight == "myopic":
         cumulative_cost = calculate_cumulative_cost()
         cumulative_cost.to_csv(
-            "results/" + snakemake.params.RDIR + "/csvs/cumulative_cost.csv"
+            "results/" + snakemake.params.RDIR + "csvs/cumulative_cost.csv"
         )
scripts/make_summary_perfect.py (new file, 745 lines)
@ -0,0 +1,745 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""


import numpy as np
import pandas as pd
import pypsa
from make_summary import (
    assign_carriers,
    assign_locations,
    calculate_cfs,
    calculate_nodal_cfs,
    calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
from six import iteritems

idx = pd.IndexSlice

opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}


def calculate_costs(n, label, costs):
    investments = n.investment_periods
    cols = pd.MultiIndex.from_product(
        [
            costs.columns.levels[0],
            costs.columns.levels[1],
            costs.columns.levels[2],
            investments,
        ],
        names=costs.columns.names[:3] + ["year"],
    )
    costs = costs.reindex(cols, axis=1)

    for c in n.iterate_components(
        n.branch_components | n.controllable_one_port_components ^ {"Load"}
    ):
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        active = pd.concat(
            [
                get_active_assets(n, c.name, inv_p).rename(inv_p)
                for inv_p in investments
            ],
            axis=1,
        ).astype(int)
        capital_costs = active.mul(capital_costs, axis=0)
        discount = (
            n.investment_period_weightings["objective"]
            / n.investment_period_weightings["years"]
        )
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)

        capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
        capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])

        costs = costs.reindex(capital_costs_grouped.index.union(costs.index))

        costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values

        if c.name == "Link":
            p = (
                c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0)
                .groupby(level=0)
                .sum()
            )
        elif c.name == "Line":
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
            p_all[p_all < 0.0] = 0.0
            p = p_all.groupby(level=0).sum()
        else:
            p = (
                round(c.pnl.p, ndigits=2)
                .multiply(n.snapshot_weightings.generators, axis=0)
                .groupby(level=0)
                .sum()
            )

        # correct sequestration cost
        if c.name == "Store":
            items = c.df.index[
                (c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
            ]
            c.df.loc[items, "marginal_cost"] = -20.0

        marginal_costs = p.mul(c.df.marginal_cost).T
        # marginal_costs = active.mul(marginal_costs, axis=0)
        marginal_costs_grouped = (
            marginal_costs.groupby(c.df.carrier).sum().mul(discount)
        )

        marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
        marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])

        costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))

        costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values

    # add back in all hydro
    # costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
    # costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
    # costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()

    return costs


def calculate_cumulative_cost():
    planning_horizons = snakemake.config["scenario"]["planning_horizons"]

    cumulative_cost = pd.DataFrame(
        index=df["costs"].sum().index,
        columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
    )

    # discount cost and express them in money value of planning_horizons[0]
    for r in cumulative_cost.columns:
        cumulative_cost[r] = [
            df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
            for index in cumulative_cost.index
        ]

    # integrate cost throughout the transition path
    for r in cumulative_cost.columns:
        for cluster in cumulative_cost.index.get_level_values(level=0).unique():
            for lv in cumulative_cost.index.get_level_values(level=1).unique():
                for sector_opts in cumulative_cost.index.get_level_values(
                    level=2
                ).unique():
                    cumulative_cost.loc[
                        (cluster, lv, sector_opts, "cumulative cost"), r
                    ] = np.trapz(
                        cumulative_cost.loc[
                            idx[cluster, lv, sector_opts, planning_horizons], r
                        ].values,
                        x=planning_horizons,
                    )

    return cumulative_cost

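# Worked example of the discounting above (illustrative numbers, not from
# any run): with r = 0.02, planning_horizons[0] = 2030 and a cost entry for
# 2050, the present value is cost / (1 + 0.02) ** (2050 - 2030), i.e. a
# discount factor of roughly 0.673; np.trapz then integrates the discounted
# costs over the planning horizons of the transition path.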
|
|
||||||
|
def calculate_nodal_capacities(n, label, nodal_capacities):
|
||||||
|
# Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
|
||||||
|
for c in n.iterate_components(
|
||||||
|
n.branch_components | n.controllable_one_port_components ^ {"Load"}
|
||||||
|
):
|
||||||
|
nodal_capacities_c = c.df.groupby(["location", "carrier"])[
|
||||||
|
opt_name.get(c.name, "p") + "_nom_opt"
|
||||||
|
].sum()
|
||||||
|
index = pd.MultiIndex.from_tuples(
|
||||||
|
[(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
|
||||||
|
)
|
||||||
|
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
|
||||||
|
nodal_capacities.loc[index, label] = nodal_capacities_c.values
|
||||||
|
|
||||||
|
return nodal_capacities
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_capacities(n, label, capacities):
|
||||||
|
investments = n.investment_periods
|
||||||
|
cols = pd.MultiIndex.from_product(
|
||||||
|
[
|
||||||
|
capacities.columns.levels[0],
|
||||||
|
capacities.columns.levels[1],
|
||||||
|
capacities.columns.levels[2],
|
||||||
|
investments,
|
||||||
|
],
|
||||||
|
names=capacities.columns.names[:3] + ["year"],
|
||||||
|
)
|
||||||
|
capacities = capacities.reindex(cols, axis=1)
|
||||||
|
|
||||||
|
for c in n.iterate_components(
|
||||||
|
n.branch_components | n.controllable_one_port_components ^ {"Load"}
|
||||||
|
):
|
||||||
|
active = pd.concat(
|
||||||
|
[
|
||||||
|
get_active_assets(n, c.name, inv_p).rename(inv_p)
|
||||||
|
for inv_p in investments
|
||||||
|
],
|
||||||
|
axis=1,
|
||||||
|
).astype(int)
|
||||||
|
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
|
||||||
|
caps = active.mul(caps, axis=0)
|
||||||
|
capacities_grouped = (
|
||||||
|
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
|
||||||
|
)
|
||||||
|
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
|
||||||
|
|
||||||
|
capacities = capacities.reindex(
|
||||||
|
capacities_grouped.index.union(capacities.index)
|
||||||
|
)
|
||||||
|
|
||||||
|
capacities.loc[capacities_grouped.index, label] = capacities_grouped.values
|
||||||
|
|
||||||
|
return capacities
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_curtailment(n, label, curtailment):
|
||||||
|
avail = (
|
||||||
|
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
|
||||||
|
.sum()
|
||||||
|
.groupby(n.generators.carrier)
|
||||||
|
.sum()
|
||||||
|
)
|
||||||
|
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
|
||||||
|
|
||||||
|
curtailment[label] = (((avail - used) / avail) * 100).round(3)
|
||||||
|
|
||||||
|
return curtailment
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_energy(n, label, energy):
|
||||||
|
investments = n.investment_periods
|
||||||
|
cols = pd.MultiIndex.from_product(
|
||||||
|
[
|
||||||
|
energy.columns.levels[0],
|
||||||
|
energy.columns.levels[1],
|
||||||
|
energy.columns.levels[2],
|
||||||
|
investments,
|
||||||
|
],
|
||||||
|
names=energy.columns.names[:3] + ["year"],
|
||||||
|
)
|
||||||
|
energy = energy.reindex(cols, axis=1)
|
||||||
|
|
||||||
|
for c in n.iterate_components(n.one_port_components | n.branch_components):
|
||||||
|
if c.name in n.one_port_components:
|
||||||
|
c_energies = (
|
||||||
|
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
|
||||||
|
.groupby(level=0)
|
||||||
|
.sum()
|
||||||
|
.multiply(c.df.sign)
|
||||||
|
.groupby(c.df.carrier, axis=1)
|
||||||
|
.sum()
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
c_energies = pd.DataFrame(
|
||||||
|
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
|
||||||
|
)
|
||||||
|
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
|
||||||
|
totals = (
|
||||||
|
c.pnl["p" + port]
|
||||||
|
.multiply(n.snapshot_weightings.generators, axis=0)
|
||||||
|
.groupby(level=0)
|
||||||
|
.sum()
|
||||||
|
)
|
||||||
|
# remove values where bus is missing (bug in nomopyomo)
|
||||||
|
no_bus = c.df.index[c.df["bus" + port] == ""]
|
||||||
|
totals[no_bus] = float(
|
||||||
|
n.component_attrs[c.name].loc["p" + port, "default"]
|
||||||
|
)
|
||||||
|
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
|
||||||
|
|
||||||
|
c_energies = pd.concat([c_energies.T], keys=[c.list_name])
|
||||||
|
|
||||||
|
energy = energy.reindex(c_energies.index.union(energy.index))
|
||||||
|
|
||||||
|
energy.loc[c_energies.index, label] = c_energies.values
|
||||||
|
|
||||||
|
return energy
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_supply(n, label, supply):
    """
    Calculate the max dispatch of each component at the buses aggregated by
    carrier.
    """

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):
            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            s = (
                c.pnl.p[items]
                .max()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"])
                .sum()
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply = supply.reindex(s.index.union(supply.index))
            supply.loc[s.index, label] = s

        for c in n.iterate_components(n.branch_components):
            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                # lots of sign compensation for direction and to do maximums
                s = (-1) ** (1 - int(end)) * (
                    (-1) ** int(end) * c.pnl["p" + end][items]
                ).max().groupby(c.df.loc[items, "carrier"]).sum()
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])

                supply = supply.reindex(s.index.union(supply.index))
                supply.loc[s.index, label] = s

    return supply

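# Illustrative sketch, not part of this commit: for branch components, the
# PyPSA series p0/p1 used above are the power withdrawn at bus0/bus1, so a
# port's injection is the negated withdrawal. Toy example:
import pandas as pd

p0 = pd.Series([10.0, -5.0, 8.0])  # withdrawal at bus0 per snapshot
injection_bus0 = -p0  # supply the bus sees from this branch
print(injection_bus0.max())  # peak supply at bus0: 5.0
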
def calculate_supply_energy(n, label, supply_energy):
    """
    Calculate the total energy supply/consumption of each component at the
    buses aggregated by carrier.
    """

    investments = n.investment_periods
    cols = pd.MultiIndex.from_product(
        [
            supply_energy.columns.levels[0],
            supply_energy.columns.levels[1],
            supply_energy.columns.levels[2],
            investments,
        ],
        names=supply_energy.columns.names[:3] + ["year"],
    )
    supply_energy = supply_energy.reindex(cols, axis=1)

    bus_carriers = n.buses.carrier.unique()

    for i in bus_carriers:
        bus_map = n.buses.carrier == i
        bus_map.at[""] = False

        for c in n.iterate_components(n.one_port_components):
            items = c.df.index[c.df.bus.map(bus_map).fillna(False)]

            if len(items) == 0:
                continue

            if c.name == "Generator":
                weightings = n.snapshot_weightings.generators
            else:
                weightings = n.snapshot_weightings.stores

            if i in ["oil", "co2", "H2"]:
                if c.name == "Load":
                    c.df.loc[items, "carrier"] = [
                        load.split("-202")[0] for load in items
                    ]
                if i == "oil" and c.name == "Generator":
                    c.df.loc[items, "carrier"] = "imported oil"
            s = (
                c.pnl.p[items]
                .multiply(weightings, axis=0)
                .groupby(level=0)
                .sum()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"], axis=1)
                .sum()
                .T
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

            supply_energy = supply_energy.reindex(
                s.index.union(supply_energy.index, sort=False)
            )
            supply_energy.loc[s.index, label] = s.values

        for c in n.iterate_components(n.branch_components):
            for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
                items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]

                if len(items) == 0:
                    continue

                s = (
                    (-1)
                    * c.pnl["p" + end]
                    .reindex(items, axis=1)
                    .multiply(n.snapshot_weightings.objective, axis=0)
                    .groupby(level=0)
                    .sum()
                    .groupby(c.df.loc[items, "carrier"], axis=1)
                    .sum()
                ).T
                s.index = s.index + end
                s = pd.concat([s], keys=[c.list_name])
                s = pd.concat([s], keys=[i])

                supply_energy = supply_energy.reindex(
                    s.index.union(supply_energy.index, sort=False)
                )

                supply_energy.loc[s.index, label] = s.values

    return supply_energy

def calculate_metrics(n, label, metrics):
    metrics = metrics.reindex(
        pd.Index(
            [
                "line_volume",
                "line_volume_limit",
                "line_volume_AC",
                "line_volume_DC",
                "line_volume_shadow",
                "co2_shadow",
            ]
        ).union(metrics.index)
    )

    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
        n.links.carrier == "DC"
    ].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[
        ["line_volume_AC", "line_volume_DC"], label
    ].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit
        metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual

    if "CO2Limit" in n.global_constraints.index:
        metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]

    return metrics

def calculate_prices(n, label, prices):
    prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))

    # WARNING: this is time-averaged, see weighted_prices for load-weighted average
    prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()

    return prices

def calculate_weighted_prices(n, label, weighted_prices):
    # Warning: doesn't include storage units as loads

    weighted_prices = weighted_prices.reindex(
        pd.Index(
            [
                "electricity",
                "heat",
                "space heat",
                "urban heat",
                "space urban heat",
                "gas",
                "H2",
            ]
        )
    )

    link_loads = {
        "electricity": [
            "heat pump",
            "resistive heater",
            "battery charger",
            "H2 Electrolysis",
        ],
        "heat": ["water tanks charger"],
        "urban heat": ["water tanks charger"],
        "space heat": [],
        "space urban heat": [],
        "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
        "H2": ["Sabatier", "H2 Fuel Cell"],
    }

    for carrier in link_loads:
        if carrier == "electricity":
            suffix = ""
        elif carrier[:5] == "space":
            suffix = carrier[5:]
        else:
            suffix = " " + carrier

        buses = n.buses.index[n.buses.index.str[2:] == suffix]

        if buses.empty:
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
        else:
            load = n.loads_t.p_set.reindex(buses, axis=1)

        for tech in link_loads[carrier]:
            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

            if names.empty:
                continue

            load += (
                n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
            )

        # Add H2 Store when charging
        # if carrier == "H2":
        #     stores = n.stores_t.p[buses + " Store"].groupby(n.stores.loc[buses + " Store", "bus"], axis=1).sum(axis=1)
        #     stores[stores > 0.] = 0.
        #     load += -stores

        weighted_prices.loc[carrier, label] = (
            load * n.buses_t.marginal_price[buses]
        ).sum().sum() / load.sum().sum()

        if carrier[:5] == "space":
            print(load * n.buses_t.marginal_price[buses])

    return weighted_prices

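# Illustrative sketch, not part of this commit: the load-weighted price above
# is sum(load * price) over all snapshots and buses, divided by total load.
import pandas as pd

load = pd.DataFrame({"bus1": [1.0, 2.0], "bus2": [3.0, 0.0]})  # MW
price = pd.DataFrame({"bus1": [50.0, 80.0], "bus2": [20.0, 40.0]})  # EUR/MWh
print((load * price).sum().sum() / load.sum().sum())  # 270 / 6 = 45.0 EUR/MWh
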
def calculate_market_values(n, label, market_values):
    # Warning: doesn't include storage units

    carrier = "AC"

    buses = n.buses.index[n.buses.carrier == carrier]

    ## First do market value of generators ##

    generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]

    techs = n.generators.loc[generators, "carrier"].value_counts().index

    market_values = market_values.reindex(market_values.index.union(techs))

    for tech in techs:
        gens = generators[n.generators.loc[generators, "carrier"] == tech]

        dispatch = (
            n.generators_t.p[gens]
            .groupby(n.generators.loc[gens, "bus"], axis=1)
            .sum()
            .reindex(columns=buses, fill_value=0.0)
        )

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    ## Now do market value of links ##

    for i in ["0", "1"]:
        all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]

        techs = n.links.loc[all_links, "carrier"].value_counts().index

        market_values = market_values.reindex(market_values.index.union(techs))

        for tech in techs:
            links = all_links[n.links.loc[all_links, "carrier"] == tech]

            dispatch = (
                n.links_t["p" + i][links]
                .groupby(n.links.loc[links, "bus" + i], axis=1)
                .sum()
                .reindex(columns=buses, fill_value=0.0)
            )

            revenue = dispatch * n.buses_t.marginal_price[buses]

            market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    return market_values

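# Illustrative sketch, not part of this commit: a technology's market value
# above is its total revenue divided by its total dispatch.
import pandas as pd

dispatch = pd.Series([10.0, 20.0])  # MWh per snapshot
price = pd.Series([30.0, 60.0])  # EUR/MWh at the bus
print((dispatch * price).sum() / dispatch.sum())  # 1500 / 30 = 50.0 EUR/MWh
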
def calculate_price_statistics(n, label, price_statistics):
    price_statistics = price_statistics.reindex(
        price_statistics.index.union(
            pd.Index(["zero_hours", "mean", "standard_deviation"])
        )
    )

    buses = n.buses.index[n.buses.carrier == "AC"]

    threshold = 0.1  # higher than phoney marginal_cost of wind/solar

    df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)

    df[n.buses_t.marginal_price[buses] < threshold] = 1.0

    price_statistics.at["zero_hours", label] = df.sum().sum() / (
        df.shape[0] * df.shape[1]
    )

    price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()

    price_statistics.at["standard_deviation", label] = (
        n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
    )

    return price_statistics

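# Illustrative sketch, not part of this commit: "zero_hours" above is the
# share of (snapshot, bus) cells whose marginal price falls below the 0.1
# EUR/MWh threshold.
import pandas as pd

prices = pd.DataFrame({"bus1": [0.0, 50.0], "bus2": [0.05, 30.0]})
indicator = (prices < 0.1).astype(float)
print(indicator.sum().sum() / indicator.size)  # 2 of 4 cells -> 0.5
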
def calculate_co2_emissions(n, label, df):
    carattr = "co2_emissions"
    emissions = n.carriers.query(f"{carattr} != 0")[carattr]

    if emissions.empty:
        return

    weightings = n.snapshot_weightings.generators.mul(
        n.investment_period_weightings["years"]
        .reindex(n.snapshots)
        .fillna(method="bfill")
        .fillna(1.0),
        axis=0,
    )

    # generators
    gens = n.generators.query("carrier in @emissions.index")
    if not gens.empty:
        em_pu = gens.carrier.map(emissions) / gens.efficiency
        em_pu = (
            weightings["generators"].to_frame("weightings")
            @ em_pu.to_frame("weightings").T
        )
        emitted = n.generators_t.p[gens.index].mul(em_pu)

        emitted_grouped = (
            emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
        )

        df = df.reindex(emitted_grouped.index.union(df.index))

        df.loc[emitted_grouped.index, label] = emitted_grouped.values

    if any(n.stores.carrier == "co2"):
        co2_i = n.stores[n.stores.carrier == "co2"].index
        df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0]

    return df

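# Illustrative sketch, not part of this commit: the per-unit emission factor
# above is the carrier's CO2 intensity divided by conversion efficiency,
# scaled by the snapshot weighting. Toy numbers:
import pandas as pd

co2_per_mwh_th = pd.Series({"gas": 0.2, "coal": 0.34})  # tCO2 per MWh thermal
efficiency = pd.Series({"gas": 0.58, "coal": 0.40})
em_pu = co2_per_mwh_th / efficiency  # tCO2 per MWh electric
print(em_pu * 3.0)  # weighted with 3-hourly snapshots
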
outputs = [
    "nodal_costs",
    "nodal_capacities",
    "nodal_cfs",
    "cfs",
    "costs",
    "capacities",
    "curtailment",
    "energy",
    "supply",
    "supply_energy",
    "prices",
    "weighted_prices",
    "price_statistics",
    "market_values",
    "metrics",
    "co2_emissions",
]

def make_summaries(networks_dict):
    columns = pd.MultiIndex.from_tuples(
        networks_dict.keys(), names=["cluster", "lv", "opt"]
    )
    df = {}

    for output in outputs:
        df[output] = pd.DataFrame(columns=columns, dtype=float)

    for label, filename in iteritems(networks_dict):
        print(label, filename)
        try:
            n = pypsa.Network(filename)
        except OSError:
            print(label, " not solved yet.")
            continue
            # del networks_dict[label]

        if not hasattr(n, "objective"):
            print(label, " not solved correctly. Check log if infeasible or unbounded.")
            continue
        assign_carriers(n)
        assign_locations(n)

        for output in outputs:
            df[output] = globals()["calculate_" + output](n, label, df[output])

    return df

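# Illustrative sketch, not part of this commit: make_summaries() dispatches on
# the "outputs" list by looking up each calculate_* function via globals().
# The same pattern in miniature:
def calculate_foo(n, label, df):
    df[label] = "foo"
    return df


results = {}
for output in ["foo"]:
    results[output] = globals()["calculate_" + output](None, "run1", {})
print(results)  # {'foo': {'run1': 'foo'}}
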
def to_csv(df):
    for key in df:
        df[key] = df[key].apply(lambda x: pd.to_numeric(x))
        df[key].to_csv(snakemake.output[key])


# %%
if __name__ == "__main__":
    # Detect running outside of snakemake and mock snakemake for testing
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("make_summary_perfect")

    run = snakemake.config["run"]["name"]
    if run != "":
        run += "/"

    networks_dict = {
        (clusters, lv, opts + sector_opts): "results/"
        + run
        + f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
        for simpl in snakemake.config["scenario"]["simpl"]
        for clusters in snakemake.config["scenario"]["clusters"]
        for opts in snakemake.config["scenario"]["opts"]
        for sector_opts in snakemake.config["scenario"]["sector_opts"]
        for lv in snakemake.config["scenario"]["ll"]
    }

    print(networks_dict)

    nyears = 1
    costs_db = prepare_costs(
        snakemake.input.costs,
        snakemake.config["costs"],
        nyears,
    )

    df = make_summaries(networks_dict)

    df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum()

    to_csv(df)

@@ -24,7 +24,7 @@ from make_summary import assign_carriers
 from plot_summary import preferred_order, rename_techs
 from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches

-plt.style.use(["ggplot", "matplotlibrc"])
+plt.style.use(["ggplot"])


 def rename_techs_tyndp(tech):
@@ -913,6 +913,159 @@ def plot_series(network, carrier="AC", name="test"):
     )


+def plot_map_perfect(
+    network,
+    components=["Link", "Store", "StorageUnit", "Generator"],
+    bus_size_factor=1.7e10,
+):
+    n = network.copy()
+    assign_location(n)
+    # Drop non-electric buses so they don't clutter the plot
+    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
+    # investment periods
+    investments = n.snapshots.levels[0]
+
+    costs = {}
+    for comp in components:
+        df_c = n.df(comp)
+        if df_c.empty:
+            continue
+        df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
+
+        attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
+
+        active = pd.concat(
+            [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
+            axis=1,
+        ).astype(int)
+        capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
+        capital_cost_t = (
+            (active.mul(capital_cost, axis=0))
+            .groupby([n.df(comp).location, n.df(comp).nice_group])
+            .sum()
+        )
+
+        capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
+
+        costs[comp] = capital_cost_t
+
+    costs = pd.concat(costs).groupby(level=[1, 2]).sum()
+    costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
+
+    new_columns = preferred_order.intersection(costs.index.levels[1]).append(
+        costs.index.levels[1].difference(preferred_order)
+    )
+    costs = costs.reindex(new_columns, level=1)
+
+    for item in new_columns:
+        if item not in snakemake.config["plotting"]["tech_colors"]:
+            print(
+                "Warning!",
+                item,
+                "not in config/plotting/tech_colors, assign random color",
+            )
+            snakemake.config["plotting"]["tech_colors"][item] = "pink"
+
+    n.links.drop(
+        n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
+        inplace=True,
+    )
+
+    # drop non-bus entries
+    to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
+    if len(to_drop) != 0:
+        print("dropping non-buses", to_drop)
+        costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
+
+    # make sure they are removed from the index
+    costs.index = pd.MultiIndex.from_tuples(costs.index.values)
+
+    # PDF has minimum width, so lines below the threshold are set to zero
+    line_lower_threshold = 0.0
+    line_upper_threshold = 1e4
+    linewidth_factor = 2e3
+    ac_color = "gray"
+    dc_color = "m"
+    title = "Today's transmission"
+
+    line_widths = n.lines.s_nom_opt
+    link_widths = n.links.p_nom_opt
+
+    line_widths[line_widths < line_lower_threshold] = 0.0
+    link_widths[link_widths < line_lower_threshold] = 0.0
+
+    line_widths[line_widths > line_upper_threshold] = line_upper_threshold
+    link_widths[link_widths > line_upper_threshold] = line_upper_threshold
+
+    for year in costs.columns:
+        fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
+        fig.set_size_inches(7, 6)
+        fig.suptitle(year)
+
+        n.plot(
+            bus_sizes=costs[year] / bus_size_factor,
+            bus_colors=snakemake.config["plotting"]["tech_colors"],
+            line_colors=ac_color,
+            link_colors=dc_color,
+            line_widths=line_widths / linewidth_factor,
+            link_widths=link_widths / linewidth_factor,
+            ax=ax,
+            **map_opts,
+        )
+
+        sizes = [20, 10, 5]
+        labels = [f"{s} bEUR/a" for s in sizes]
+        sizes = [s / bus_size_factor * 1e9 for s in sizes]
+
+        legend_kw = dict(
+            loc="upper left",
+            bbox_to_anchor=(0.01, 1.06),
+            labelspacing=0.8,
+            frameon=False,
+            handletextpad=0,
+            title="system cost",
+        )
+
+        add_legend_circles(
+            ax,
+            sizes,
+            labels,
+            srid=n.srid,
+            patch_kw=dict(facecolor="lightgrey"),
+            legend_kw=legend_kw,
+        )
+
+        sizes = [10, 5]
+        labels = [f"{s} GW" for s in sizes]
+        scale = 1e3 / linewidth_factor
+        sizes = [s * scale for s in sizes]
+
+        legend_kw = dict(
+            loc="upper left",
+            bbox_to_anchor=(0.27, 1.06),
+            frameon=False,
+            labelspacing=0.8,
+            handletextpad=1,
+            title=title,
+        )
+
+        add_legend_lines(
+            ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
+        )
+
+        legend_kw = dict(
+            bbox_to_anchor=(1.52, 1.04),
+            frameon=False,
+        )
+
+        fig.savefig(
+            snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
+        )
+
+
+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -921,10 +1074,9 @@ if __name__ == "__main__":
         "plot_network",
         simpl="",
         opts="",
-        clusters="5",
-        ll="v1.5",
-        sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1",
-        planning_horizons="2030",
+        clusters="37",
+        ll="v1.0",
+        sector_opts="4380H-T-H-B-I-A-solar+p3-dist1",
     )

     logging.basicConfig(level=snakemake.config["logging"]["level"])
@@ -938,16 +1090,23 @@ if __name__ == "__main__":
     if map_opts["boundaries"] is None:
         map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]

-    plot_map(
-        n,
-        components=["generators", "links", "stores", "storage_units"],
-        bus_size_factor=2e10,
-        transmission=False,
-    )
+    if snakemake.params["foresight"] == "perfect":
+        plot_map_perfect(
+            n,
+            components=["Link", "Store", "StorageUnit", "Generator"],
+            bus_size_factor=2e10,
+        )
+    else:
+        plot_map(
+            n,
+            components=["generators", "links", "stores", "storage_units"],
+            bus_size_factor=2e10,
+            transmission=False,
+        )

     plot_h2_map(n, regions)
     plot_ch4_map(n)
     plot_map_without(n)

     # plot_series(n, carrier="AC", name=suffix)
     # plot_series(n, carrier="heat", name=suffix)
116  scripts/plot_statistics.py  Normal file
@@ -0,0 +1,116 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import matplotlib.pyplot as plt
import pypsa
import seaborn as sns
from _helpers import configure_logging

sns.set_theme("paper", style="whitegrid")


if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "plot_elec_statistics",
            simpl="",
            opts="Ept-12h",
            clusters="37",
            ll="v1.0",
        )
    configure_logging(snakemake)

    n = pypsa.Network(snakemake.input.network)

    n.loads.carrier = "load"
    n.carriers.loc["load", ["nice_name", "color"]] = "Load", "darkred"
    colors = n.carriers.set_index("nice_name").color.where(
        lambda s: s != "", "lightgrey"
    )

    # %%

    def rename_index(ds):
        specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})")
        generic = ds.index.get_level_values("carrier")
        duplicated = generic.duplicated(keep=False)
        index = specific.where(duplicated, generic)
        return ds.set_axis(index)

    def plot_static_per_carrier(ds, ax, drop_zero=True):
        if drop_zero:
            ds = ds[ds != 0]
        ds = ds.dropna()
        c = colors[ds.index.get_level_values("carrier")]
        ds = ds.pipe(rename_index)
        label = f"{ds.attrs['name']} [{ds.attrs['unit']}]"
        ds.plot.barh(color=c.values, xlabel=label, ax=ax)
        ax.grid(axis="y")
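    # Illustrative sketch, not part of this commit: rename_index() keeps the
    # plain carrier name unless it occurs under several components, in which
    # case the component is appended to disambiguate.
    import pandas as pd  # local import for this sketch only

    demo = pd.Series(
        [1.0, 2.0, 3.0],
        index=pd.MultiIndex.from_tuples(
            [("Generator", "gas"), ("Link", "gas"), ("Generator", "solar")],
            names=["component", "carrier"],
        ),
    )
    print(rename_index(demo).index.tolist())  # ['gas\n(Generator)', 'gas\n(Link)', 'solar']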
    fig, ax = plt.subplots()
    ds = n.statistics.capacity_factor().dropna()
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.capacity_factor_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.installed_capacity().dropna()
    ds = ds.drop("Line")
    ds = ds.drop(("Generator", "Load"))
    ds = ds / 1e3
    ds.attrs["unit"] = "GW"
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.installed_capacity_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.optimal_capacity()
    ds = ds.drop("Line")
    ds = ds.drop(("Generator", "Load"))
    ds = ds / 1e3
    ds.attrs["unit"] = "GW"
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.optimal_capacity_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.capex()
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.capital_expenditure_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.opex()
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.operational_expenditure_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.curtailment()
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.curtailment_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.supply()
    ds = ds.drop("Line")
    ds = ds / 1e6
    ds.attrs["unit"] = "TWh"
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.supply_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.withdrawal()
    ds = ds.drop("Line")
    ds = ds / -1e6
    ds.attrs["unit"] = "TWh"
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.withdrawal_bar)

    fig, ax = plt.subplots()
    ds = n.statistics.market_value()
    plot_static_per_carrier(ds, ax)
    fig.savefig(snakemake.output.market_value_bar)

    # touch file
    with open(snakemake.output.barplots_touch, "a"):
        pass
@@ -49,6 +49,10 @@ def rename_techs(label):
         # "H2 Fuel Cell": "hydrogen storage",
         # "H2 pipeline": "hydrogen storage",
         "battery": "battery storage",
+        "H2 for industry": "H2 for industry",
+        "land transport fuel cell": "land transport fuel cell",
+        "land transport oil": "land transport oil",
+        "oil shipping": "shipping oil",
         # "CC": "CC"
     }

@@ -157,11 +161,11 @@ def plot_costs():
         df.index.difference(preferred_order)
     )

-    new_columns = df.sum().sort_values().index
+    # new_columns = df.sum().sort_values().index

     fig, ax = plt.subplots(figsize=(12, 8))

-    df.loc[new_index, new_columns].T.plot(
+    df.loc[new_index].T.plot(
         kind="bar",
         ax=ax,
         stacked=True,
@@ -213,17 +217,22 @@ def plot_energy():

     logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")

+    if df.empty:
+        fig, ax = plt.subplots(figsize=(12, 8))
+        fig.savefig(snakemake.output.energy, bbox_inches="tight")
+        return
+
     new_index = preferred_order.intersection(df.index).append(
         df.index.difference(preferred_order)
     )

-    new_columns = df.columns.sort_values()
+    # new_columns = df.columns.sort_values()

     fig, ax = plt.subplots(figsize=(12, 8))

-    logger.debug(df.loc[new_index, new_columns])
+    logger.debug(df.loc[new_index])

-    df.loc[new_index, new_columns].T.plot(
+    df.loc[new_index].T.plot(
         kind="bar",
         ax=ax,
         stacked=True,
@@ -267,8 +276,6 @@ def plot_balances():
         i for i in balances_df.index.levels[0] if i not in co2_carriers
     ]

-    fig, ax = plt.subplots(figsize=(12, 8))
-
     for k, v in balances.items():
         df = balances_df.loc[v]
         df = df.groupby(df.index.get_level_values(2)).sum()
@@ -279,7 +286,7 @@ def plot_balances():
         # remove trailing link ports
         df.index = [
             i[:-1]
-            if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"]))
+            if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"]))
             else i
             for i in df.index
         ]
@@ -313,6 +320,8 @@ def plot_balances():

         new_columns = df.columns.sort_values()

+        fig, ax = plt.subplots(figsize=(12, 8))
+
         df.loc[new_index, new_columns].T.plot(
             kind="bar",
             ax=ax,
@@ -345,8 +354,6 @@ def plot_balances():

         fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")

-        plt.cla()
-

 def historical_emissions(countries):
     """
@@ -354,8 +361,7 @@ def historical_emissions(countries):
     """
     # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
    # downloaded 201228 (modified by EEA last on 201221)
-    fn = "data/eea/UNFCCC_v23.csv"
-    df = pd.read_csv(fn, encoding="latin-1")
+    df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False)
     df.loc[df["Year"] == "1985-1987", "Year"] = 1986
     df["Year"] = df["Year"].astype(int)
     df = df.set_index(
@@ -379,15 +385,21 @@ def historical_emissions(countries):
     e["waste management"] = "5 - Waste management"
     e["other"] = "6 - Other Sector"
     e["indirect"] = "ind_CO2 - Indirect CO2"
-    e["total wL"] = "Total (with LULUCF)"
-    e["total woL"] = "Total (without LULUCF)"
+    e["other LULUCF"] = "4.H - Other LULUCF"

     pol = ["CO2"]  # ["All greenhouse gases - (CO2 equivalent)"]
     if "GB" in countries:
         countries.remove("GB")
         countries.append("UK")

-    year = np.arange(1990, 2018).tolist()
+    year = df.index.levels[0][df.index.levels[0] >= 1990]
+
+    missing = pd.Index(countries).difference(df.index.levels[2])
+    if not missing.empty:
+        logger.warning(
+            f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
+        )
+    countries = pd.Index(df.index.levels[2]).intersection(countries)

     idx = pd.IndexSlice
     co2_totals = (
@@ -450,25 +462,52 @@ def plot_carbon_budget_distribution(input_eurostat):
     plt.rcParams["xtick.labelsize"] = 20
     plt.rcParams["ytick.labelsize"] = 20

+    emissions_scope = snakemake.params.emissions_scope
+    report_year = snakemake.params.eurostat_report_year
+    input_co2 = snakemake.input.co2
+
+    # historic emissions
+    countries = snakemake.params.countries
+    e_1990 = co2_emissions_year(
+        countries,
+        input_eurostat,
+        opts,
+        emissions_scope,
+        report_year,
+        input_co2,
+        year=1990,
+    )
+    emissions = historical_emissions(countries)
+    # add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
+    emissions.loc[2019] = 2.971372
+    emissions.loc[2020] = 2.691958
+    emissions.loc[2021] = 2.869355
+
+    if snakemake.config["foresight"] == "myopic":
+        path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
+        co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
+            ["cb"]
+        ]
+        co2_cap *= e_1990
+    else:
+        supply_energy = pd.read_csv(
+            snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
+        )
+        co2_cap = (
+            supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
+        )
+        co2_cap.rename(index=lambda x: int(x), inplace=True)
+
     plt.figure(figsize=(10, 7))
     gs1 = gridspec.GridSpec(1, 1)
     ax1 = plt.subplot(gs1[0, 0])
-    ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
-    ax1.set_ylim([0, 5])
+    ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22)
+    # ax1.set_ylim([0, 5])
     ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])

-    path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
-    countries = snakemake.params.countries
-    e_1990 = co2_emissions_year(countries, input_eurostat, opts, year=1990)
-    CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
-
-    ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
-
-    emissions = historical_emissions(countries)
-
     ax1.plot(emissions, color="black", linewidth=3, label=None)

-    # plot committed and uder-discussion targets
+    # plot committed and under-discussion targets
     # (notice that historical emissions include all countries in the
     # network, but targets refer to EU)
     ax1.plot(
@@ -485,7 +524,7 @@ def plot_carbon_budget_distribution(input_eurostat):
         [0.45 * emissions[1990]],
         marker="*",
         markersize=12,
-        markerfacecolor="white",
+        markerfacecolor="black",
         markeredgecolor="black",
     )

@@ -509,21 +548,7 @@ def plot_carbon_budget_distribution(input_eurostat):

     ax1.plot(
         [2050],
-        [0.01 * emissions[1990]],
-        marker="*",
-        markersize=12,
-        markerfacecolor="white",
-        linewidth=0,
-        markeredgecolor="black",
-        label="EU under-discussion target",
-        zorder=10,
-        clip_on=False,
-    )
-
-    ax1.plot(
-        [2050],
-        [0.125 * emissions[1990]],
-        "ro",
+        [0.0 * emissions[1990]],
         marker="*",
         markersize=12,
         markerfacecolor="black",
@@ -531,14 +556,19 @@ def plot_carbon_budget_distribution(input_eurostat):
         label="EU committed target",
     )

+    for col in co2_cap.columns:
+        ax1.plot(co2_cap[col], linewidth=3, label=col)
+
     ax1.legend(
         fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
     )

-    path_cb_plot = "results/" + snakemake.params.RDIR + "/graphs/"
-    plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)
+    plt.grid(axis="y")
+    path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
+    plt.savefig(path, bbox_inches="tight")


+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -557,6 +587,7 @@ if __name__ == "__main__":

     for sector_opts in snakemake.params.sector_opts:
         opts = sector_opts.split("-")
-        for o in opts:
-            if "cb" in o:
-                plot_carbon_budget_distribution(snakemake.input.eurostat)
+        if any(["cb" in o for o in opts]) or (
+            snakemake.config["foresight"] == "perfect"
+        ):
+            plot_carbon_budget_distribution(snakemake.input.eurostat)
242  scripts/plot_validation_cross_border_flows.py  Normal file
@@ -0,0 +1,242 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import country_converter as coco
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging

sns.set_theme("paper", style="whitegrid")

cc = coco.CountryConverter()

color_country = {
    "AL": "#440154",
    "AT": "#482677",
    "BA": "#43398e",
    "BE": "#3953a4",
    "BG": "#2c728e",
    "CH": "#228b8d",
    "CZ": "#1f9d8a",
    "DE": "#29af7f",
    "DK": "#3fbc73",
    "EE": "#5ec962",
    "ES": "#84d44b",
    "FI": "#addc30",
    "FR": "#d8e219",
    "GB": "#fde725",
    "GR": "#f0f921",
    "HR": "#f1c25e",
    "HU": "#f4a784",
    "IE": "#f78f98",
    "IT": "#f87ea0",
    "LT": "#f87a9a",
    "LU": "#f57694",
    "LV": "#f3758d",
    "ME": "#f37685",
    "MK": "#f37b7c",
    "NL": "#FF6666",
    "NO": "#FF3333",
    "PL": "#eb0000",
    "PT": "#d70000",
    "RO": "#c00000",
    "RS": "#a50000",
    "SE": "#8a0000",
    "SI": "#6f0000",
    "SK": "#550000",
}

def sort_one_country(country, df):
    indices = [link for link in df.columns if country in link]
    df_country = df[indices].copy()
    for link in df_country.columns:
        if country in link[5:]:
            df_country[link] = -df_country[link]
            link_reverse = str(link[5:] + " - " + link[:2])
            df_country = df_country.rename(columns={link: link_reverse})

    return df_country.reindex(sorted(df_country.columns), axis=1)

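# Illustrative sketch, not part of this commit: sort_one_country() re-orients
# every link labelled "XX - <country>" to "<country> - XX" and flips the flow
# sign, so positive values consistently mean export from <country>.
import pandas as pd

flows = pd.DataFrame({"DE - FR": [1.0, -2.0], "NL - DE": [3.0, 4.0]})
print(sort_one_country("DE", flows).columns.tolist())  # ['DE - FR', 'DE - NL']
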
def cross_border_time_series(countries, data):
    fig, ax = plt.subplots(2 * len(countries), 1, figsize=(15, 10 * len(countries)))
    axis = 0

    for country in countries:
        ymin = 0
        ymax = 0
        for df in data:
            df_country = sort_one_country(country, df)
            df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)

            color = [color_country[link[5:]] for link in df_country.columns]

            df_pos.plot.area(
                ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
            )

            df_neg.plot.area(
                ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
            )
            if (axis % 2) == 0:
                title = "Historic"
            else:
                title = "Optimized"

            ax[axis].set_title(
                title + " Import / Export for " + cc.convert(country, to="name_short")
            )

            # Custom legend elements
            legend_elements = []

            for link in df_country.columns:
                legend_elements = legend_elements + [
                    plt.fill_between(
                        [],
                        [],
                        color=color_country[link[5:]],
                        label=cc.convert(link[5:], to="name_short"),
                    )
                ]

            # Create the legend
            ax[axis].legend(handles=legend_elements, loc="upper right")

            # rescale the y axis
            neg_min = df_neg.sum(axis=1).min() * 1.2
            if neg_min < ymin:
                ymin = neg_min

            pos_max = df_pos.sum(axis=1).max() * 1.2
            if pos_max > ymax:
                ymax = pos_max

            axis = axis + 1

        # apply the common limits to this country's pair of panels
        for x in range(axis - 2, axis):
            ax[x].set_ylim([ymin, ymax])

    fig.savefig(snakemake.output.trade_time_series, bbox_inches="tight")

def cross_border_bar(countries, data):
    df_positive = pd.DataFrame()
    df_negative = pd.DataFrame()
    color = []

    for country in countries:
        order = 0
        for df in data:
            df_country = sort_one_country(country, df)
            df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)

            if (order % 2) == 0:
                title = "Historic"
            else:
                title = "Optimized"

            df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
                {0: title + " " + cc.convert(country, to="name_short")}
            )
            df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
                {0: title + " " + cc.convert(country, to="name_short")}
            )

            df_positive = pd.concat([df_positive_new, df_positive])
            df_negative = pd.concat([df_negative_new, df_negative])

            order = order + 1

    color = [color_country[link[5:]] for link in df_positive.columns]

    fig, ax = plt.subplots(figsize=(15, 60))

    df_positive.plot.barh(ax=ax, stacked=True, color=color, zorder=2)
    df_negative.plot.barh(ax=ax, stacked=True, color=color, zorder=2)

    plt.grid(axis="x", zorder=0)
    plt.grid(axis="y", zorder=0)

    # Custom legend elements
    legend_elements = []

    for country in list(color_country.keys()):
        legend_elements = legend_elements + [
            plt.fill_between(
                [],
                [],
                color=color_country[country],
                label=cc.convert(country, to="name_short"),
            )
        ]

    # Create the legend
    plt.legend(handles=legend_elements, loc="upper right")

    fig.savefig(snakemake.output.cross_border_bar, bbox_inches="tight")

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "plot_electricity_prices",
            simpl="",
            opts="Ept-12h",
            clusters="37",
            ll="v1.0",
        )
    configure_logging(snakemake)

    countries = snakemake.params.countries

    n = pypsa.Network(snakemake.input.network)
    n.loads.carrier = "load"

    historic = pd.read_csv(
        snakemake.input.cross_border_flows,
        index_col=0,
        header=0,
        parse_dates=True,
    )

    if len(historic.index) > len(n.snapshots):
        historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]

    # Prepare network data to be shaped similarly to the ENTSO-E data structure
    optimized_links = n.links_t.p0.rename(
        columns=dict(n.links.bus0.str[:2] + " - " + n.links.bus1.str[:2])
    )
    optimized_lines = n.lines_t.p0.rename(
        columns=dict(n.lines.bus0.str[:2] + " - " + n.lines.bus1.str[:2])
    )
    optimized = pd.concat([optimized_links, optimized_lines], axis=1)

    # Drop connections internal to a single country
    optimized.drop(
        [c for c in optimized.columns if c[:2] == c[5:]], axis=1, inplace=True
    )

    # Align column names so both directions of a border share one label
    for c1 in optimized.columns:
        for c2 in optimized.columns:
            if c1[:2] == c2[5:] and c2[:2] == c1[5:]:
                optimized = optimized.rename(columns={c1: c2})

    optimized = optimized.groupby(lambda x: x, axis=1).sum()

    cross_border_bar(countries, [historic, optimized])

    cross_border_time_series(countries, [historic, optimized])

    # touch file
    with open(snakemake.output.plots_touch, "a"):
        pass
63  scripts/plot_validation_electricity_prices.py  Normal file
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier

sns.set_theme("paper", style="whitegrid")

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "plot_electricity_prices",
            simpl="",
            opts="Ept-12h",
            clusters="37",
            ll="v1.0",
        )
    configure_logging(snakemake)

    n = pypsa.Network(snakemake.input.network)
    n.loads.carrier = "load"

    historic = pd.read_csv(
        snakemake.input.electricity_prices,
        index_col=0,
        header=0,
        parse_dates=True,
    )

    if len(historic.index) > len(n.snapshots):
        historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]

    optimized = n.buses_t.marginal_price.groupby(n.buses.country, axis=1).mean()

    data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
    data.columns.names = ["Kind", "Country"]

    fig, ax = plt.subplots(figsize=(6, 6))

    df = data.mean().unstack().T
    df.plot.barh(ax=ax, xlabel="Electricity Price [€/MWh]", ylabel="")
    ax.grid(axis="y")
    fig.savefig(snakemake.output.price_bar, bbox_inches="tight")

    fig, ax = plt.subplots()

    df = data.groupby(level="Kind", axis=1).mean()
    df.plot(ax=ax, xlabel="", ylabel="Electricity Price [€/MWh]", alpha=0.8)
    ax.grid(axis="x")
    fig.savefig(snakemake.output.price_line, bbox_inches="tight")

    # touch file
    with open(snakemake.output.plots_touch, "a"):
        pass
144  scripts/plot_validation_electricity_production.py  Normal file
@@ -0,0 +1,144 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier

sns.set_theme("paper", style="whitegrid")

carrier_groups = {
    "Offshore Wind (AC)": "Offshore Wind",
    "Offshore Wind (DC)": "Offshore Wind",
    "Open-Cycle Gas": "Gas",
    "Combined-Cycle Gas": "Gas",
    "Reservoir & Dam": "Hydro",
    "Pumped Hydro Storage": "Hydro",
}


if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "plot_validation_electricity_production",
            simpl="",
            opts="Ept",
            clusters="37c",
            ll="v1.0",
        )
    configure_logging(snakemake)

    n = pypsa.Network(snakemake.input.network)
    n.loads.carrier = "load"

    historic = pd.read_csv(
        snakemake.input.electricity_production,
        index_col=0,
        header=[0, 1],
        parse_dates=True,
    )

    colors = n.carriers.set_index("nice_name").color.where(
        lambda s: s != "", "lightgrey"
    )
    colors["Offshore Wind"] = colors["Offshore Wind (AC)"]
    colors["Gas"] = colors["Combined-Cycle Gas"]
    colors["Hydro"] = colors["Reservoir & Dam"]
    colors["Other"] = "lightgray"

    if len(historic.index) > len(n.snapshots):
        historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]

    optimized = n.statistics.dispatch(
        groupby=get_bus_and_carrier, aggregate_time=False
    ).T
    optimized = optimized[["Generator", "StorageUnit"]].droplevel(0, axis=1)
    optimized = optimized.rename(columns=n.buses.country, level=0)
    optimized = optimized.rename(columns=carrier_groups, level=1)
    optimized = optimized.groupby(axis=1, level=[0, 1]).sum()

    data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
    data.columns.names = ["Kind", "Country", "Carrier"]
    data = data.mul(n.snapshot_weightings.generators, axis=0)

    # total production per carrier
    fig, ax = plt.subplots(figsize=(6, 6))

    df = data.groupby(level=["Kind", "Carrier"], axis=1).sum().sum().unstack().T
    df = df / 1e6  # TWh
    df.plot.barh(ax=ax, xlabel="Electricity Production [TWh]", ylabel="")
    ax.grid(axis="y")
    fig.savefig(snakemake.output.production_bar, bbox_inches="tight")

    # highest diffs

    fig, ax = plt.subplots(figsize=(6, 10))

    df = data.sum() / 1e6  # TWh
    df = df["Optimized"] - df["Historic"]
    df = df.dropna().sort_values()
    df = pd.concat([df.iloc[:5], df.iloc[-5:]])
    c = colors[df.index.get_level_values(1)]
    df.plot.barh(
        xlabel="Optimized Production - Historic Production [TWh]", ax=ax, color=c.values
    )
    ax.set_title("Strongest Deviations")
    ax.grid(axis="y")
    fig.savefig(snakemake.output.production_deviation_bar, bbox_inches="tight")

    # seasonal operation

    fig, axes = plt.subplots(3, 1, figsize=(9, 9))

    df = (
        data.groupby(level=["Kind", "Carrier"], axis=1)
        .sum()
        .resample("1W")
        .mean()
        .clip(lower=0)
    )
    df = df / 1e3

    order = (
        (df["Historic"].diff().abs().sum() / df["Historic"].sum()).sort_values().index
    )
    c = colors[order]
    optimized = df["Optimized"].reindex(order, axis=1, level=1)
    historical = df["Historic"].reindex(order, axis=1, level=1)

    kwargs = dict(color=c, legend=False, ylabel="Production [GW]", xlabel="")

    optimized.plot.area(ax=axes[0], **kwargs, title="Optimized")
    historical.plot.area(ax=axes[1], **kwargs, title="Historic")

    diff = optimized - historical
    diff.clip(lower=0).plot.area(
        ax=axes[2], **kwargs, title=r"$\Delta$ (Optimized - Historic)"
    )
    lim = axes[2].get_ylim()[1]
    diff.clip(upper=0).plot.area(ax=axes[2], **kwargs)
    axes[2].set_ylim(bottom=-lim, top=lim)

    h, l = axes[0].get_legend_handles_labels()
    fig.legend(
        h[::-1],
        l[::-1],
        loc="center left",
        bbox_to_anchor=(1, 0.5),
        ncol=1,
        frameon=False,
        labelspacing=1,
    )
    fig.savefig(snakemake.output.seasonal_operation_area, bbox_inches="tight")

    # touch file
    with open(snakemake.output.plots_touch, "a"):
        pass
|
|||||||
import pypsa
|
import pypsa
|
||||||
from _helpers import configure_logging
|
from _helpers import configure_logging
|
||||||
from add_electricity import load_costs, update_transmission_costs
|
from add_electricity import load_costs, update_transmission_costs
|
||||||
|
from pypsa.descriptors import expand_series
|
||||||
|
|
||||||
idx = pd.IndexSlice
|
idx = pd.IndexSlice
|
||||||
|
|
||||||
@@ -103,10 +104,30 @@ def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
     ).sum(axis=1)
     gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
     n.generators["marginal_cost"] += gen_ep
+    n.generators_t["marginal_cost"] += gen_ep[n.generators_t["marginal_cost"].columns]
     su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
     n.storage_units["marginal_cost"] += su_ep


+def add_dynamic_emission_prices(n):
+    co2_price = pd.read_csv(snakemake.input.co2_price, index_col=0, parse_dates=True)
+    co2_price = co2_price[~co2_price.index.duplicated()]
+    co2_price = (
+        co2_price.reindex(n.snapshots).fillna(method="ffill").fillna(method="bfill")
+    )
+
+    emissions = (
+        n.generators.carrier.map(n.carriers.co2_emissions) / n.generators.efficiency
+    )
+    co2_cost = expand_series(emissions, n.snapshots).T.mul(co2_price.iloc[:, 0], axis=0)
+
+    static = n.generators.marginal_cost
+    dynamic = n.get_switchable_as_dense("Generator", "marginal_cost")
+
+    marginal_cost = dynamic + co2_cost.reindex(columns=dynamic.columns, fill_value=0)
+    n.generators_t.marginal_cost = marginal_cost.loc[:, marginal_cost.ne(static).any()]
+
+
 def set_line_s_max_pu(n, s_max_pu=0.7):
     n.lines["s_max_pu"] = s_max_pu
     logger.info(f"N-1 security margin of lines set to {s_max_pu}")
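
# Illustration (not part of this commit): a minimal, pandas-only sketch of the
# broadcasting step inside add_dynamic_emission_prices(), with made-up numbers
# instead of a PyPSA network. A single CO2 price series (EUR/tCO2) becomes a
# per-generator, per-snapshot cost adder via each generator's specific
# emissions (tCO2/MWh_el).
import numpy as np
import pandas as pd

snapshots = pd.date_range("2019-01-01", periods=4, freq="h")
co2_price = pd.Series([25.0, 26.0, 24.5, 25.5], index=snapshots)  # EUR/tCO2

# specific emissions = carrier emission factor / efficiency (tCO2/MWh_el)
emissions = pd.Series({"coal": 0.34 / 0.40, "gas": 0.19 / 0.58})

# outer product: rows = snapshots, columns = generators
co2_cost = pd.DataFrame(
    np.outer(co2_price, emissions), index=snapshots, columns=emissions.index
)

static = pd.Series({"coal": 30.0, "gas": 50.0})  # static marginal costs
marginal_cost = co2_cost.add(static, axis=1)  # time-dependent total cost
print(marginal_cost.round(2))
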
@@ -253,12 +274,13 @@ def set_line_nom_max(
     n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)


+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

         snakemake = mock_snakemake(
-            "prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H"
+            "prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept"
         )
     configure_logging(snakemake)

@@ -332,7 +354,12 @@ if __name__ == "__main__":
             c.df.loc[sel, attr] *= factor

     for o in opts:
-        if "Ep" in o:
+        if "Ept" in o:
+            logger.info(
+                "Setting time-dependent emission prices according to spot market price"
+            )
+            add_dynamic_emission_prices(n)
+        elif "Ep" in o:
             m = re.findall("[0-9]*\.?[0-9]+$", o)
             if len(m) > 0:
                 logger.info("Setting emission prices according to wildcard value.")
553 scripts/prepare_perfect_foresight.py Normal file
@@ -0,0 +1,553 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Concatenate PyPSA networks of single investment periods into one network.
"""

import logging
import re

import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
from six import iterkeys

logger = logging.getLogger(__name__)


# helper functions ---------------------------------------------------
def get_missing(df, n, c):
    """
    Get the assets of df for component c that are missing in network n.

    Input:
        df: pandas DataFrame, static values of pypsa components
        n : pypsa Network to which new assets should be added
        c : string, pypsa component.list_name (e.g. "generators")
    Return:
        pd.DataFrame with static values of missing assets
    """
    df_final = getattr(n, c)
    missing_i = df.index.difference(df_final.index)
    return df.loc[missing_i]


def get_social_discount(t, r=0.01):
    """
    Calculate the social discount for a given time t and social discount
    rate r [per unit].
    """
    return 1 / (1 + r) ** t


def get_investment_weighting(time_weighting, r=0.01):
    """
    Define cost weighting.

    Returns cost weightings depending on the time_weighting (pd.Series)
    and the social discount rate r.
    """
    end = time_weighting.cumsum()
    start = time_weighting.cumsum().shift().fillna(0)
    return pd.concat([start, end], axis=1).apply(
        lambda x: sum([get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))]),
        axis=1,
    )

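# Illustrative, self-contained check of the weighting above (example values,
# not part of the workflow): for three ten-year horizons and r = 0.01, the
# objective weighting of each period is the sum of the social discount
# factors over its years, counted from the start of the first period.
def _example_investment_weighting():
    time_weighting = pd.Series([10, 10, 10], index=[2030, 2040, 2050])
    weights = get_investment_weighting(time_weighting, r=0.01)
    # 2030: sum over t = 0..9   ~ 9.566
    # 2040: sum over t = 10..19 ~ 8.660
    # 2050: sum over t = 20..29 ~ 7.841
    return weights

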
def add_year_to_constraints(n, baseyear):
    """
    Add investment period to global constraints and rename index.

    Parameters
    ----------
    n : pypsa.Network
    baseyear : int
        year in which optimized assets are built
    """

    for c in n.iterate_components(["GlobalConstraint"]):
        c.df["investment_period"] = baseyear
        c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)


def hvdc_transport_model(n):
    """
    Convert AC lines to DC links for multi-decade optimisation with line
    expansion.

    Losses of DC links are assumed to be 3% per 1000 km.
    """

    logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")

    n.madd(
        "Link",
        n.lines.index,
        bus0=n.lines.bus0,
        bus1=n.lines.bus1,
        p_nom_extendable=True,
        p_nom=n.lines.s_nom,
        p_nom_min=n.lines.s_nom,
        p_min_pu=-1,
        efficiency=1 - 0.03 * n.lines.length / 1000,
        marginal_cost=0,
        carrier="DC",
        length=n.lines.length,
        capital_cost=n.lines.capital_cost,
    )

    # Remove AC lines
    logger.info("Removing AC lines")
    lines_rm = n.lines.index
    n.mremove("Line", lines_rm)

    # Set efficiency of all DC links to include losses depending on length
    n.links.loc[n.links.carrier == "DC", "efficiency"] = (
        1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
    )

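# Illustrative check of the loss assumption above (3% per 1000 km): a 500 km
# DC link receives an efficiency of 1 - 0.03 * 500 / 1000 = 0.985, i.e. 1.5%
# of the transported energy is lost.
def _example_hvdc_efficiency():
    return {length: 1 - 0.03 * length / 1000 for length in (250, 500, 1000, 2000)}

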
def adjust_electricity_grid(n, year, years):
    """
    Add carrier to lines. Replace AC lines with DC links in case of line
    expansion. Add lifetime to DC links in case of line expansion.

    Parameters
    ----------
    n : pypsa.Network
    year : int
        year in which optimized assets are built
    years: list
        investment periods
    """
    n.lines["carrier"] = "AC"
    links_i = n.links[n.links.carrier == "DC"].index
    if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
        hvdc_transport_model(n)
        links_i = n.links[n.links.carrier == "DC"].index
        n.links.loc[links_i, "lifetime"] = 100
        if year != years[0]:
            n.links.loc[links_i, "p_nom_min"] = 0
            n.links.loc[links_i, "p_nom"] = 0


# --------------------------------------------------------------------
def concat_networks(years):
    """
    Concatenate the given PyPSA networks and add the build_year.

    Return:
        n : pypsa.Network for the whole planning horizon
    """

    # input paths of sector coupling networks
    network_paths = [snakemake.input.brownfield_network] + [
        snakemake.input[f"network_{year}"] for year in years[1:]
    ]
    # final concatenated network
    n = pypsa.Network()

    # iterate over single year networks and concat to perfect foresight network
    for i, network_path in enumerate(network_paths):
        year = years[i]
        network = pypsa.Network(network_path)
        adjust_electricity_grid(network, year, years)
        add_build_year_to_new_assets(network, year)

        # static ----------------------------------
        # (1) add buses and carriers
        for component in network.iterate_components(["Bus", "Carrier"]):
            df_year = component.df
            # get missing assets
            missing = get_missing(df_year, n, component.list_name)
            import_components_from_dataframe(n, missing, component.name)
        # (2) add generators, links, stores and loads
        for component in network.iterate_components(
            ["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
        ):
            df_year = component.df.copy()
            missing = get_missing(df_year, n, component.list_name)

            import_components_from_dataframe(n, missing, component.name)

        # time variant --------------------------------------------------
        network_sns = pd.MultiIndex.from_product([[year], network.snapshots])
        snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns)
        n.set_snapshots(snapshots)

        for component in network.iterate_components():
            pnl = getattr(n, component.list_name + "_t")
            for k in iterkeys(component.pnl):
                pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
                if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
                    continue
                if component.name == "Load":
                    static_load = network.loads.loc[network.loads.p_set != 0]
                    static_load_t = expand_series(static_load.p_set, network_sns).T
                    pnl_year = pd.concat(
                        [pnl_year.reindex(network_sns), static_load_t], axis=1
                    )
                    columns = (pnl[k].columns.union(pnl_year.columns)).unique()
                    pnl[k] = pnl[k].reindex(columns=columns)
                    pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year

                else:
                    # this is to avoid adding assets with infinite lifetime,
                    # such as ror, multiple times
                    cols = pnl_year.columns.difference(pnl[k].columns)
                    pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)

        n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values

        # (3) global constraints
        for component in network.iterate_components(["GlobalConstraint"]):
            add_year_to_constraints(network, year)
            import_components_from_dataframe(n, component.df, component.name)

    # set investment periods
    n.investment_periods = n.snapshots.levels[0]
    # weighting of the investment period -> assuming last period same weighting as the period before
    time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
    n.investment_period_weightings["years"] = time_w
    # set objective weightings
    objective_w = get_investment_weighting(
        n.investment_period_weightings["years"], social_discountrate
    )
    n.investment_period_weightings["objective"] = objective_w
    # all former static loads are now time-dependent -> set static = 0
    n.loads["p_set"] = 0
    n.loads_t.p_set.fillna(0, inplace=True)

    return n

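# Illustrative sketch (toy dates, not the real workflow) of the snapshot
# handling above: each single-year DatetimeIndex is wrapped into a
# (period, timestep) MultiIndex and the union over periods becomes
# n.snapshots, whose first level defines the investment periods.
def _example_multiindex_snapshots():
    sns_2030 = pd.date_range("2030-01-01", periods=3, freq="h")
    sns_2040 = pd.date_range("2040-01-01", periods=3, freq="h")
    snapshots = pd.MultiIndex.from_product([[2030], sns_2030]).union(
        pd.MultiIndex.from_product([[2040], sns_2040])
    )
    return snapshots.levels[0]  # Index([2030, 2040]) -> investment periods

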
def adjust_stores(n):
    """
    Make sure that stores still behave cyclically over one year and not over
    the whole modelling horizon.
    """
    # cyclic constraint
    cyclic_i = n.stores[n.stores.e_cyclic].index
    n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
    n.stores.loc[cyclic_i, "e_cyclic"] = False
    # non cyclic store assumptions
    non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"]
    co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index
    n.stores.loc[co2_i, "e_cyclic_per_period"] = False
    n.stores.loc[co2_i, "e_cyclic"] = False
    # e_initial at beginning of each investment period
    e_initial_store = ["solid biomass", "biogas"]
    co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
    n.stores.loc[co2_i, "e_initial_per_period"] = True
    # n.stores.loc[co2_i, "e_initial"] *= 10
    # n.stores.loc[co2_i, "e_nom"] *= 10
    e_initial_store = ["co2 stored"]
    co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
    n.stores.loc[co2_i, "e_initial_per_period"] = True

    return n


def set_phase_out(n, carrier, ct, phase_out_year):
    """
    Set planned phase-outs for a given carrier, country (ct) and planned year
    of phase-out (phase_out_year).
    """
    df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
    # assets which are going to be phased out before end of their lifetime
    assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
    build_year = n.links.loc[assets_i, "build_year"]
    # adjust lifetime
    n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)


def set_all_phase_outs(n):
    # TODO move this to a csv or to the config
    planned = [
        (["nuclear"], "DE", 2022),
        (["nuclear"], "BE", 2025),
        (["nuclear"], "ES", 2027),
        (["coal", "lignite"], "DE", 2030),
        (["coal", "lignite"], "ES", 2027),
        (["coal", "lignite"], "FR", 2022),
        (["coal", "lignite"], "GB", 2024),
        (["coal", "lignite"], "IT", 2025),
        (["coal", "lignite"], "DK", 2030),
        (["coal", "lignite"], "FI", 2030),
        (["coal", "lignite"], "HU", 2030),
        (["coal", "lignite"], "SK", 2030),
        (["coal", "lignite"], "GR", 2030),
        (["coal", "lignite"], "IE", 2030),
        (["coal", "lignite"], "NL", 2030),
        (["coal", "lignite"], "RS", 2030),
    ]
    for carrier, ct, phase_out_year in planned:
        set_phase_out(n, carrier, ct, phase_out_year)
    # remove assets which are already phased out
    remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
    n.mremove("Link", remove_i)

def set_carbon_constraints(n, opts):
    """
    Add global constraints for carbon emissions.
    """
    budget = None
    for o in opts:
        # other budgets
        m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
        if m is not None:
            budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
    if budget is not None:
        logger.info("add carbon budget of {}".format(budget))
        n.add(
            "GlobalConstraint",
            "Budget",
            type="Co2Budget",
            carrier_attribute="co2_emissions",
            sense="<=",
            constant=budget,
            investment_period=n.investment_periods[-1],
        )

        # drop other CO2 limits
        drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
        n.mremove("GlobalConstraint", drop_i)

        n.add(
            "GlobalConstraint",
            "carbon_neutral",
            type="co2_limit",
            carrier_attribute="co2_emissions",
            sense="<=",
            constant=0,
            investment_period=n.investment_periods[-1],
        )

    # set minimum CO2 emission constraint to avoid too fast reduction
    if "co2min" in opts:
        emissions_1990 = 4.53693
        emissions_2019 = 3.344096
        target_2030 = 0.45 * emissions_1990
        annual_reduction = (emissions_2019 - target_2030) / 11
        first_year = n.snapshots.levels[0][0]
        time_weightings = n.investment_period_weightings.loc[first_year, "years"]
        co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
        logger.info(
            "add minimum emissions for {} of {} t CO2/a".format(first_year, co2min)
        )
        n.add(
            "GlobalConstraint",
            f"Co2Min-{first_year}",
            type="Co2min",
            carrier_attribute="co2_emissions",
            sense=">=",
            investment_period=first_year,
            constant=co2min * 1e9 * time_weightings,
        )

    return n


def adjust_lvlimit(n):
    """
    Convert global constraints for single investment periods to one uniform
    constraint if all attributes stay the same.
    """
    c = "GlobalConstraint"
    cols = ["carrier_attribute", "sense", "constant", "type"]
    glc_type = "transmission_volume_expansion_limit"
    if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
        glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
        glc.index = pd.Index(["lv_limit"])
        remove_i = n.df(c)[n.df(c).type == glc_type].index
        n.mremove(c, remove_i)
        import_components_from_dataframe(n, glc, c)

    return n


def adjust_CO2_glc(n):
    c = "GlobalConstraint"
    glc_name = "CO2Limit"
    glc_type = "primary_energy"
    mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
    n.df(c).loc[mask, "type"] = "co2_limit"

    return n


def add_H2_boilers(n):
    """
    Gas boilers can be retrofitted to run with H2.

    Add H2 boilers for heating for all existing gas boilers.
    """
    c = "Link"
    logger.info("Add H2 boilers.")
    # existing gas boilers
    mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
    gas_i = n.links[mask].index
    df = n.links.loc[gas_i]
    # adjust bus 0
    df["bus0"] = df.bus1.map(n.buses.location) + " H2"
    # rename carrier and index
    df["carrier"] = df.carrier.apply(
        lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
    )
    df.rename(
        index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
    )
    # TODO: costs for retrofitting
    df["capital_costs"] = 100
    # set existing capacity to zero
    df["p_nom"] = 0
    df["p_nom_extendable"] = True
    # add H2 boilers to network
    import_components_from_dataframe(n, df, c)

def apply_time_segmentation_perfect(
    n, segments, solver_name="cbc", overwrite_time_dependent=True
):
    """
    Aggregate time series into segments of different lengths.

    Input:
        n: pypsa Network
        segments: (int) number of segments in which the typical period should be
                  subdivided
        solver_name: (str) name of solver
        overwrite_time_dependent: (bool) overwrite time-dependent data of the
            pypsa network with the typical time series created by tsam
    """
    try:
        import tsam.timeseriesaggregation as tsam
    except ImportError:
        raise ModuleNotFoundError(
            "Optional dependency 'tsam' not found. Install via 'pip install tsam'"
        )

    # get all time-dependent data
    columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"])
    raw = pd.DataFrame(index=n.snapshots, columns=columns)
    for c in n.iterate_components():
        for attr, pnl in c.pnl.items():
            # exclude e_min_pu which is used for SOC of EVs in the morning
            if not pnl.empty and attr != "e_min_pu":
                df = pnl.copy()
                df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
                raw = pd.concat([raw, df], axis=1)
    raw = raw.dropna(axis=1)
    sn_weightings = {}

    for year in raw.index.levels[0]:
        logger.info(f"Find representative snapshots for {year}.")
        raw_t = raw.loc[year]
        # normalise all time-dependent data
        annual_max = raw_t.max().replace(0, 1)
        raw_t = raw_t.div(annual_max, level=0)
        # get representative segments
        agg = tsam.TimeSeriesAggregation(
            raw_t,
            hoursPerPeriod=len(raw_t),
            noTypicalPeriods=1,
            noSegments=int(segments),
            segmentation=True,
            solver=solver_name,
        )
        segmented = agg.createTypicalPeriods()

        weightings = segmented.index.get_level_values("Segment Duration")
        offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
        timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
        snapshots = pd.DatetimeIndex(timesteps)
        sn_weightings[year] = pd.Series(
            weightings, index=snapshots, name="weightings", dtype="float64"
        )

    sn_weightings = pd.concat(sn_weightings)
    n.set_snapshots(sn_weightings.index)
    n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)

    return n

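# Illustrative toy run of the tsam segmentation used above (requires the
# optional 'tsam' package and an installed solver; values are examples):
# 24 hourly values are compressed into 4 segments of varying duration.
def _example_tsam_segmentation():
    import tsam.timeseriesaggregation as tsam

    raw = pd.DataFrame(
        {"load": range(24)}, index=pd.date_range("2013-01-01", periods=24, freq="h")
    )
    agg = tsam.TimeSeriesAggregation(
        raw,
        hoursPerPeriod=len(raw),
        noTypicalPeriods=1,
        noSegments=4,
        segmentation=True,
        solver="cbc",  # assumes CBC is available; adjust to your solver
    )
    segmented = agg.createTypicalPeriods()
    # segment durations sum to the original 24 hours
    return segmented.index.get_level_values("Segment Duration").sum()

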
def set_temporal_aggregation_SEG(n, opts, solver_name):
    """
    Aggregate network temporally with tsam.
    """
    for o in opts:
        # segments with package tsam
        m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
        if m is not None:
            segments = int(m[1])
            logger.info(f"Use temporal segmentation with {segments} segments")
            n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
            break
    return n


# %%
if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake(
            "prepare_perfect_foresight",
            simpl="",
            opts="",
            clusters="37",
            ll="v1.5",
            sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
        )

    update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
    # parameters -----------------------------------------------------------
    years = snakemake.config["scenario"]["planning_horizons"]
    opts = snakemake.wildcards.sector_opts.split("-")
    social_discountrate = snakemake.config["costs"]["social_discountrate"]
    for o in opts:
        if "sdr" in o:
            social_discountrate = float(o.replace("sdr", "")) / 100

    logger.info(
        "Concatenating networks of investment periods {} with a social discount rate of {}%".format(
            years, social_discountrate * 100
        )
    )

    # concat prenetworks of planning horizon to single network ------------
    n = concat_networks(years)

    # temporal aggregation
    opts = snakemake.wildcards.sector_opts.split("-")
    solver_name = snakemake.config["solving"]["solver"]["name"]
    n = set_temporal_aggregation_SEG(n, opts, solver_name)

    # adjust global constraints lv limit if the same for all years
    n = adjust_lvlimit(n)
    # adjust global constraints CO2 limit
    n = adjust_CO2_glc(n)
    # adjust stores to multi-period investment
    n = adjust_stores(n)

    # set phase-outs
    set_all_phase_outs(n)

    # add H2 boilers
    add_H2_boilers(n)

    # set carbon constraints
    opts = snakemake.wildcards.sector_opts.split("-")
    n = set_carbon_constraints(n, opts)

    # export network
    n.export_to_netcdf(snakemake.output[0])
@@ -191,17 +191,15 @@ def get(item, investment_year=None):


 def co2_emissions_year(
-    countries, input_eurostat, opts, emissions_scope, report_year, year
+    countries, input_eurostat, opts, emissions_scope, report_year, input_co2, year
 ):
     """
     Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
     """
-    emissions_scope = snakemake.params.energy["emissions"]
-    eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope)
+    eea_co2 = build_eea_co2(input_co2, year, emissions_scope)

     # TODO: read Eurostat data from year > 2014
     # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
-    report_year = snakemake.params.energy["eurostat_report_year"]
     if year > 2014:
         eurostat_co2 = build_eurostat_co2(
             input_eurostat, countries, report_year, year=2014
@@ -240,12 +238,24 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
     countries = snakemake.params.countries

     e_1990 = co2_emissions_year(
-        countries, input_eurostat, opts, emissions_scope, report_year, year=1990
+        countries,
+        input_eurostat,
+        opts,
+        emissions_scope,
+        report_year,
+        input_co2,
+        year=1990,
     )

     # emissions at the beginning of the path (last year available 2018)
     e_0 = co2_emissions_year(
-        countries, input_eurostat, opts, emissions_scope, report_year, year=2018
+        countries,
+        input_eurostat,
+        opts,
+        emissions_scope,
+        report_year,
+        input_co2,
+        year=2018,
     )

     planning_horizons = snakemake.params.planning_horizons
@@ -567,6 +577,7 @@ def add_co2_tracking(n, options):
         capital_cost=options["co2_sequestration_cost"],
         carrier="co2 stored",
         bus=spatial.co2.nodes,
+        lifetime=options["co2_sequestration_lifetime"],
     )

     n.add("Carrier", "co2 stored")
@@ -2156,12 +2167,11 @@ def add_biomass(n, costs):
     )

     if options["biomass_transport"]:
-        transport_costs = pd.read_csv(
-            snakemake.input.biomass_transport_costs,
-            index_col=0,
-        ).squeeze()
-
         # add biomass transport
+        transport_costs = pd.read_csv(
+            snakemake.input.biomass_transport_costs, index_col=0
+        )
+        transport_costs = transport_costs.squeeze()
         biomass_transport = create_network_topology(
             n, "biomass transport ", bidirectional=False
         )
@@ -2185,6 +2195,27 @@ def add_biomass(n, costs):
             carrier="solid biomass transport",
         )

+    elif options["biomass_spatial"]:
+        # add artificial biomass generators at nodes which include transport costs
+        transport_costs = pd.read_csv(
+            snakemake.input.biomass_transport_costs, index_col=0
+        )
+        transport_costs = transport_costs.squeeze()
+        bus_transport_costs = spatial.biomass.nodes.to_series().apply(
+            lambda x: transport_costs[x[:2]]
+        )
+        average_distance = 200  # km  # TODO: validate this assumption
+
+        n.madd(
+            "Generator",
+            spatial.biomass.nodes,
+            bus=spatial.biomass.nodes,
+            carrier="solid biomass",
+            p_nom=10000,
+            marginal_cost=costs.at["solid biomass", "fuel"]
+            + bus_transport_costs * average_distance,
+        )
+
     # AC buses with district heating
     urban_central = n.buses.index[n.buses.carrier == "urban central heat"]
     if not urban_central.empty and options["chp"]:
@@ -3295,7 +3326,7 @@ if __name__ == "__main__":

     spatial = define_spatial(pop_layout.index, options)

-    if snakemake.params.foresight == "myopic":
+    if snakemake.params.foresight in ["myopic", "perfect"]:
         add_lifetime_wind_solar(n, costs)

         conventional = snakemake.params.conventional_carriers
@@ -3376,8 +3407,14 @@ if __name__ == "__main__":
         if not os.path.exists(fn):
             emissions_scope = snakemake.params.emissions_scope
             report_year = snakemake.params.eurostat_report_year
+            input_co2 = snakemake.input.co2
             build_carbon_budget(
-                o, snakemake.input.eurostat, fn, emissions_scope, report_year
+                o,
+                snakemake.input.eurostat,
+                fn,
+                emissions_scope,
+                report_year,
+                input_co2,
             )
         co2_cap = pd.read_csv(fn, index_col=0).squeeze()
         limit = co2_cap.loc[investment_year]
@@ -3410,7 +3447,7 @@ if __name__ == "__main__":
     if options["electricity_grid_connection"]:
         add_electricity_grid_connection(n, costs)

-    first_year_myopic = (snakemake.params.foresight == "myopic") and (
+    first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and (
         snakemake.params.planning_horizons[0] == investment_year
     )

35 scripts/retrieve_monthly_fuel_prices.py Normal file
@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve monthly fuel prices from Destatis.
"""

import logging

logger = logging.getLogger(__name__)

from pathlib import Path

from _helpers import configure_logging, progress_retrieve

if __name__ == "__main__":
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("retrieve_monthly_fuel_prices")
        rootpath = ".."
    else:
        rootpath = "."
    configure_logging(snakemake)

    url = "https://www.destatis.de/EN/Themes/Economy/Prices/Publications/Downloads-Energy-Price-Trends/energy-price-trends-xlsx-5619002.xlsx?__blob=publicationFile"

    to_fn = Path(rootpath) / Path(snakemake.output[0])

    logger.info(f"Downloading monthly fuel prices from '{url}'.")
    disable_progress = snakemake.config["run"].get("disable_progressbar", False)
    progress_retrieve(url, to_fn, disable=disable_progress)

    logger.info(f"Monthly fuel prices available at {to_fn}")
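
# Hedged usage sketch (not part of this script): the downloaded workbook can
# be inspected with pandas afterwards. The output path and sheet layout below
# are assumptions; the actual parsing happens downstream in the workflow.
import pandas as pd

xlsx = pd.ExcelFile("data/validation/energy-price-trends-xlsx-5619002.xlsx")
print(xlsx.sheet_names)  # locate the sheet with the monthly fuel price index
df = xlsx.parse(xlsx.sheet_names[0], skiprows=5)  # skiprows is a guess
print(df.head())
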
@@ -613,6 +613,7 @@ if __name__ == "__main__":
             "substation_lv",
             "substation_off",
             "geometry",
+            "underground",
         ]
     n.buses.drop(remove, axis=1, inplace=True, errors="ignore")
     n.lines.drop(remove, axis=1, errors="ignore", inplace=True)
@@ -33,7 +33,9 @@ import numpy as np
 import pandas as pd
 import pypsa
 import xarray as xr
+from _benchmark import memory_logger
 from _helpers import configure_logging, update_config_with_sector_opts
+from pypsa.descriptors import get_activity_mask

 logger = logging.getLogger(__name__)
 pypsa.pf.logger.setLevel(logging.WARNING)
@@ -47,10 +49,76 @@ def add_land_use_constraint(n, planning_horizons, config):
         _add_land_use_constraint(n)


+def add_land_use_constraint_perfect(n):
+    """
+    Add global constraints for tech capacity limit.
+    """
+    logger.info("Add land-use constraint for perfect foresight")
+
+    def compress_series(s):
+        def process_group(group):
+            if group.nunique() == 1:
+                return pd.Series(group.iloc[0], index=[None])
+            else:
+                return group
+
+        return s.groupby(level=[0, 1]).apply(process_group)
+
+    def new_index_name(t):
+        # Convert all elements to string and filter out None values
+        parts = [str(x) for x in t if x is not None]
+        # Join with space, but use a dash for the last item if not None
+        return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
+
+    def check_p_min_p_max(p_nom_max):
+        p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
+        p_nom_min = p_nom_min.reindex(p_nom_max.index)
+        check = (
+            p_nom_min.groupby(level=[0, 1]).sum()
+            > p_nom_max.groupby(level=[0, 1]).min()
+        )
+        if check.sum():
+            logger.warning(
+                f"summed p_nom_min values at node larger than technical potential {check[check].index}"
+            )
+
+    grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
+    ext_i = n.generators.p_nom_extendable
+    # get technical limit per node and investment period
+    p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max
+    # drop carriers without tech limit
+    p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
+    # carrier
+    carriers = p_nom_max.index.get_level_values(0).unique()
+    gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
+    n.generators.loc[gen_i, "p_nom_min"] = 0
+    # check minimum capacities
+    check_p_min_p_max(p_nom_max)
+    # drop multi entries in case p_nom_max stays constant in different periods
+    # p_nom_max = compress_series(p_nom_max)
+    # adjust name to fit syntax of nominal constraint per bus
+    df = p_nom_max.reset_index()
+    df["name"] = df.apply(
+        lambda row: f"nom_max_{row['carrier']}"
+        + (f"_{row['build_year']}" if row["build_year"] is not None else ""),
+        axis=1,
+    )
+
+    for name in df.name.unique():
+        df_carrier = df[df.name == name]
+        bus = df_carrier.bus
+        n.buses.loc[bus, name] = df_carrier.p_nom_max.values
+
+    return n
+
+
 def _add_land_use_constraint(n):
     # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'

     for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
+        extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable
+        n.generators.loc[extendable_i, "p_nom_min"] = 0
+
         ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
         existing = (
             n.generators.loc[ext_i, "p_nom"]
@@ -67,7 +135,7 @@ def _add_land_use_constraint(n):
     if len(existing_large):
         logger.warning(
             f"Existing capacities larger than technical potential for {existing_large},\
             adjust technical potential to existing capacities"
         )
         n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
             existing_large, "p_nom_min"
@@ -79,7 +147,6 @@ def _add_land_use_constraint(n):
 def _add_land_use_constraint_m(n, planning_horizons, config):
     # if generators clustering is lower than network clustering, land_use accounting is at generators clusters

-    planning_horizons = param["planning_horizons"]
     grouping_years = config["existing_capacities"]["grouping_years"]
     current_horizon = snakemake.wildcards.planning_horizons

@@ -113,7 +180,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
     n.generators.p_nom_max.clip(lower=0, inplace=True)


-def add_co2_sequestration_limit(n, limit=200):
+def add_co2_sequestration_limit(n, config, limit=200):
     """
     Add a global constraint on the amount of Mt CO2 that can be sequestered.
     """
@@ -127,16 +194,146 @@ def add_co2_sequestration_limit(n, config, limit=200):
             limit = float(o[o.find("seq") + 3 :]) * 1e6
             break

-    n.add(
+    if not n.investment_periods.empty:
+        periods = n.investment_periods
+        names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
+    else:
+        periods = [np.nan]
+        names = pd.Index(["co2_sequestration_limit"])
+
+    n.madd(
         "GlobalConstraint",
-        "co2_sequestration_limit",
+        names,
         sense="<=",
         constant=limit,
         type="primary_energy",
         carrier_attribute="co2_absorptions",
+        investment_period=periods,
     )


+def add_carbon_constraint(n, snapshots):
+    glcs = n.global_constraints.query('type == "co2_limit"')
+    if glcs.empty:
+        return
+    for name, glc in glcs.iterrows():
+        rhs = glc.constant
+        carattr = glc.carrier_attribute
+        emissions = n.carriers.query(f"{carattr} != 0")[carattr]
+
+        if emissions.empty:
+            continue
+
+        # stores
+        n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
+        stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
+        time_valid = int(glc.loc["investment_period"])
+        if not stores.empty:
+            last = n.snapshot_weightings.reset_index().groupby("period").last()
+            last_i = last.set_index([last.index, last.timestep]).index
+            final_e = n.model["Store-e"].loc[last_i, stores.index]
+            time_i = pd.IndexSlice[time_valid, :]
+            lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
+
+            n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
+
+
+def add_carbon_budget_constraint(n, snapshots):
+    glcs = n.global_constraints.query('type == "Co2Budget"')
+    if glcs.empty:
+        return
+    for name, glc in glcs.iterrows():
+        rhs = glc.constant
+        carattr = glc.carrier_attribute
+        emissions = n.carriers.query(f"{carattr} != 0")[carattr]
+
+        if emissions.empty:
+            continue
+
+        # stores
+        n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
+        stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
+        time_valid = int(glc.loc["investment_period"])
+        weighting = n.investment_period_weightings.loc[time_valid, "years"]
+        if not stores.empty:
+            last = n.snapshot_weightings.reset_index().groupby("period").last()
+            last_i = last.set_index([last.index, last.timestep]).index
+            final_e = n.model["Store-e"].loc[last_i, stores.index]
+            time_i = pd.IndexSlice[time_valid, :]
+            lhs = final_e.loc[time_i, :] * weighting
+
+            n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
+
+
+def add_max_growth(n, config):
+    """
+    Add maximum growth rates for different carriers.
+    """
+
+    opts = snakemake.params["sector"]["limit_max_growth"]
+    # take maximum yearly difference between investment periods since historic growth is per year
+    factor = n.investment_period_weightings.years.max() * opts["factor"]
+    for carrier in opts["max_growth"].keys():
+        max_per_period = opts["max_growth"][carrier] * factor
+        logger.info(
+            f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
+        )
+        n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
+
+    for carrier in opts["max_relative_growth"].keys():
+        max_r_per_period = opts["max_relative_growth"][carrier]
+        logger.info(
+            f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
+        )
+        n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
+
+    return n
+
+
+def add_retrofit_gas_boiler_constraint(n, snapshots):
+    """
+    Allow retrofitting of existing gas boilers to H2 boilers.
+    """
+    c = "Link"
+    logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
+    # existing gas boilers
+    mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
+    gas_i = n.links[mask].index
+    mask = n.links.carrier.str.contains("retrofitted H2 boiler")
+    h2_i = n.links[mask].index
+
+    n.links.loc[gas_i, "p_nom_extendable"] = True
+    p_nom = n.links.loc[gas_i, "p_nom"]
+    n.links.loc[gas_i, "p_nom"] = 0
+
+    # heat profile
+    cols = n.loads_t.p_set.columns[
+        n.loads_t.p_set.columns.str.contains("heat")
+        & ~n.loads_t.p_set.columns.str.contains("industry")
+        & ~n.loads_t.p_set.columns.str.contains("agriculture")
+    ]
+    profile = n.loads_t.p_set[cols].div(
+        n.loads_t.p_set[cols].groupby(level=0).max(), level=0
+    )
+    # to deal with cases where the max value is zero
+    profile.fillna(0, inplace=True)
+    profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
+    profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
+    profile.columns = gas_i
+
+    rhs = profile.mul(p_nom)
+
+    dispatch = n.model["Link-p"]
+    active = get_activity_mask(n, c, snapshots, gas_i)
+    rhs = rhs[active]
+    p_gas = dispatch.sel(Link=gas_i)
+    p_h2 = dispatch.sel(Link=h2_i)
+
+    lhs = p_gas + p_h2
+
+    n.model.add_constraints(lhs == rhs, name="gas_retrofit")
+
+
 def prepare_network(
     n,
     solve_opts=None,
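
# Illustrative parameters for add_max_growth() (not shipped defaults): the
# dict mirrors what the function reads from
# snakemake.params["sector"]["limit_max_growth"]; carrier names and numbers
# here are examples only.
opts = {
    "enable": True,
    "factor": 1.5,
    "max_growth": {"onwind": 16, "solar": 28},  # GW per year
    "max_relative_growth": {"onwind": 3, "solar": 3},
}

years_per_period = 10  # stands in for n.investment_period_weightings.years.max()
factor = years_per_period * opts["factor"]
for carrier, per_year in opts["max_growth"].items():
    # same conversion as in the function: GW/a -> MW per investment period
    print(carrier, per_year * factor * 1e3, "MW per period")
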
@@ -197,9 +394,14 @@ def prepare_network(
     if foresight == "myopic":
         add_land_use_constraint(n, planning_horizons, config)

+    if foresight == "perfect":
+        n = add_land_use_constraint_perfect(n)
+        if snakemake.params["sector"]["limit_max_growth"]["enable"]:
+            n = add_max_growth(n, config)
+
     if n.stores.carrier.eq("co2 stored").any():
         limit = co2_sequestration_potential
-        add_co2_sequestration_limit(n, limit=limit)
+        add_co2_sequestration_limit(n, config, limit=limit)

     return n

@@ -591,51 +793,56 @@ def extra_functionality(n, snapshots):
     add_EQ_constraints(n, o)
     add_battery_constraints(n)
     add_pipe_retrofit_constraint(n)
+    if n._multi_invest:
+        add_carbon_constraint(n, snapshots)
+        add_carbon_budget_constraint(n, snapshots)
+        add_retrofit_gas_boiler_constraint(n, snapshots)


 def solve_network(n, config, solving, opts="", **kwargs):
     set_of_options = solving["solver"]["options"]
-    solver_options = solving["solver_options"][set_of_options] if set_of_options else {}
-    solver_name = solving["solver"]["name"]
     cf_solving = solving["options"]
-    track_iterations = cf_solving.get("track_iterations", False)
-    min_iterations = cf_solving.get("min_iterations", 4)
-    max_iterations = cf_solving.get("max_iterations", 6)
-    transmission_losses = cf_solving.get("transmission_losses", 0)
-    assign_all_duals = cf_solving.get("assign_all_duals", False)
+
+    kwargs["multi_investment_periods"] = (
+        True if config["foresight"] == "perfect" else False
+    )
+    kwargs["solver_options"] = (
+        solving["solver_options"][set_of_options] if set_of_options else {}
+    )
+    kwargs["solver_name"] = solving["solver"]["name"]
+    kwargs["extra_functionality"] = extra_functionality
+    kwargs["transmission_losses"] = cf_solving.get("transmission_losses", False)
+    kwargs["linearized_unit_commitment"] = cf_solving.get(
+        "linearized_unit_commitment", False
+    )
+    kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False)
+
+    rolling_horizon = cf_solving.pop("rolling_horizon", False)
+    skip_iterations = cf_solving.pop("skip_iterations", False)
+    if not n.lines.s_nom_extendable.any():
+        skip_iterations = True
+        logger.info("No expandable lines found. Skipping iterative solving.")

     # add to network for extra_functionality
     n.config = config
     n.opts = opts

-    skip_iterations = cf_solving.get("skip_iterations", False)
-    if not n.lines.s_nom_extendable.any():
-        skip_iterations = True
-        logger.info("No expandable lines found. Skipping iterative solving.")
-
-    if skip_iterations:
-        status, condition = n.optimize(
-            solver_name=solver_name,
-            transmission_losses=transmission_losses,
-            assign_all_duals=assign_all_duals,
-            extra_functionality=extra_functionality,
-            **solver_options,
-            **kwargs,
-        )
+    if rolling_horizon:
+        kwargs["horizon"] = cf_solving.get("horizon", 365)
+        kwargs["overlap"] = cf_solving.get("overlap", 0)
+        n.optimize.optimize_with_rolling_horizon(**kwargs)
+        status, condition = "", ""
+    elif skip_iterations:
+        status, condition = n.optimize(**kwargs)
     else:
+        kwargs["track_iterations"] = cf_solving.get("track_iterations", False)
+        kwargs["min_iterations"] = cf_solving.get("min_iterations", 4)
+        kwargs["max_iterations"] = cf_solving.get("max_iterations", 6)
         status, condition = n.optimize.optimize_transmission_expansion_iteratively(
-            solver_name=solver_name,
-            track_iterations=track_iterations,
-            min_iterations=min_iterations,
-            max_iterations=max_iterations,
-            transmission_losses=transmission_losses,
-            assign_all_duals=assign_all_duals,
-            extra_functionality=extra_functionality,
-            **solver_options,
-            **kwargs,
+            **kwargs
         )

-    if status != "ok":
+    if status != "ok" and not rolling_horizon:
         logger.warning(
             f"Solving status '{status}' with termination condition '{condition}'"
         )
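
# Illustrative 'solving' config fragment consumed by solve_network() after
# this refactor (key names follow the reads above; the values are
# placeholders, not recommended settings). All optimizer options are funnelled
# through a single kwargs dict so the same call signature serves n.optimize,
# the rolling-horizon variant and the iterative transmission expansion.
solving = {
    "solver": {"name": "gurobi", "options": "default"},
    "solver_options": {"default": {"method": 2, "crossover": 0}},
    "options": {
        "transmission_losses": 2,
        "linearized_unit_commitment": True,
        "assign_all_duals": True,
        "rolling_horizon": False,  # popped from the dict, read once per call
        "skip_iterations": True,
        "track_iterations": False,
        "min_iterations": 4,
        "max_iterations": 6,
    },
}
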
@@ -645,18 +852,19 @@ def solve_network(n, config, solving, opts="", **kwargs):
     return n


+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

         snakemake = mock_snakemake(
-            "solve_sector_network",
-            configfiles="test/config.overnight.yaml",
+            "solve_sector_network_perfect",
+            configfiles="../config/test/config.perfect.yaml",
             simpl="",
             opts="",
             clusters="5",
             ll="v1.5",
-            sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1",
+            sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
             planning_horizons="2030",
         )
     configure_logging(snakemake)
@@ -684,13 +892,18 @@ if __name__ == "__main__":
         co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
     )

-    n = solve_network(
-        n,
-        config=snakemake.config,
-        solving=snakemake.params.solving,
-        opts=opts,
-        log_fn=snakemake.log.solver,
-    )
+    with memory_logger(
+        filename=getattr(snakemake.log, "memory", None), interval=30.0
+    ) as mem:
+        n = solve_network(
+            n,
+            config=snakemake.config,
+            solving=snakemake.params.solving,
+            opts=opts,
+            log_fn=snakemake.log.solver,
+        )
+
+    logger.info("Maximum memory usage: {}".format(mem.mem_usage))

     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])