Merge branch 'master' into complete-losses

Fabian Neumann 2023-10-08 11:58:03 +02:00 committed by GitHub
commit 85c8812702
26 changed files with 2578 additions and 210 deletions


@@ -83,6 +83,7 @@ jobs:
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
- name: Upload artifacts
uses: actions/upload-artifact@v3

.gitignore

@@ -8,6 +8,7 @@ __pycache__
*dconf
gurobi.log
.vscode
*.orig
/bak
/resources


@@ -33,7 +33,7 @@ repos:
rev: v2.2.6
hooks:
- id: codespell
- args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
+ args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes
types_or: [python, rst, markdown]
files: ^(scripts|doc)/


@@ -14,4 +14,3 @@ build:
python:
install:
- requirements: doc/requirements.txt
- system_packages: false


@@ -66,6 +66,11 @@ if config["foresight"] == "myopic":
include: "rules/solve_myopic.smk"
if config["foresight"] == "perfect":
include: "rules/solve_perfect.smk"
rule all:
input:
RESULTS + "graphs/costs.pdf",


@@ -461,6 +461,7 @@ sector:
years_of_storage: 25
co2_sequestration_potential: 200
co2_sequestration_cost: 10
co2_sequestration_lifetime: 50
co2_spatial: false
co2network: false
cc_fraction: 0.9
@@ -501,6 +502,20 @@
OCGT: gas
biomass_to_liquid: false
biosng: false
limit_max_growth:
enable: false
# allowing 30% larger than max historic growth
factor: 1.3
max_growth: # unit GW
onwind: 16 # onshore max grow so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind
solar: 28 # solar max grow so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv
offwind-ac: 35 # offshore max grow so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/
offwind-dc: 35
max_relative_growth:
onwind: 3
solar: 3
offwind-ac: 3
offwind-dc: 3
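A hedged sketch of how these growth limits could be applied to a PyPSA network (illustrative only; it assumes PyPSA's per-carrier max_growth and max_relative_growth attributes and is not the actual PyPSA-Eur implementation):

# Illustrative sketch, not PyPSA-Eur code: `n` is a pypsa.Network, `config` the parsed YAML above.
opts = config["sector"]["limit_max_growth"]
if opts["enable"]:
    for carrier, historic_max_gw in opts["max_growth"].items():
        # e.g. onwind: 1.3 * 16 GW = 20.8 GW of new capacity allowed per investment period
        n.carriers.loc[carrier, "max_growth"] = opts["factor"] * historic_max_gw * 1e3  # MW
    for carrier, ratio in opts["max_relative_growth"].items():
        # limits expansion relative to the carrier's previously installed capacity (assumed semantics)
        n.carriers.loc[carrier, "max_relative_growth"] = ratio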
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
industry:
@@ -553,11 +568,13 @@
hotmaps_locate_missing: false
reference_year: 2015
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
costs:
year: 2030
version: v0.6.0
rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
social_discountrate: 0.02
fill_values:
FOM: 0
VOM: 0
@@ -771,6 +788,7 @@ plotting:
gas pipeline new: '#a87c62'
# oil
oil: '#c9c9c9'
imported oil: '#a3a3a3'
oil boiler: '#adadad'
residential rural oil boiler: '#a9a9a9'
services rural oil boiler: '#a5a5a5'
@@ -902,6 +920,7 @@ plotting:
H2 for shipping: "#ebaee0"
H2: '#bf13a0'
hydrogen: '#bf13a0'
retrofitted H2 boiler: '#e5a0d9'
SMR: '#870c71'
SMR CC: '#4f1745'
H2 liquefaction: '#d647bd'

config/config.perfect.yaml
@@ -0,0 +1,43 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
run:
name: "perfect"
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
foresight: perfect
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario:
simpl:
- ''
ll:
- v1.0
clusters:
- 37
opts:
- ''
sector_opts:
- 1p5-4380H-T-H-B-I-A-solar+p3-dist1
- 1p7-4380H-T-H-B-I-A-solar+p3-dist1
- 2p0-4380H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2020
- 2030
- 2040
- 2050
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
co2_budget:
# update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0

config/test/config.perfect.yaml
@@ -0,0 +1,91 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
tutorial: true
run:
name: "test-sector-perfect"
disable_progressbar: true
shared_resources: true
shared_cutouts: true
foresight: perfect
scenario:
ll:
- v1.0
clusters:
- 5
sector_opts:
- 8760H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2030
- 2040
- 2050
countries: ['BE']
snapshots:
start: "2013-03-01"
end: "2013-03-08"
electricity:
co2limit: 100.e+6
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery]
Store: [H2]
Link: [H2 pipeline]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0
atlite:
default_cutout: be-03-2013-era5
cutouts:
be-03-2013-era5:
module: era5
x: [4., 15.]
y: [46., 56.]
time: ["2013-03-01", "2013-03-08"]
renewable:
onwind:
cutout: be-03-2013-era5
offwind-ac:
cutout: be-03-2013-era5
max_depth: false
offwind-dc:
cutout: be-03-2013-era5
max_depth: false
solar:
cutout: be-03-2013-era5
industry:
St_primary_fraction:
2020: 0.8
2030: 0.6
2040: 0.5
2050: 0.4
solving:
solver:
name: glpk
options: glpk-default
mem: 4000
plotting:
map:
boundaries:
eu_node_location:
x: -5.5
y: 46.
costs_max: 1000
costs_threshold: 0.0000001
energy_max:
energy_min:
energy_threshold: 0.000001


@@ -41,10 +41,10 @@ Perfect foresight scenarios
.. warning::
- Perfect foresight is currently under development and not yet implemented.
- For running perfect foresight scenarios, in future versions you will be able to
- set in the ``config/config.yaml``:
+ Perfect foresight is currently implemented as a first test version.
+ For running perfect foresight scenarios, you can adjust the
+ ``config/config.perfect.yaml``:
.. code:: yaml


@@ -20,6 +20,8 @@ Upcoming Release
* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.
* New feature multi-decade optimisation with perfect foresight.
* It is now possible to specify years for biomass potentials which do not exist
in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated.
@@ -32,6 +34,7 @@ Upcoming Release
* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv`` was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732)
PyPSA-Eur 0.8.1 (27th July 2023)
================================


@@ -133,89 +133,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"];
8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"];
9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"];
10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"];
11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"];
12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"];
13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"];
14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"];
15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"];
16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"];
17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"];
18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"];
19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"];
20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"];
21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"];
22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"];
1 -> 0
22 -> 0
2 -> 1
18 -> 1
3 -> 2
18 -> 2
4 -> 3
18 -> 3
5 -> 4
18 -> 4
11 -> 4
6 -> 5
12 -> 5
13 -> 5
16 -> 5
7 -> 5
17 -> 5
18 -> 5
11 -> 5
19 -> 5
9 -> 5
20 -> 5
8 -> 5
7 -> 6
9 -> 6
10 -> 6
8 -> 6
11 -> 6
8 -> 7
9 -> 8
8 -> 11
7 -> 11
7 -> 12
9 -> 12
10 -> 12
8 -> 12
11 -> 12
7 -> 13
9 -> 13
10 -> 13
14 -> 13
8 -> 13
11 -> 13
15 -> 14
7 -> 16
9 -> 16
10 -> 16
14 -> 16
8 -> 16
11 -> 16
7 -> 17
7 -> 19
21 -> 20
}
|


@@ -55,5 +55,5 @@ dependencies:
- pip:
-   - tsam>=1.1.0
+   - git+https://github.com/fneum/tsam.git@performance
-   - pypsa>=0.25.1
+   - pypsa>=0.25.2


@@ -60,6 +60,15 @@ rule solve_sector_networks:
),
rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
**config["scenario"]
),
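For orientation, a minimal Python sketch (not part of the repository) of what the expand() call above unrolls to with the scenario values from config/config.perfect.yaml; the listed path is illustrative and shows only one of the three sector_opts:

from itertools import product

scenario = {
    "simpl": [""], "clusters": [37], "ll": ["v1.0"], "opts": [""],
    "sector_opts": ["1p5-4380H-T-H-B-I-A-solar+p3-dist1"],
}
pattern = "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
targets = [
    pattern.format(**dict(zip(scenario, combo)))
    for combo in product(*scenario.values())
]
# ['postnetworks/elec_s_37_lv1.0__1p5-4380H-T-H-B-I-A-solar+p3-dist1_brownfield_all_years.nc']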
rule plot_networks:
input:
expand(


@@ -8,31 +8,62 @@ localrules:
copy_conda_env,

if config["foresight"] != "perfect":

    rule plot_network:
        params:
            foresight=config["foresight"],
            plotting=config["plotting"],
        input:
            network=RESULTS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
        output:
            map=RESULTS
            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
            today=RESULTS
            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
        threads: 2
        resources:
            mem_mb=10000,
        benchmark:
            (
                BENCHMARKS
                + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
            )
        conda:
            "../envs/environment.yaml"
        script:
            "../scripts/plot_network.py"


if config["foresight"] == "perfect":

    rule plot_network:
        params:
            foresight=config["foresight"],
            plotting=config["plotting"],
        input:
            network=RESULTS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
            regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
        output:
            **{
                f"map_{year}": RESULTS
                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
                + f"{year}.pdf"
                for year in config["scenario"]["planning_horizons"]
            },
        threads: 2
        resources:
            mem_mb=10000,
        benchmark:
            BENCHMARKS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
        conda:
            "../envs/environment.yaml"
        script:
            "../scripts/plot_network.py"
rule copy_config:

rules/solve_perfect.smk

@@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_existing_baseyear.py"
rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv",
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 4
resources:
mem_mb=10000,
log:
LOGS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_brownfield.py"
rule prepare_perfect_foresight:
input:
**{
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ f"{year}.nc"
for year in config["scenario"]["planning_horizons"][1:]
},
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 2
resources:
mem_mb=10000,
log:
LOGS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
benchmark:
(
BENCHMARKS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/prepare_perfect_foresight.py"
rule solve_sector_network_perfect:
params:
solving=config["solving"],
foresight=config["foresight"],
sector=config["sector"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv",
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 4
resources:
mem_mb=config["solving"]["mem"],
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/solve_network.py"
rule make_summary_perfect:
input:
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"]
for clusters in config["scenario"]["clusters"]
for opts in config["scenario"]["opts"]
for sector_opts in config["scenario"]["sector_opts"]
for ll in config["scenario"]["ll"]
},
costs="data/costs_2020.csv",
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
nodal_cfs=RESULTS + "csvs/nodal_cfs.csv",
cfs=RESULTS + "csvs/cfs.csv",
costs=RESULTS + "csvs/costs.csv",
capacities=RESULTS + "csvs/capacities.csv",
curtailment=RESULTS + "csvs/curtailment.csv",
energy=RESULTS + "csvs/energy.csv",
supply=RESULTS + "csvs/supply.csv",
supply_energy=RESULTS + "csvs/supply_energy.csv",
prices=RESULTS + "csvs/prices.csv",
weighted_prices=RESULTS + "csvs/weighted_prices.csv",
market_values=RESULTS + "csvs/market_values.csv",
price_statistics=RESULTS + "csvs/price_statistics.csv",
metrics=RESULTS + "csvs/metrics.csv",
co2_emissions=RESULTS + "csvs/co2_emissions.csv",
threads: 2
resources:
mem_mb=10000,
log:
LOGS + "make_summary_perfect.log",
benchmark:
(BENCHMARKS + "make_summary_perfect")
conda:
"../envs/environment.yaml"
script:
"../scripts/make_summary_perfect.py"
ruleorder: add_existing_baseyear > add_brownfield

scripts/_benchmark.py

@@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
"""
from __future__ import absolute_import, print_function
import logging
import os
import sys
import time
logger = logging.getLogger(__name__)
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Pipe, Process
except ImportError:
from multiprocessing.dummy import Process, Pipe
from memory_profiler import _get_memory, choose_backend
# The memory logging facilities have been adapted from memory_profiler
class MemTimer(Process):
"""
Write memory consumption over a time interval to file until signaled to
stop on the pipe.
"""
def __init__(
self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw
):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.filename = filename
self.max_usage = max_usage
self.backend = backend
self.timestamps = kw.pop("timestamps", True)
self.include_children = kw.pop("include_children", True)
super(MemTimer, self).__init__(*args, **kw)
def run(self):
# get baseline memory usage
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
n_measurements = 1
mem_usage = cur_mem if self.max_usage else [cur_mem]
if self.filename is not None:
stream = open(self.filename, "w")
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
else:
stream = None
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
n_measurements += 1
if not self.max_usage:
mem_usage.append(cur_mem)
else:
mem_usage = max(cur_mem, mem_usage)
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
if stream is not None:
stream.close()
self.pipe.send(mem_usage)
self.pipe.send(n_measurements)
class memory_logger(object):
"""
Context manager for taking and reporting memory measurements at fixed
intervals from a separate process, for the duration of a context.
Parameters
----------
filename : None|str
Name of the text file to log memory measurements, if None no log is
created (defaults to None)
interval : float
Interval between measurements (defaults to 1.)
max_usage : bool
If True, only store and report the maximum value (defaults to True)
timestamps : bool
Whether to record tuples of memory usage and timestamps; if logging to
a file timestamps are always kept (defaults to True)
include_children : bool
Whether the memory of subprocesses is to be included (default: True)
Arguments
---------
n_measurements : int
Number of measurements that have been taken
mem_usage : (float, float)|[(float, float)]
All memory measurements and timestamps (if timestamps was True) or only
the maximum memory usage and its timestamp
Note
----
The arguments are only set after all the measurements, i.e. outside of the
with statement.
Example
-------
with memory_logger(filename="memory.log", max_usage=True) as mem:
# Do a lot of long running memory intensive stuff
hard_memory_bound_stuff()
max_mem, timestamp = mem.mem_usage
"""
def __init__(
self,
filename=None,
interval=1.0,
max_usage=True,
timestamps=True,
include_children=True,
):
if filename is not None:
timestamps = True
self.filename = filename
self.interval = interval
self.max_usage = max_usage
self.timestamps = timestamps
self.include_children = include_children
def __enter__(self):
backend = choose_backend()
self.child_conn, self.parent_conn = Pipe() # this will store MemTimer's results
self.p = MemTimer(
os.getpid(),
self.interval,
self.child_conn,
self.filename,
backend=backend,
timestamps=self.timestamps,
max_usage=self.max_usage,
include_children=self.include_children,
)
self.p.start()
self.parent_conn.recv() # wait until memory logging in subprocess is ready
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.parent_conn.send(0) # finish timing
self.mem_usage = self.parent_conn.recv()
self.n_measurements = self.parent_conn.recv()
else:
self.p.terminate()
return False
class timer(object):
level = 0
opened = False
def __init__(self, name="", verbose=True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.verbose:
if self.opened:
sys.stdout.write("\n")
if len(self.name) > 0:
sys.stdout.write((".. " * self.level) + self.name + ": ")
sys.stdout.flush()
self.__class__.opened = True
self.__class__.level += 1
self.start = time.time()
return self
def print_usec(self, usec):
if usec < 1000:
print("%.1f usec" % usec)
else:
msec = usec / 1000
if msec < 1000:
print("%.1f msec" % msec)
else:
sec = msec / 1000
print("%.1f sec" % sec)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.opened and self.verbose:
sys.stdout.write(".. " * self.level)
if exc_type is None:
stop = time.time()
self.usec = usec = (stop - self.start) * 1e6
if self.verbose:
self.print_usec(usec)
elif self.verbose:
print("failed")
sys.stdout.flush()
self.__class__.level -= 1
if self.verbose:
self.__class__.opened = False
return False
class optional(object):
def __init__(self, variable, contextman):
self.variable = variable
self.contextman = contextman
def __enter__(self):
if self.variable:
return self.contextman.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.variable:
return self.contextman.__exit__(exc_type, exc_val, exc_tb)
return False
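A hedged usage sketch for the helpers above (the workload function is a placeholder; actual call sites would live in the solve scripts):

from _benchmark import memory_logger, optional, timer

def workload():
    # placeholder for a long-running, memory-intensive task
    return sum(i * i for i in range(10_000_000))

track_memory = True
with optional(track_memory, memory_logger(filename="memory.log", max_usage=True)) as mem:
    with timer("run workload"):
        workload()

if track_memory:
    max_mem, timestamp = mem.mem_usage  # available only after the with-block exits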


@@ -406,6 +406,7 @@ def attach_wind_and_solar(
capital_cost=capital_cost,
efficiency=costs.at[supcar, "efficiency"],
p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
lifetime=costs.at[supcar, "lifetime"],
)


@@ -305,6 +305,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "EU" not in vars(spatial)[carrier[generator]].locations:
bus0 = bus0.intersection(capacity.index + " gas")
# check for missing bus
missing_bus = pd.Index(bus0).difference(n.buses.index)
if not missing_bus.empty:
logger.info(f"add buses {bus0}")
n.madd(
"Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el",
)
already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index)
lifetime_assets = lifetime.loc[grouping_year, generator].dropna()
@@ -605,6 +617,10 @@ def add_heating_capacities_installed_before_baseyear(
],
)
# drop assets which are at the end of their lifetime
links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
n.mremove("Link", links_i)
# %%
if __name__ == "__main__":
@@ -613,13 +629,13 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"add_existing_baseyear",
- configfiles="config/test/config.myopic.yaml",
+ # configfiles="config/test/config.myopic.yaml",
simpl="",
- clusters="5",
+ clusters="37",
- ll="v1.5",
+ ll="v1.0",
opts="",
- sector_opts="24H-T-H-B-I-A-solar+p3-dist1",
+ sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
- planning_horizons=2030,
+ planning_horizons=2020,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])


@@ -96,6 +96,23 @@ def prepare_hotmaps_database(regions):
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]
# the .sjoin can lead to duplicates if a geom is in two regions
if gdf.index.duplicated().any():
import pycountry
# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
s = df.loc[duplicated_i, "Country"].apply(
lambda x: pycountry.countries.lookup(x).alpha_2
)
# Get a boolean mask where gdf's country column matches s's values for the same index
mask = gdf["country"] == gdf.index.map(s)
# Filter gdf using the mask
gdf_filtered = gdf[mask]
# concat not duplicated and filtered gdf
gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered]).sort_index()
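For reference, the country disambiguation above relies on pycountry's name lookup; a small illustration (values are examples):

import pycountry

pycountry.countries.lookup("Germany").alpha_2  # 'DE'
# A facility whose geometry falls into two overlapping regions is kept only for the
# region whose bus country code matches the country reported in the raw hotmaps data.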
# the .sjoin can lead to duplicates if a geom is in two overlapping regions
if gdf.index.duplicated().any():
# get all duplicated entries
@@ -147,6 +164,7 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
return keys
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -154,7 +172,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_industrial_distribution_key",
simpl="",
- clusters=48,
+ clusters=128,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])


@@ -41,7 +41,7 @@ The following heat gains and losses are considered:
- heat gain through resistive losses
- heat gain through solar radiation
- - heat loss through radiation of the trasnmission line
+ - heat loss through radiation of the transmission line
- heat loss through forced convection with wind
- heat loss through natural convection

scripts/make_summary_perfect.py
@@ -0,0 +1,745 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""
import numpy as np
import pandas as pd
import pypsa
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
from six import iteritems
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
costs.columns.levels[0],
costs.columns.levels[1],
costs.columns.levels[2],
investments,
],
names=costs.columns.names[:3] + ["year"],
)
costs = costs.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
capital_costs = active.mul(capital_costs, axis=0)
discount = (
n.investment_period_weightings["objective"]
/ n.investment_period_weightings["years"]
)
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values
if c.name == "Link":
p = (
c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
p_all[p_all < 0.0] = 0.0
p = p_all.groupby(level=0).sum()
else:
p = (
round(c.pnl.p, ndigits=2)
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# correct sequestration cost
if c.name == "Store":
items = c.df.index[
(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
]
c.df.loc[items, "marginal_cost"] = -20.0
marginal_costs = p.mul(c.df.marginal_cost).T
# marginal_costs = active.mul(marginal_costs, axis=0)
marginal_costs_grouped = (
marginal_costs.groupby(c.df.carrier).sum().mul(discount)
)
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values
# add back in all hydro
# costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
# costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
# costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()
return costs
def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
cumulative_cost = pd.DataFrame(
index=df["costs"].sum().index,
columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
)
# discount cost and express them in money value of planning_horizons[0]
for r in cumulative_cost.columns:
cumulative_cost[r] = [
df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
for index in cumulative_cost.index
]
# integrate cost throughout the transition path
for r in cumulative_cost.columns:
for cluster in cumulative_cost.index.get_level_values(level=0).unique():
for lv in cumulative_cost.index.get_level_values(level=1).unique():
for sector_opts in cumulative_cost.index.get_level_values(
level=2
).unique():
cumulative_cost.loc[
(cluster, lv, sector_opts, "cumulative cost"), r
] = np.trapz(
cumulative_cost.loc[
idx[cluster, lv, sector_opts, planning_horizons], r
].values,
x=planning_horizons,
)
return cumulative_cost
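Put differently, for each social discount rate r the frame above first discounts every horizon's total system cost to money of the first planning horizon,

    discounted_cost_y = total_cost_y / (1 + r) ** (y - planning_horizons[0])

and the "cumulative cost" entries then integrate the discounted costs over the transition path with the trapezoidal rule (np.trapz along the planning horizons).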
def calculate_nodal_capacities(n, label, nodal_capacities):
# Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
nodal_capacities_c = c.df.groupby(["location", "carrier"])[
opt_name.get(c.name, "p") + "_nom_opt"
].sum()
index = pd.MultiIndex.from_tuples(
[(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
)
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index, label] = nodal_capacities_c.values
return nodal_capacities
def calculate_capacities(n, label, capacities):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
capacities.columns.levels[0],
capacities.columns.levels[1],
capacities.columns.levels[2],
investments,
],
names=capacities.columns.names[:3] + ["year"],
)
capacities = capacities.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
caps = active.mul(caps, axis=0)
capacities_grouped = (
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
)
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(
capacities_grouped.index.union(capacities.index)
)
capacities.loc[capacities_grouped.index, label] = capacities_grouped.values
return capacities
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
.groupby(n.generators.carrier)
.sum()
)
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
curtailment[label] = (((avail - used) / avail) * 100).round(3)
return curtailment
def calculate_energy(n, label, energy):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
energy.columns.levels[0],
energy.columns.levels[1],
energy.columns.levels[2],
investments,
],
names=energy.columns.names[:3] + ["year"],
)
energy = energy.reindex(cols, axis=1)
for c in n.iterate_components(n.one_port_components | n.branch_components):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier, axis=1)
.sum()
)
else:
c_energies = pd.DataFrame(
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
)
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = (
c.pnl["p" + port]
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""]
totals[no_bus] = float(
n.component_attrs[c.name].loc["p" + port, "default"]
)
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
c_energies = pd.concat([c_energies.T], keys=[c.list_name])
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index, label] = c_energies.values
return energy
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses aggregated by
carrier.
"""
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
c.pnl.p[items]
.max()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
continue
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
return supply
def calculate_supply_energy(n, label, supply_energy):
"""
Calculate the total energy supply/consumption of each component at the buses
aggregated by carrier.
"""
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
supply_energy.columns.levels[0],
supply_energy.columns.levels[1],
supply_energy.columns.levels[2],
investments,
],
names=supply_energy.columns.names[:3] + ["year"],
)
supply_energy = supply_energy.reindex(cols, axis=1)
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
if c.name == "Generator":
weightings = n.snapshot_weightings.generators
else:
weightings = n.snapshot_weightings.stores
if i in ["oil", "co2", "H2"]:
if c.name == "Load":
c.df.loc[items, "carrier"] = [
load.split("-202")[0] for load in items
]
if i == "oil" and c.name == "Generator":
c.df.loc[items, "carrier"] = "imported oil"
s = (
c.pnl.p[items]
.multiply(weightings, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
.T
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
(-1)
* c.pnl["p" + end]
.reindex(items, axis=1)
.multiply(n.snapshot_weightings.objective, axis=0)
.groupby(level=0)
.sum()
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
).T
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
return supply_energy
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
).union(metrics.index)
)
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
].sum()
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume", label] = metrics.loc[
["line_volume_AC", "line_volume_DC"], label
].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
return prices
def calculate_weighted_prices(n, label, weighted_prices):
# Warning: doesn't include storage units as loads
weighted_prices = weighted_prices.reindex(
pd.Index(
[
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2",
]
)
)
link_loads = {
"electricity": [
"heat pump",
"resistive heater",
"battery charger",
"H2 Electrolysis",
],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier in link_loads:
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
suffix = carrier[5:]
else:
suffix = " " + carrier
buses = n.buses.index[n.buses.index.str[2:] == suffix]
if buses.empty:
continue
if carrier in ["H2", "gas"]:
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
else:
load = n.loads_t.p_set.reindex(buses, axis=1)
for tech in link_loads[carrier]:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += (
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
# Add H2 Store when charging
# if carrier == "H2":
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
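The value stored above is the demand-weighted average price per carrier,

    weighted_price[carrier] = (load * marginal_price).sum().sum() / load.sum().sum()

where load is the exogenous demand at the carrier's buses (zero for gas and H2) plus the consumption of the listed links (heat pumps, resistive heaters, electrolysis, etc.), and the sums run over snapshots and buses.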
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
carrier = "AC"
buses = n.buses.index[n.buses.carrier == carrier]
## First do market value of generators ##
generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]
techs = n.generators.loc[generators, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
gens = generators[n.generators.loc[generators, "carrier"] == tech]
dispatch = (
n.generators_t.p[gens]
.groupby(n.generators.loc[gens, "bus"], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
## Now do market value of links ##
for i in ["0", "1"]:
all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]
techs = n.links.loc[all_links, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
links = all_links[n.links.loc[all_links, "carrier"] == tech]
dispatch = (
n.links_t["p" + i][links]
.groupby(n.links.loc[links, "bus" + i], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
return market_values
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(
price_statistics.index.union(
pd.Index(["zero_hours", "mean", "standard_deviation"])
)
)
buses = n.buses.index[n.buses.carrier == "AC"]
threshold = 0.1 # higher than phoney marginal_cost of wind/solar
df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)
df[n.buses_t.marginal_price[buses] < threshold] = 1.0
price_statistics.at["zero_hours", label] = df.sum().sum() / (
df.shape[0] * df.shape[1]
)
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()
price_statistics.at["standard_deviation", label] = (
n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
)
return price_statistics
def calculate_co2_emissions(n, label, df):
carattr = "co2_emissions"
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
return
weightings = n.snapshot_weightings.generators.mul(
n.investment_period_weightings["years"]
.reindex(n.snapshots)
.fillna(method="bfill")
.fillna(1.0),
axis=0,
)
# generators
gens = n.generators.query("carrier in @emissions.index")
if not gens.empty:
em_pu = gens.carrier.map(emissions) / gens.efficiency
em_pu = (
weightings["generators"].to_frame("weightings")
@ em_pu.to_frame("weightings").T
)
emitted = n.generators_t.p[gens.index].mul(em_pu)
emitted_grouped = (
emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
)
df = df.reindex(emitted_grouped.index.union(df.index))
df.loc[emitted_grouped.index, label] = emitted_grouped.values
if any(n.stores.carrier == "co2"):
co2_i = n.stores[n.stores.carrier == "co2"].index
df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0]
return df
outputs = [
"nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
"co2_emissions",
]
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["cluster", "lv", "opt"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in iteritems(networks_dict):
print(label, filename)
try:
n = pypsa.Network(filename)
except OSError:
print(label, " not solved yet.")
continue
# del networks_dict[label]
if not hasattr(n, "objective"):
print(label, " not solved correctly. Check log if infeasible or unbounded.")
continue
assign_carriers(n)
assign_locations(n)
for output in outputs:
df[output] = globals()["calculate_" + output](n, label, df[output])
return df
def to_csv(df):
for key in df:
df[key] = df[key].apply(lambda x: pd.to_numeric(x))
df[key].to_csv(snakemake.output[key])
# %%
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("make_summary_perfect")
run = snakemake.config["run"]["name"]
if run != "":
run += "/"
networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
+ run
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]
for lv in snakemake.config["scenario"]["ll"]
}
print(networks_dict)
nyears = 1
costs_db = prepare_costs(
snakemake.input.costs,
snakemake.config["costs"],
nyears,
)
df = make_summaries(networks_dict)
df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum()
to_csv(df)


@@ -24,7 +24,7 @@ from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
- plt.style.use(["ggplot", "matplotlibrc"])
+ plt.style.use(["ggplot"])
def rename_techs_tyndp(tech):
@@ -913,6 +913,159 @@ def plot_series(network, carrier="AC", name="test"):
)
def plot_map_perfect(
network,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10,
):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
# investment periods
investments = n.snapshots.levels[0]
costs = {}
for comp in components:
df_c = n.df(comp)
if df_c.empty:
continue
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
active = pd.concat(
[n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
axis=1,
).astype(int)
capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
capital_cost_t = (
(active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location, n.df(comp).nice_group])
.sum()
)
capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
costs[comp] = capital_cost_t
costs = pd.concat(costs).groupby(level=[1, 2]).sum()
costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
new_columns = preferred_order.intersection(costs.index.levels[1]).append(
costs.index.levels[1].difference(preferred_order)
)
costs = costs.reindex(new_columns, level=1)
for item in new_columns:
if item not in snakemake.config["plotting"]["tech_colors"]:
print(
"Warning!",
item,
"not in config/plotting/tech_colors, assign random color",
)
snakemake.config["plotting"]["tech_colors"] = "pink"
n.links.drop(
n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
inplace=True,
)
# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
# PDF has minimum width, so set these to zero
line_lower_threshold = 500.0
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
linewidth_factor = 2e3
line_lower_threshold = 0.0
title = "Today's transmission"
line_widths[line_widths < line_lower_threshold] = 0.0
link_widths[link_widths < line_lower_threshold] = 0.0
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
for year in costs.columns:
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig.suptitle(year)
n.plot(
bus_sizes=costs[year] / bus_size_factor,
bus_colors=snakemake.config["plotting"]["tech_colors"],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax,
**map_opts,
)
sizes = [20, 10, 5]
labels = [f"{s} bEUR/a" for s in sizes]
sizes = [s / bus_size_factor * 1e9 for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.01, 1.06),
labelspacing=0.8,
frameon=False,
handletextpad=0,
title="system cost",
)
add_legend_circles(
ax,
sizes,
labels,
srid=n.srid,
patch_kw=dict(facecolor="lightgrey"),
legend_kw=legend_kw,
)
sizes = [10, 5]
labels = [f"{s} GW" for s in sizes]
scale = 1e3 / linewidth_factor
sizes = [s * scale for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.27, 1.06),
frameon=False,
labelspacing=0.8,
handletextpad=1,
title=title,
)
add_legend_lines(
ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
)
legend_kw = dict(
bbox_to_anchor=(1.52, 1.04),
frameon=False,
)
fig.savefig(
snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
)
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -921,10 +1074,9 @@ if __name__ == "__main__":
"plot_network",
simpl="",
opts="",
- clusters="5",
+ clusters="37",
- ll="v1.5",
+ ll="v1.0",
- sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1",
+ sector_opts="4380H-T-H-B-I-A-solar+p3-dist1",
- planning_horizons="2030",
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
@@ -938,16 +1090,23 @@ if __name__ == "__main__":
if map_opts["boundaries"] is None:
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
if snakemake.params["foresight"] == "perfect":
    plot_map_perfect(
        n,
        components=["Link", "Store", "StorageUnit", "Generator"],
        bus_size_factor=2e10,
    )
else:
    plot_map(
        n,
        components=["generators", "links", "stores", "storage_units"],
        bus_size_factor=2e10,
        transmission=False,
    )
plot_h2_map(n, regions)
plot_ch4_map(n)
plot_map_without(n)
# plot_series(n, carrier="AC", name=suffix)
# plot_series(n, carrier="heat", name=suffix)

View File

@ -49,6 +49,10 @@ def rename_techs(label):
# "H2 Fuel Cell": "hydrogen storage", # "H2 Fuel Cell": "hydrogen storage",
# "H2 pipeline": "hydrogen storage", # "H2 pipeline": "hydrogen storage",
"battery": "battery storage", "battery": "battery storage",
"H2 for industry": "H2 for industry",
"land transport fuel cell": "land transport fuel cell",
"land transport oil": "land transport oil",
"oil shipping": "shipping oil",
# "CC": "CC" # "CC": "CC"
} }
@ -157,11 +161,11 @@ def plot_costs():
df.index.difference(preferred_order) df.index.difference(preferred_order)
) )
new_columns = df.sum().sort_values().index # new_columns = df.sum().sort_values().index
fig, ax = plt.subplots(figsize=(12, 8)) fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot( df.loc[new_index].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
@ -213,17 +217,22 @@ def plot_energy():
logger.info(f"Total energy of {round(df.sum()[0])} TWh/a") logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
if df.empty:
fig, ax = plt.subplots(figsize=(12, 8))
fig.savefig(snakemake.output.energy, bbox_inches="tight")
return
new_index = preferred_order.intersection(df.index).append( new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order) df.index.difference(preferred_order)
) )
new_columns = df.columns.sort_values() # new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8)) fig, ax = plt.subplots(figsize=(12, 8))
logger.debug(df.loc[new_index, new_columns]) logger.debug(df.loc[new_index])
df.loc[new_index, new_columns].T.plot( df.loc[new_index].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
stacked=True, stacked=True,
@ -267,8 +276,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers i for i in balances_df.index.levels[0] if i not in co2_carriers
] ]
fig, ax = plt.subplots(figsize=(12, 8))
for k, v in balances.items(): for k, v in balances.items():
df = balances_df.loc[v] df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum() df = df.groupby(df.index.get_level_values(2)).sum()
@ -279,7 +286,7 @@ def plot_balances():
# remove trailing link ports # remove trailing link ports
df.index = [ df.index = [
i[:-1] i[:-1]
if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"])) if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"]))
else i else i
for i in df.index for i in df.index
] ]
@ -313,6 +320,8 @@ def plot_balances():
new_columns = df.columns.sort_values() new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot( df.loc[new_index, new_columns].T.plot(
kind="bar", kind="bar",
ax=ax, ax=ax,
@ -345,8 +354,6 @@ def plot_balances():
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight") fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
plt.cla()
def historical_emissions(countries): def historical_emissions(countries):
""" """
@ -354,8 +361,7 @@ def historical_emissions(countries):
""" """
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221) # downloaded 201228 (modified by EEA last on 201221)
fn = "data/bundle-sector/eea/UNFCCC_v23.csv" df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False)
df = pd.read_csv(fn, encoding="latin-1")
df.loc[df["Year"] == "1985-1987", "Year"] = 1986 df.loc[df["Year"] == "1985-1987", "Year"] = 1986
df["Year"] = df["Year"].astype(int) df["Year"] = df["Year"].astype(int)
df = df.set_index( df = df.set_index(
@ -379,18 +385,21 @@ def historical_emissions(countries):
e["waste management"] = "5 - Waste management" e["waste management"] = "5 - Waste management"
e["other"] = "6 - Other Sector" e["other"] = "6 - Other Sector"
e["indirect"] = "ind_CO2 - Indirect CO2" e["indirect"] = "ind_CO2 - Indirect CO2"
e["total wL"] = "Total (with LULUCF)" e["other LULUCF"] = "4.H - Other LULUCF"
e["total woL"] = "Total (without LULUCF)"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"] pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in countries: if "GB" in countries:
countries.remove("GB") countries.remove("GB")
countries.append("UK") countries.append("UK")
# remove countries which are not included in eea historical emission dataset year = df.index.levels[0][df.index.levels[0] >= 1990]
countries_to_remove = {"AL", "BA", "ME", "MK", "RS"}
countries = list(set(countries) - countries_to_remove) missing = pd.Index(countries).difference(df.index.levels[2])
year = np.arange(1990, 2018).tolist() if not missing.empty:
logger.warning(
f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
)
countries = pd.Index(df.index.levels[2]).intersection(countries)
idx = pd.IndexSlice idx = pd.IndexSlice
co2_totals = ( co2_totals = (
@ -453,18 +462,12 @@ def plot_carbon_budget_distribution(input_eurostat):
plt.rcParams["xtick.labelsize"] = 20 plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20 plt.rcParams["ytick.labelsize"] = 20
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
path_cb = "results/" + snakemake.params.RDIR + "csvs/"
countries = snakemake.params.countries
emissions_scope = snakemake.params.emissions_scope emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2 input_co2 = snakemake.input.co2
# historic emissions
countries = snakemake.params.countries
e_1990 = co2_emissions_year( e_1990 = co2_emissions_year(
countries, countries,
input_eurostat, input_eurostat,
@ -474,15 +477,37 @@ def plot_carbon_budget_distribution(input_eurostat):
input_co2, input_co2,
year=1990, year=1990,
) )
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
emissions = historical_emissions(countries) emissions = historical_emissions(countries)
# add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
emissions.loc[2019] = 2.971372
emissions.loc[2020] = 2.691958
emissions.loc[2021] = 2.869355
if snakemake.config["foresight"] == "myopic":
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
["cb"]
]
co2_cap *= e_1990
else:
supply_energy = pd.read_csv(
snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
)
co2_cap = (
supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
)
co2_cap.rename(index=lambda x: int(x), inplace=True)
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22)
# ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
ax1.plot(emissions, color="black", linewidth=3, label=None) ax1.plot(emissions, color="black", linewidth=3, label=None)
# plot committed and uder-discussion targets # plot committed and under-discussion targets
# (notice that historical emissions include all countries in the # (notice that historical emissions include all countries in the
# network, but targets refer to EU) # network, but targets refer to EU)
ax1.plot( ax1.plot(
@ -499,7 +524,7 @@ def plot_carbon_budget_distribution(input_eurostat):
[0.45 * emissions[1990]], [0.45 * emissions[1990]],
marker="*", marker="*",
markersize=12, markersize=12,
markerfacecolor="white", markerfacecolor="black",
markeredgecolor="black", markeredgecolor="black",
) )
@ -523,21 +548,7 @@ def plot_carbon_budget_distribution(input_eurostat):
ax1.plot( ax1.plot(
[2050], [2050],
[0.01 * emissions[1990]], [0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
linewidth=0,
markeredgecolor="black",
label="EU under-discussion target",
zorder=10,
clip_on=False,
)
ax1.plot(
[2050],
[0.125 * emissions[1990]],
"ro",
marker="*", marker="*",
markersize=12, markersize=12,
markerfacecolor="black", markerfacecolor="black",
@ -545,14 +556,19 @@ def plot_carbon_budget_distribution(input_eurostat):
label="EU committed target", label="EU committed target",
) )
for col in co2_cap.columns:
ax1.plot(co2_cap[col], linewidth=3, label=col)
ax1.legend( ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
) )
path_cb_plot = "results/" + snakemake.params.RDIR + "graphs/" plt.grid(axis="y")
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300) path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
plt.savefig(path, bbox_inches="tight")
# %%
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -571,6 +587,7 @@ if __name__ == "__main__":
for sector_opts in snakemake.params.sector_opts: for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-") opts = sector_opts.split("-")
for o in opts: if any(["cb" in o for o in opts]) or (
if "cb" in o: snakemake.config["foresight"] == "perfect"
plot_carbon_budget_distribution(snakemake.input.eurostat) ):
plot_carbon_budget_distribution(snakemake.input.eurostat)

View File

@ -0,0 +1,553 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Concatenate PyPSA networks of single investment periods into one multi-period network.
"""
import logging
import re
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
logger = logging.getLogger(__name__)
# helper functions ---------------------------------------------------
def get_missing(df, n, c):
"""
Return the assets of df for component c that are missing in network n.
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
df_final = getattr(n, c)
missing_i = df.index.difference(df_final.index)
return df.loc[missing_i]
def get_social_discount(t, r=0.01):
"""
Calculate the social discount factor for a given time t (in years) and a
social discount rate r [per unit].
"""
return 1 / (1 + r) ** t
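# Illustrative sketch (not part of the original script): with r = 2% per year,
# a cost incurred 10 years from now is discounted to about 82% of its value,
# since 1 / (1 + 0.02) ** 10 ≈ 0.8203:
# >>> round(get_social_discount(10, r=0.02), 4)
# 0.8203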
def get_investment_weighting(time_weighting, r=0.01):
"""
Define cost weighting.
Return the cost weightings depending on the time_weighting
(pd.Series) and the social discount rate r.
"""
end = time_weighting.cumsum()
start = time_weighting.cumsum().shift().fillna(0)
return pd.concat([start, end], axis=1).apply(
lambda x: sum([get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))]),
axis=1,
)
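# Illustrative sketch, assuming hypothetical 5-year investment periods:
# >>> tw = pd.Series(5, index=[2030, 2035, 2040])
# >>> get_investment_weighting(tw, r=0.0)
# With r=0 every year counts fully, so each period receives an objective
# weight of 5; with r>0 later periods receive smaller weights because their
# years are discounted over the ranges [0, 5), [5, 10) and [10, 15).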
def add_year_to_constraints(n, baseyear):
"""
Add investment period to global constraints and rename index.
Parameters
----------
n : pypsa.Network
baseyear : int
year in which optimized assets are built
"""
for c in n.iterate_components(["GlobalConstraint"]):
c.df["investment_period"] = baseyear
c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)
def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with line
expansion.
Losses of DC links are assumed to be 3% per 1000 km.
"""
logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
n.madd(
"Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost,
)
# Remove AC lines
logger.info("Removing AC lines")
lines_rm = n.lines.index
n.mremove("Line", lines_rm)
# Set efficiency of all DC links to include losses depending on length
n.links.loc[n.links.carrier == "DC", "efficiency"] = (
1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
)
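# Example of the assumed loss model: a 500 km AC line converted to a DC link
# receives an efficiency of 1 - 0.03 * 500 / 1000 = 0.985, i.e. 1.5% losses.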
def adjust_electricity_grid(n, year, years):
"""
Add carrier to lines. Replace AC lines with DC links in case of line
expansion. Add lifetime to DC links in case of line expansion.
Parameters
----------
n : pypsa.Network
year : int
year in which optimized assets are built
years: list
investment periods
"""
n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier == "DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
hvdc_transport_model(n)
links_i = n.links[n.links.carrier == "DC"].index
n.links.loc[links_i, "lifetime"] = 100
if year != years[0]:
n.links.loc[links_i, "p_nom_min"] = 0
n.links.loc[links_i, "p_nom"] = 0
# --------------------------------------------------------------------
def concat_networks(years):
"""
Concatenate the given PyPSA networks and add build_year to new assets.
Return:
n : pypsa.Network for the whole planning horizon
"""
# input paths of sector coupling networks
network_paths = [snakemake.input.brownfield_network] + [
snakemake.input[f"network_{year}"] for year in years[1:]
]
# final concatenated network
n = pypsa.Network()
# iterate over single year networks and concat to perfect foresight network
for i, network_path in enumerate(network_paths):
year = years[i]
network = pypsa.Network(network_path)
adjust_electricity_grid(network, year, years)
add_build_year_to_new_assets(network, year)
# static ----------------------------------
# (1) add buses and carriers
for component in network.iterate_components(["Bus", "Carrier"]):
df_year = component.df
# get missing assets
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# (2) add generators, links, stores and loads
for component in network.iterate_components(
["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
):
df_year = component.df.copy()
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# time variant --------------------------------------------------
network_sns = pd.MultiIndex.from_product([[year], network.snapshots])
snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns)
n.set_snapshots(snapshots)
for component in network.iterate_components():
pnl = getattr(n, component.list_name + "_t")
for k in component.pnl.keys():
pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
continue
if component.name == "Load":
static_load = network.loads.loc[network.loads.p_set != 0]
static_load_t = expand_series(static_load.p_set, network_sns).T
pnl_year = pd.concat(
[pnl_year.reindex(network_sns), static_load_t], axis=1
)
columns = (pnl[k].columns.union(pnl_year.columns)).unique()
pnl[k] = pnl[k].reindex(columns=columns)
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
else:
# this is to avoid adding multiple times assets with
# infinite lifetime as ror
cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values
# (3) global constraints
for component in network.iterate_components(["GlobalConstraint"]):
add_year_to_constraints(network, year)
import_components_from_dataframe(n, component.df, component.name)
# set investment periods
n.investment_periods = n.snapshots.levels[0]
# weighting of the investment period -> assuming last period same weighting as the period before
time_w = n.investment_periods.to_series().diff().shift(-1).ffill()
n.investment_period_weightings["years"] = time_w
# set objective weightings
objective_w = get_investment_weighting(
n.investment_period_weightings["years"], social_discountrate
)
n.investment_period_weightings["objective"] = objective_w
# all former static loads are now time-dependent -> set static = 0
n.loads["p_set"] = 0
n.loads_t.p_set.fillna(0, inplace=True)
return n
def adjust_stores(n):
"""
Make sure that stores still behave cyclically over one year and not over the
whole modelling horizon.
"""
# cyclic constraint
cyclic_i = n.stores[n.stores.e_cyclic].index
n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
n.stores.loc[cyclic_i, "e_cyclic"] = False
# non cyclic store assumptions
non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"]
co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index
n.stores.loc[co2_i, "e_cyclic_per_period"] = False
n.stores.loc[co2_i, "e_cyclic"] = False
# e_initial at beginning of each investment period
e_initial_store = ["solid biomass", "biogas"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
# n.stores.loc[co2_i, "e_initial"] *= 10
# n.stores.loc[co2_i, "e_nom"] *= 10
e_initial_store = ["co2 stored"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
return n
def set_phase_out(n, carrier, ct, phase_out_year):
"""
Set planned phase-outs for a given carrier, country (ct) and planned year of
phase-out (phase_out_year).
"""
df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
# assets which are going to be phased out before end of their lifetime
assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
build_year = n.links.loc[assets_i, "build_year"]
# adjust lifetime
n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)
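# Illustration: a lignite link in "DE" built in 2010 with a 40-year lifetime
# (nominal end 2050) falls under the 2030 phase-out and gets its lifetime cut
# to 2030 - 2010 = 20 years, so it is inactive in later investment periods.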
def set_all_phase_outs(n):
# TODO move this to a csv or to the config
planned = [
(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2030),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025),
(["coal", "lignite"], "DK", 2030),
(["coal", "lignite"], "FI", 2030),
(["coal", "lignite"], "HU", 2030),
(["coal", "lignite"], "SK", 2030),
(["coal", "lignite"], "GR", 2030),
(["coal", "lignite"], "IE", 2030),
(["coal", "lignite"], "NL", 2030),
(["coal", "lignite"], "RS", 2030),
]
for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier, ct, phase_out_year)
# remove assets which are already phased out
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
logger.info("add carbon budget of {}".format(budget))
n.add(
"GlobalConstraint",
"Budget",
type="Co2Budget",
carrier_attribute="co2_emissions",
sense="<=",
constant=budget,
investment_period=n.investment_periods[-1],
)
# drop other CO2 limits
drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
n.mremove("GlobalConstraint", drop_i)
n.add(
"GlobalConstraint",
"carbon_neutral",
type="co2_limit",
carrier_attribute="co2_emissions",
sense="<=",
constant=0,
investment_period=n.investment_periods[-1],
)
# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990
annual_reduction = (emissions_2019 - target_2030) / 11
first_year = n.snapshots.levels[0][0]
time_weightings = n.investment_period_weightings.loc[first_year, "years"]
co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
logger.info(
"add minimum emissions for {} of {} t CO2/a".format(first_year, co2min)
)
n.add(
"GlobalConstraint",
f"Co2Min-{first_year}",
type="Co2min",
carrier_attribute="co2_emissions",
sense=">=",
investment_period=first_year,
constant=co2min * 1e9 * time_weightings,
)
return n
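# Illustration of the budget parsing above: a sector_opts token such as "1p7"
# matches the regex and selects snakemake.config["co2_budget"]["1p7"]
# (assumed to be given in Gt CO2), which is scaled by 1e9 to tonnes and
# enforced as a "Co2Budget" constraint in the last investment period, on top
# of the net-zero "carbon_neutral" constraint.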
def adjust_lvlimit(n):
"""
Convert the global transmission volume constraints of the single investment
periods to one uniform constraint if all their attributes are identical.
"""
c = "GlobalConstraint"
cols = ["carrier_attribute", "sense", "constant", "type"]
glc_type = "transmission_volume_expansion_limit"
if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
glc.index = pd.Index(["lv_limit"])
remove_i = n.df(c)[n.df(c).type == glc_type].index
n.mremove(c, remove_i)
import_components_from_dataframe(n, glc, c)
return n
def adjust_CO2_glc(n):
c = "GlobalConstraint"
glc_name = "CO2Limit"
glc_type = "primary_energy"
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
n.df(c).loc[mask, "type"] = "co2_limit"
return n
def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2.
Add H2 boilers for heating for all existing gas boilers.
"""
c = "Link"
logger.info("Add H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
df = n.links.loc[gas_i]
# adjust bus 0
df["bus0"] = df.bus1.map(n.buses.location) + " H2"
# rename carrier and index
df["carrier"] = df.carrier.apply(
lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
)
df.rename(
index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
)
# TODO: use proper costs for retrofitting
df["capital_cost"] = 100
# set existing capacity to zero
df["p_nom"] = 0
df["p_nom_extendable"] = True
# add H2 boilers to network
import_components_from_dataframe(n, df, c)
def apply_time_segmentation_perfect(
n, segments, solver_name="cbc", overwrite_time_dependent=True
):
"""
Aggregate time series into segments of different lengths.
Input:
n: pypsa Network
segments: (int) number of segments in which the typical period should be
subdivided
solver_name: (str) name of solver
overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network
with typical time series created by tsam
"""
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found. Install via 'pip install tsam'."
)
# get all time-dependent data
columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"])
raw = pd.DataFrame(index=n.snapshots, columns=columns)
for c in n.iterate_components():
for attr, pnl in c.pnl.items():
# exclude e_min_pu which is used for SOC of EVs in the morning
if not pnl.empty and attr != "e_min_pu":
df = pnl.copy()
df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
raw = pd.concat([raw, df], axis=1)
raw = raw.dropna(axis=1)
sn_weightings = {}
for year in raw.index.levels[0]:
logger.info(f"Find representative snapshots for {year}.")
raw_t = raw.loc[year]
# normalise all time-dependent data
annual_max = raw_t.max().replace(0, 1)
raw_t = raw_t.div(annual_max, level=0)
# get representative segments
agg = tsam.TimeSeriesAggregation(
raw_t,
hoursPerPeriod=len(raw_t),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
snapshots = pd.DatetimeIndex(timesteps)
sn_weightings[year] = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
sn_weightings = pd.concat(sn_weightings)
n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
return n
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
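# Illustration: a sector_opts token such as "365seg" matches the regex above
# and triggers apply_time_segmentation_perfect(n, 365), i.e. every investment
# period is compressed to 365 variable-length segments chosen by tsam.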
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
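# e.g. a sector_opts token "sdr2" overrides the config value with a social
# discount rate of 2% (0.02); "sdr0" switches discounting off entirely.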
logger.info(
"Concat networks of investment period {} with social discount rate of {}%".format(
years, social_discountrate * 100
)
)
# concat prenetworks of planning horizon to single network ------------
n = concat_networks(years)
# temporal aggregate
opts = snakemake.wildcards.sector_opts.split("-")
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name)
# adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n)
# adjust global constraints CO2 limit
n = adjust_CO2_glc(n)
# adjust stores to multi period investment
n = adjust_stores(n)
# set phase outs
set_all_phase_outs(n)
# add H2 boiler
add_H2_boilers(n)
# set carbon constraints
opts = snakemake.wildcards.sector_opts.split("-")
n = set_carbon_constraints(n, opts)
# export network
n.export_to_netcdf(snakemake.output[0])

View File

@ -220,7 +220,7 @@ def co2_emissions_year(
# TODO: move to own rule with sector-opts wildcard? # TODO: move to own rule with sector-opts wildcard?
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year, input_co2): def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
""" """
Distribute carbon budget following beta or exponential transition path. Distribute carbon budget following beta or exponential transition path.
""" """
@ -577,6 +577,7 @@ def add_co2_tracking(n, options):
capital_cost=options["co2_sequestration_cost"], capital_cost=options["co2_sequestration_cost"],
carrier="co2 stored", carrier="co2 stored",
bus=spatial.co2.nodes, bus=spatial.co2.nodes,
lifetime=options["co2_sequestration_lifetime"],
) )
n.add("Carrier", "co2 stored") n.add("Carrier", "co2 stored")
@ -3374,7 +3375,7 @@ if __name__ == "__main__":
spatial = define_spatial(pop_layout.index, options) spatial = define_spatial(pop_layout.index, options)
if snakemake.params.foresight == "myopic": if snakemake.params.foresight in ["myopic", "perfect"]:
add_lifetime_wind_solar(n, costs) add_lifetime_wind_solar(n, costs)
conventional = snakemake.params.conventional_carriers conventional = snakemake.params.conventional_carriers
@ -3451,7 +3452,7 @@ if __name__ == "__main__":
if "cb" not in o: if "cb" not in o:
continue continue
limit_type = "carbon budget" limit_type = "carbon budget"
fn = "results/" + snakemake.params.RDIR + "csvs/carbon_budget_distribution.csv" fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
if not os.path.exists(fn): if not os.path.exists(fn):
emissions_scope = snakemake.params.emissions_scope emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year report_year = snakemake.params.eurostat_report_year
@ -3507,7 +3508,7 @@ if __name__ == "__main__":
) )
n.mremove("Line", idx) n.mremove("Line", idx)
first_year_myopic = (snakemake.params.foresight == "myopic") and ( first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and (
snakemake.params.planning_horizons[0] == investment_year snakemake.params.planning_horizons[0] == investment_year
) )

View File

@ -33,7 +33,9 @@ import numpy as np
import pandas as pd import pandas as pd
import pypsa import pypsa
import xarray as xr import xarray as xr
from _benchmark import memory_logger
from _helpers import configure_logging, update_config_with_sector_opts from _helpers import configure_logging, update_config_with_sector_opts
from pypsa.descriptors import get_activity_mask
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING) pypsa.pf.logger.setLevel(logging.WARNING)
@ -47,6 +49,69 @@ def add_land_use_constraint(n, planning_horizons, config):
_add_land_use_constraint(n) _add_land_use_constraint(n)
def add_land_use_constraint_perfect(n):
"""
Add land-use constraints, i.e. technology capacity limits per bus and investment period.
"""
logger.info("Add land-use constraint for perfect foresight")
def compress_series(s):
def process_group(group):
if group.nunique() == 1:
return pd.Series(group.iloc[0], index=[None])
else:
return group
return s.groupby(level=[0, 1]).apply(process_group)
def new_index_name(t):
# Convert all elements to string and filter out None values
parts = [str(x) for x in t if x is not None]
# Join with space, but use a dash for the last item if not None
return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
def check_p_min_p_max(p_nom_max):
p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
p_nom_min = p_nom_min.reindex(p_nom_max.index)
check = (
p_nom_min.groupby(level=[0, 1]).sum()
> p_nom_max.groupby(level=[0, 1]).min()
)
if check.sum():
logger.warning(
f"summed p_min_pu values at node larger than technical potential {check[check].index}"
)
grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
ext_i = n.generators.p_nom_extendable
# get technical limit per node and investment period
p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max
# drop carriers without tech limit
p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
# carrier
carriers = p_nom_max.index.get_level_values(0).unique()
gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
n.generators.loc[gen_i, "p_nom_min"] = 0
# check minimum capacities
check_p_min_p_max(p_nom_max)
# drop multi entries in case p_nom_max stays constant in different periods
# p_nom_max = compress_series(p_nom_max)
# adjust name to fit syntax of nominal constraint per bus
df = p_nom_max.reset_index()
df["name"] = df.apply(
lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row["build_year"] is not None else ""),
axis=1,
)
for name in df.name.unique():
df_carrier = df[df.name == name]
bus = df_carrier.bus
n.buses.loc[bus, name] = df_carrier.p_nom_max.values
return n
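# Illustration: the limits written above end up as additional bus columns such
# as "nom_max_solar_2030" (or "nom_max_solar" if build_year is None), which
# are picked up by PyPSA's per-bus-and-carrier nominal constraints as maximum
# installable capacity per node, carrier and investment period.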
def _add_land_use_constraint(n): def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
@ -82,7 +147,6 @@ def _add_land_use_constraint(n):
def _add_land_use_constraint_m(n, planning_horizons, config): def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters # if generators clustering is lower than network clustering, land_use accounting is at generators clusters
planning_horizons = param["planning_horizons"]
grouping_years = config["existing_capacities"]["grouping_years"] grouping_years = config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons current_horizon = snakemake.wildcards.planning_horizons
@ -116,7 +180,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
n.generators.p_nom_max.clip(lower=0, inplace=True) n.generators.p_nom_max.clip(lower=0, inplace=True)
def add_co2_sequestration_limit(n, limit=200): def add_co2_sequestration_limit(n, config, limit=200):
""" """
Add a global constraint on the amount of Mt CO2 that can be sequestered. Add a global constraint on the amount of Mt CO2 that can be sequestered.
""" """
@ -130,16 +194,146 @@ def add_co2_sequestration_limit(n, limit=200):
limit = float(o[o.find("seq") + 3 :]) * 1e6 limit = float(o[o.find("seq") + 3 :]) * 1e6
break break
n.add( if not n.investment_periods.empty:
periods = n.investment_periods
names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
else:
periods = [np.nan]
names = pd.Index(["co2_sequestration_limit"])
n.madd(
"GlobalConstraint", "GlobalConstraint",
"co2_sequestration_limit", names,
sense="<=", sense="<=",
constant=limit, constant=limit,
type="primary_energy", type="primary_energy",
carrier_attribute="co2_absorptions", carrier_attribute="co2_absorptions",
investment_period=periods,
) )
def add_carbon_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "co2_limit"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
rhs = glc.constant
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
time_valid = int(glc.loc["investment_period"])
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
def add_carbon_budget_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "Co2Budget"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
rhs = glc.constant
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
time_valid = int(glc.loc["investment_period"])
weighting = n.investment_period_weightings.loc[time_valid, "years"]
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] * weighting
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
def add_max_growth(n, config):
"""
Add maximum growth rates for different carriers.
"""
opts = snakemake.params["sector"]["limit_max_growth"]
# take maximum yearly difference between investment periods since historic growth is per year
factor = n.investment_period_weightings.years.max() * opts["factor"]
for carrier in opts["max_growth"].keys():
max_per_period = opts["max_growth"][carrier] * factor
logger.info(
f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
)
n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
for carrier in opts["max_relative_growth"].keys():
max_r_per_period = opts["max_relative_growth"][carrier]
logger.info(
f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
)
n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
return n
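# Worked example with the default config (assuming 5-year investment periods):
# factor = 5 * 1.3 = 6.5, so an onwind max_growth of 16 GW/a allows
# 16 * 6.5 = 104 GW of new onwind per investment period (stored in MW, hence
# the multiplication by 1e3).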
def add_retrofit_gas_boiler_constraint(n, snapshots):
"""
Allow retrofitting of existing gas boilers to H2 boilers.
"""
c = "Link"
logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
mask = n.links.carrier.str.contains("retrofitted H2 boiler")
h2_i = n.links[mask].index
n.links.loc[gas_i, "p_nom_extendable"] = True
p_nom = n.links.loc[gas_i, "p_nom"]
n.links.loc[gas_i, "p_nom"] = 0
# heat profile
cols = n.loads_t.p_set.columns[
n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")
]
profile = n.loads_t.p_set[cols].div(
n.loads_t.p_set[cols].groupby(level=0).max(), level=0
)
# to deal if max value is zero
profile.fillna(0, inplace=True)
profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
profile.columns = gas_i
rhs = profile.mul(p_nom)
dispatch = n.model["Link-p"]
active = get_activity_mask(n, c, snapshots, gas_i)
rhs = rhs[active]
p_gas = dispatch.sel(Link=gas_i)
p_h2 = dispatch.sel(Link=h2_i)
lhs = p_gas + p_h2
n.model.add_constraints(lhs == rhs, name="gas_retrofit")
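# In effect this enforces, per original gas boiler and snapshot,
# p_gas_boiler + p_retrofitted_H2_boiler == heat_profile * original p_nom,
# so retrofitted H2 boilers can substitute for, but not add to, the dispatch
# of the existing gas boiler capacity.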
def prepare_network( def prepare_network(
n, n,
solve_opts=None, solve_opts=None,
@ -200,9 +394,14 @@ def prepare_network(
if foresight == "myopic": if foresight == "myopic":
add_land_use_constraint(n, planning_horizons, config) add_land_use_constraint(n, planning_horizons, config)
if foresight == "perfect":
n = add_land_use_constraint_perfect(n)
if snakemake.params["sector"]["limit_max_growth"]["enable"]:
n = add_max_growth(n, config)
if n.stores.carrier.eq("co2 stored").any(): if n.stores.carrier.eq("co2 stored").any():
limit = co2_sequestration_potential limit = co2_sequestration_potential
add_co2_sequestration_limit(n, limit=limit) add_co2_sequestration_limit(n, config, limit=limit)
return n return n
@ -612,12 +811,19 @@ def extra_functionality(n, snapshots):
add_battery_constraints(n) add_battery_constraints(n)
add_lossy_bidirectional_link_constraints(n) add_lossy_bidirectional_link_constraints(n)
add_pipe_retrofit_constraint(n) add_pipe_retrofit_constraint(n)
if n._multi_invest:
add_carbon_constraint(n, snapshots)
add_carbon_budget_constraint(n, snapshots)
add_retrofit_gas_boiler_constraint(n, snapshots)
def solve_network(n, config, solving, opts="", **kwargs): def solve_network(n, config, solving, opts="", **kwargs):
set_of_options = solving["solver"]["options"] set_of_options = solving["solver"]["options"]
cf_solving = solving["options"] cf_solving = solving["options"]
kwargs["multi_investment_periods"] = (
True if config["foresight"] == "perfect" else False
)
kwargs["solver_options"] = ( kwargs["solver_options"] = (
solving["solver_options"][set_of_options] if set_of_options else {} solving["solver_options"][set_of_options] if set_of_options else {}
) )
@ -664,18 +870,20 @@ def solve_network(n, config, solving, opts="", **kwargs):
return n return n
# %%
if __name__ == "__main__": if __name__ == "__main__":
if "snakemake" not in globals(): if "snakemake" not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake( snakemake = mock_snakemake(
"solve_network", "solve_sector_network_perfect",
configfiles="../config/test/config.perfect.yaml",
simpl="", simpl="",
opts="Ept", opts="",
clusters="37", clusters="5",
ll="v1.0", ll="v1.5",
sector_opts="", sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2020", planning_horizons="2030",
) )
configure_logging(snakemake) configure_logging(snakemake)
if "sector_opts" in snakemake.wildcards.keys(): if "sector_opts" in snakemake.wildcards.keys():
@ -702,13 +910,18 @@ if __name__ == "__main__":
co2_sequestration_potential=snakemake.params["co2_sequestration_potential"], co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
) )
n = solve_network( with memory_logger(
n, filename=getattr(snakemake.log, "memory", None), interval=30.0
config=snakemake.config, ) as mem:
solving=snakemake.params.solving, n = solve_network(
opts=opts, n,
log_fn=snakemake.log.solver, config=snakemake.config,
) solving=snakemake.params.solving,
opts=opts,
log_fn=snakemake.log.solver,
)
logger.info("Maximum memory usage: {}".format(mem.mem_usage))
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])