Merge branch 'PyPSA:master' into fix_retrofit

Ekaterina 2023-10-13 13:30:04 +03:00 committed by GitHub
commit b0a95aefaa
45 changed files with 2810 additions and 436 deletions


@ -83,6 +83,7 @@ jobs:
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
- name: Upload artifacts
uses: actions/upload-artifact@v3

.gitignore

@ -8,6 +8,7 @@ __pycache__
*dconf
gurobi.log
.vscode
*.orig
/bak
/resources


@ -5,7 +5,7 @@ exclude: "^LICENSES"
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.5.0
hooks:
- id: check-merge-conflict
- id: end-of-file-fixer
@ -30,10 +30,10 @@ repos:
# Find common spelling mistakes in comments and docstrings
- repo: https://github.com/codespell-project/codespell
rev: v2.2.5
rev: v2.2.6
hooks:
- id: codespell
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore'] # Ignore capital case words, e.g. country codes
args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom,appartment,bage,ore,setis,tabacco,berfore,vor'] # Ignore capital case words, e.g. country codes
types_or: [python, rst, markdown]
files: ^(scripts|doc)/
@ -74,7 +74,7 @@ repos:
# Format Snakemake rule / workflow files
- repo: https://github.com/snakemake/snakefmt
rev: v0.8.4
rev: v0.8.5
hooks:
- id: snakefmt


@ -14,4 +14,3 @@ build:
python:
install:
- requirements: doc/requirements.txt
system_packages: false


@ -66,6 +66,11 @@ if config["foresight"] == "myopic":
include: "rules/solve_myopic.smk"
if config["foresight"] == "perfect":
include: "rules/solve_perfect.smk"
rule all:
input:
RESULTS + "graphs/costs.pdf",
@ -73,12 +78,19 @@ rule all:
rule purge:
message:
"Purging generated resources, results and docs. Downloads are kept."
run:
rmtree("resources/", ignore_errors=True)
rmtree("results/", ignore_errors=True)
rmtree("doc/_build", ignore_errors=True)
import builtins
do_purge = builtins.input(
"Do you really want to delete all generated resources, \nresults and docs (downloads are kept)? [y/N] "
)
if do_purge == "y":
rmtree("resources/", ignore_errors=True)
rmtree("results/", ignore_errors=True)
rmtree("doc/_build", ignore_errors=True)
print("Purging generated resources, results and docs. Downloads are kept.")
else:
raise Exception(f"Input {do_purge}. Aborting purge.")
rule dag:


@ -452,6 +452,7 @@ sector:
hydrogen_fuel_cell: true
hydrogen_turbine: false
SMR: true
SMR_cc: true
regional_co2_sequestration_potential:
enable: false
attribute: 'conservative estimate Mt'
@ -461,6 +462,7 @@ sector:
years_of_storage: 25
co2_sequestration_potential: 200
co2_sequestration_cost: 10
co2_sequestration_lifetime: 50
co2_spatial: false
co2network: false
cc_fraction: 0.9
@ -491,6 +493,20 @@ sector:
OCGT: gas
biomass_to_liquid: false
biosng: false
limit_max_growth:
enable: false
# allowing 30% larger than max historic growth
factor: 1.3
max_growth: # unit GW
onwind: 16 # onshore max grow so far 16 GW in Europe https://www.iea.org/reports/renewables-2020/wind
solar: 28 # solar max grow so far 28 GW in Europe https://www.iea.org/reports/renewables-2020/solar-pv
offwind-ac: 35 # offshore max grow so far 3.5 GW in Europe https://windeurope.org/about-wind/statistics/offshore/european-offshore-wind-industry-key-trends-statistics-2019/
offwind-dc: 35
max_relative_growth:
onwind: 3
solar: 3
offwind-ac: 3
offwind-dc: 3
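For illustration, a minimal Python sketch of how these options could map onto PyPSA's per-carrier expansion limits. The Carrier attributes ``max_growth`` and ``max_relative_growth`` exist in recent PyPSA versions (the environment pins pypsa>=0.25.2); the exact wiring into the workflow is assumed here, not quoted from this diff:

import pypsa

limit_max_growth = {
    "factor": 1.3,
    "max_growth": {"onwind": 16, "solar": 28, "offwind-ac": 35, "offwind-dc": 35},  # GW per period
    "max_relative_growth": {"onwind": 3, "solar": 3, "offwind-ac": 3, "offwind-dc": 3},
}

n = pypsa.Network()
for carrier, growth_gw in limit_max_growth["max_growth"].items():
    if carrier not in n.carriers.index:
        n.add("Carrier", carrier)
    # absolute cap per investment period: 30 % above the historic maximum, converted to MW
    n.carriers.loc[carrier, "max_growth"] = limit_max_growth["factor"] * growth_gw * 1e3
    # additionally, installed capacity may at most triple from one period to the next
    n.carriers.loc[carrier, "max_relative_growth"] = limit_max_growth["max_relative_growth"][carrier]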
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#industry
industry:
@ -543,11 +559,13 @@ industry:
hotmaps_locate_missing: false
reference_year: 2015
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#costs
costs:
year: 2030
version: v0.6.0
rooftop_share: 0.14 # based on the potentials, assuming (0.1 kW/m2 and 10 m2/person)
social_discountrate: 0.02
fill_values:
FOM: 0
VOM: 0
@ -761,6 +779,7 @@ plotting:
gas pipeline new: '#a87c62'
# oil
oil: '#c9c9c9'
imported oil: '#a3a3a3'
oil boiler: '#adadad'
residential rural oil boiler: '#a9a9a9'
services rural oil boiler: '#a5a5a5'
@ -892,6 +911,7 @@ plotting:
H2 for shipping: "#ebaee0"
H2: '#bf13a0'
hydrogen: '#bf13a0'
retrofitted H2 boiler: '#e5a0d9'
SMR: '#870c71'
SMR CC: '#4f1745'
H2 liquefaction: '#d647bd'


@ -0,0 +1,43 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
run:
name: "perfect"
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#foresight
foresight: perfect
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#scenario
# Wildcard docs in https://pypsa-eur.readthedocs.io/en/latest/wildcards.html
scenario:
simpl:
- ''
ll:
- v1.0
clusters:
- 37
opts:
- ''
sector_opts:
- 1p5-4380H-T-H-B-I-A-solar+p3-dist1
- 1p7-4380H-T-H-B-I-A-solar+p3-dist1
- 2p0-4380H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2020
- 2030
- 2040
- 2050
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#co2-budget
co2_budget:
# update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0


@ -0,0 +1,91 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
tutorial: true
run:
name: "test-sector-perfect"
disable_progressbar: true
shared_resources: true
shared_cutouts: true
foresight: perfect
scenario:
ll:
- v1.0
clusters:
- 5
sector_opts:
- 8760H-T-H-B-I-A-solar+p3-dist1
planning_horizons:
- 2030
- 2040
- 2050
countries: ['BE']
snapshots:
start: "2013-03-01"
end: "2013-03-08"
electricity:
co2limit: 100.e+6
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery]
Store: [H2]
Link: [H2 pipeline]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc]
sector:
min_part_load_fischer_tropsch: 0
min_part_load_methanolisation: 0
atlite:
default_cutout: be-03-2013-era5
cutouts:
be-03-2013-era5:
module: era5
x: [4., 15.]
y: [46., 56.]
time: ["2013-03-01", "2013-03-08"]
renewable:
onwind:
cutout: be-03-2013-era5
offwind-ac:
cutout: be-03-2013-era5
max_depth: false
offwind-dc:
cutout: be-03-2013-era5
max_depth: false
solar:
cutout: be-03-2013-era5
industry:
St_primary_fraction:
2020: 0.8
2030: 0.6
2040: 0.5
2050: 0.4
solving:
solver:
name: glpk
options: glpk-default
mem: 4000
plotting:
map:
boundaries:
eu_node_location:
x: -5.5
y: 46.
costs_max: 1000
costs_threshold: 0.0000001
energy_max:
energy_min:
energy_threshold: 0.000001


@ -79,6 +79,7 @@ allam_cycle,--,"{true, false}",Add option to include `Allam cycle gas power plan
hydrogen_fuel_cell,--,"{true, false}",Add option to include hydrogen fuel cell for re-electrification. Assuming OCGT technology costs
hydrogen_turbine,--,"{true, false}",Add option to include hydrogen turbine for re-electrification. Assuming OCGT technology costs
SMR,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR)
SMR CC,--,"{true, false}",Add option for transforming natural gas into hydrogen and CO2 using Steam Methane Reforming (SMR) and Carbon Capture (CC)
regional_co2 _sequestration_potential,,,
-- enable,--,"{true, false}",Add option for regionally-resolved geological carbon dioxide sequestration potentials based on `CO2StoP <https://setis.ec.europa.eu/european-co2-storage-database_en>`_.
-- attribute,--,string,Name of the attribute for the sequestration potential



@ -41,10 +41,10 @@ Perfect foresight scenarios
.. warning::
Perfect foresight is currently under development and not yet implemented.
Perfect foresight is currently implemented as a first test version.
For running perfect foresight scenarios, in future versions you will be able to
set in the ``config/config.yaml``:
For running perfect foresight scenarios, you can adjust the
``config/config.perfect.yaml``:
.. code:: yaml


@ -20,11 +20,23 @@ Upcoming Release
* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.
* New feature multi-decade optimisation with perfect foresight.
* It is now possible to specify years for biomass potentials which do not exist
in the JRC-ENSPRESO database, e.g. 2037. These are linearly interpolated; see
the sketch after this list.
* In pathway mode, the biomass potential is linked to the investment year.
* Rule ``purge`` now initiates a dialog to confirm if purge is desired.
* Split configuration to enable SMR and SMR CC.
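A minimal sketch of the biomass-potential interpolation mentioned above, mirroring the logic added to ``scripts/build_biomass_potentials.py``; the potential values below are dummies standing in for ``enspreso_biomass_potentials(year, scenario)``:

import numpy as np
import pandas as pd

AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
# dummy potentials per available year
potentials = {y: pd.Series({"manure": 100.0 + (y - 2010), "straw": 200.0}) for y in AVAILABLE_BIOMASS_YEARS}

year = 2037
before = int(np.floor(year / 10) * 10)         # 2030
after = int(np.ceil(year / 10) * 10)           # 2040
fraction = (year - before) / (after - before)  # 0.7
interpolated = potentials[before] + fraction * (potentials[after] - potentials[before])
print(interpolated)  # manure: 127.0, straw: 200.0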
**Bugs and Compatibility**
* A bug preventing custom powerplants specified in ``data/custom_powerplants.csv`` was fixed. (https://github.com/PyPSA/pypsa-eur/pull/732)
PyPSA-Eur 0.8.1 (27th July 2023)
================================


@ -133,89 +133,82 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i
graph[bgcolor=white, margin=0];
node[shape=box, style=rounded, fontname=sans, fontsize=10, penwidth=2];
edge[penwidth=2, color=grey];
0[label = "solve_network", color = "0.21 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.02 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.37 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.39 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.11 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.23 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: onwind", color = "0.57 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.09 0.6 0.85", style="rounded"];
8[label = "build_shapes", color = "0.41 0.6 0.85", style="rounded"];
9[label = "retrieve_databundle", color = "0.28 0.6 0.85", style="rounded"];
10[label = "retrieve_natura_raster", color = "0.62 0.6 0.85", style="rounded"];
11[label = "build_bus_regions", color = "0.53 0.6 0.85", style="rounded"];
12[label = "retrieve_cutout\ncutout: europe-2013-era5", color = "0.05 0.6 0.85", style="rounded,dashed"];
13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.57 0.6 0.85", style="rounded"];
14[label = "build_ship_raster", color = "0.64 0.6 0.85", style="rounded"];
15[label = "retrieve_ship_raster", color = "0.07 0.6 0.85", style="rounded,dashed"];
16[label = "retrieve_cutout\ncutout: europe-2013-sarah", color = "0.05 0.6 0.85", style="rounded,dashed"];
17[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.57 0.6 0.85", style="rounded"];
18[label = "build_renewable_profiles\ntechnology: solar", color = "0.57 0.6 0.85", style="rounded"];
19[label = "build_hydro_profile", color = "0.44 0.6 0.85", style="rounded"];
20[label = "retrieve_cost_data", color = "0.30 0.6 0.85", style="rounded"];
21[label = "build_powerplants", color = "0.16 0.6 0.85", style="rounded"];
22[label = "build_electricity_demand", color = "0.00 0.6 0.85", style="rounded"];
23[label = "retrieve_electricity_demand", color = "0.34 0.6 0.85", style="rounded,dashed"];
1 -> 0
2 -> 1
20 -> 1
3 -> 2
20 -> 2
4 -> 3
20 -> 3
5 -> 4
20 -> 4
11 -> 4
6 -> 5
13 -> 5
17 -> 5
18 -> 5
19 -> 5
7 -> 5
20 -> 5
11 -> 5
21 -> 5
9 -> 5
22 -> 5
8 -> 5
7 -> 6
9 -> 6
10 -> 6
8 -> 6
11 -> 6
12 -> 6
8 -> 7
9 -> 8
8 -> 11
7 -> 11
7 -> 13
9 -> 13
10 -> 13
14 -> 13
8 -> 13
11 -> 13
12 -> 13
15 -> 14
12 -> 14
16 -> 14
7 -> 17
9 -> 17
10 -> 17
14 -> 17
8 -> 17
11 -> 17
12 -> 17
7 -> 18
9 -> 18
10 -> 18
8 -> 18
11 -> 18
16 -> 18
8 -> 19
12 -> 19
7 -> 21
23 -> 22
0[label = "solve_network", color = "0.33 0.6 0.85", style="rounded"];
1[label = "prepare_network\nll: copt\nopts: Co2L-24H", color = "0.03 0.6 0.85", style="rounded"];
2[label = "add_extra_components", color = "0.45 0.6 0.85", style="rounded"];
3[label = "cluster_network\nclusters: 6", color = "0.46 0.6 0.85", style="rounded"];
4[label = "simplify_network\nsimpl: ", color = "0.52 0.6 0.85", style="rounded"];
5[label = "add_electricity", color = "0.55 0.6 0.85", style="rounded"];
6[label = "build_renewable_profiles\ntechnology: solar", color = "0.15 0.6 0.85", style="rounded"];
7[label = "base_network", color = "0.37 0.6 0.85", style="rounded,dashed"];
8[label = "build_shapes", color = "0.07 0.6 0.85", style="rounded,dashed"];
9[label = "retrieve_databundle", color = "0.60 0.6 0.85", style="rounded"];
10[label = "retrieve_natura_raster", color = "0.42 0.6 0.85", style="rounded"];
11[label = "build_bus_regions", color = "0.09 0.6 0.85", style="rounded,dashed"];
12[label = "build_renewable_profiles\ntechnology: onwind", color = "0.15 0.6 0.85", style="rounded"];
13[label = "build_renewable_profiles\ntechnology: offwind-ac", color = "0.15 0.6 0.85", style="rounded"];
14[label = "build_ship_raster", color = "0.02 0.6 0.85", style="rounded"];
15[label = "retrieve_ship_raster", color = "0.40 0.6 0.85", style="rounded"];
16[label = "build_renewable_profiles\ntechnology: offwind-dc", color = "0.15 0.6 0.85", style="rounded"];
17[label = "build_line_rating", color = "0.32 0.6 0.85", style="rounded"];
18[label = "retrieve_cost_data\nyear: 2030", color = "0.50 0.6 0.85", style="rounded"];
19[label = "build_powerplants", color = "0.64 0.6 0.85", style="rounded,dashed"];
20[label = "build_electricity_demand", color = "0.13 0.6 0.85", style="rounded,dashed"];
21[label = "retrieve_electricity_demand", color = "0.31 0.6 0.85", style="rounded"];
22[label = "copy_config", color = "0.23 0.6 0.85", style="rounded"];
1 -> 0
22 -> 0
2 -> 1
18 -> 1
3 -> 2
18 -> 2
4 -> 3
18 -> 3
5 -> 4
18 -> 4
11 -> 4
6 -> 5
12 -> 5
13 -> 5
16 -> 5
7 -> 5
17 -> 5
18 -> 5
11 -> 5
19 -> 5
9 -> 5
20 -> 5
8 -> 5
7 -> 6
9 -> 6
10 -> 6
8 -> 6
11 -> 6
8 -> 7
9 -> 8
8 -> 11
7 -> 11
7 -> 12
9 -> 12
10 -> 12
8 -> 12
11 -> 12
7 -> 13
9 -> 13
10 -> 13
14 -> 13
8 -> 13
11 -> 13
15 -> 14
7 -> 16
9 -> 16
10 -> 16
14 -> 16
8 -> 16
11 -> 16
7 -> 17
7 -> 19
21 -> 20
}
|


@ -55,5 +55,5 @@ dependencies:
- pip:
- tsam>=1.1.0
- pypsa>=0.25.1
- git+https://github.com/fneum/tsam.git@performance
- pypsa>=0.25.2


@ -280,15 +280,16 @@ rule build_biomass_potentials:
country_shapes=RESOURCES + "country_shapes.geojson",
output:
biomass_potentials_all=RESOURCES
+ "biomass_potentials_all_s{simpl}_{clusters}.csv",
biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
+ "biomass_potentials_all_s{simpl}_{clusters}_{planning_horizons}.csv",
biomass_potentials=RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
threads: 1
resources:
mem_mb=1000,
log:
LOGS + "build_biomass_potentials_s{simpl}_{clusters}.log",
LOGS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.log",
benchmark:
BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}"
BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}_{planning_horizons}"
conda:
"../envs/environment.yaml"
script:
@ -735,7 +736,12 @@ rule prepare_sector_network:
dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
co2_totals_name=RESOURCES + "co2_totals.csv",
co2="data/bundle-sector/eea/UNFCCC_v23.csv",
biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
biomass_potentials=RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_"
+ "{}.csv".format(config["biomass"]["year"])
if config["foresight"] == "overnight"
else RESOURCES
+ "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
heat_profile="data/heat_load_profile_BDEW.csv",
costs="data/costs_{}.csv".format(config["costs"]["year"])
if config["foresight"] == "overnight"


@ -60,6 +60,15 @@ rule solve_sector_networks:
),
rule solve_sector_networks_perfect:
input:
expand(
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
**config["scenario"]
),
rule plot_networks:
input:
expand(


@ -8,31 +8,62 @@ localrules:
copy_conda_env,
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
today=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
threads: 2
resources:
mem_mb=10000,
benchmark:
(
if config["foresight"] != "perfect":
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
output:
map=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
today=RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
threads: 2
resources:
mem_mb=10000,
benchmark:
(
BENCHMARKS
+ "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
if config["foresight"] == "perfect":
rule plot_network:
params:
foresight=config["foresight"],
plotting=config["plotting"],
input:
network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
output:
**{
f"map_{year}": RESULTS
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_"
+ f"{year}.pdf"
for year in config["scenario"]["planning_horizons"]
},
threads: 2
resources:
mem_mb=10000,
benchmark:
BENCHMARKS
+ "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
+"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"
rule copy_config:

rules/solve_perfect.smk (new file)

@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
rule add_existing_baseyear:
params:
baseyear=config["scenario"]["planning_horizons"][0],
sector=config["sector"],
existing_capacities=config["existing_capacities"],
costs=config["costs"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
powerplants=RESOURCES + "powerplants.csv",
busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
wildcard_constraints:
planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
threads: 1
resources:
mem_mb=2000,
log:
LOGS
+ "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_existing_baseyear.py"
rule add_brownfield:
params:
H2_retrofit=config["sector"]["H2_retrofit"],
H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
threshold_capacity=config["existing_capacities"]["threshold_capacity"],
input:
network=RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
network_p=solved_previous_horizon, #solved network at previous time step
costs="data/costs_{planning_horizons}.csv",
cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
threads: 4
resources:
mem_mb=10000,
log:
LOGS
+ "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
benchmark:
(
BENCHMARKS
+ "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/add_brownfield.py"
rule prepare_perfect_foresight:
input:
**{
f"network_{year}": RESULTS
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ f"{year}.nc"
for year in config["scenario"]["planning_horizons"][1:]
},
brownfield_network=lambda w: (
RESULTS
+ "prenetworks-brownfield/"
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+ "{}.nc".format(str(config["scenario"]["planning_horizons"][0]))
),
output:
RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 2
resources:
mem_mb=10000,
log:
LOGS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}.log",
benchmark:
(
BENCHMARKS
+ "prepare_perfect_foresight{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/prepare_perfect_foresight.py"
rule solve_sector_network_perfect:
params:
solving=config["solving"],
foresight=config["foresight"],
sector=config["sector"],
planning_horizons=config["scenario"]["planning_horizons"],
co2_sequestration_potential=config["sector"].get(
"co2_sequestration_potential", 200
),
input:
network=RESULTS
+ "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
costs="data/costs_2030.csv",
config=RESULTS + "config.yaml",
output:
RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
threads: 4
resources:
mem_mb=config["solving"]["mem"],
shadow:
"shallow"
log:
solver=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_solver.log",
python=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_python.log",
memory=RESULTS
+ "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_memory.log",
benchmark:
(
BENCHMARKS
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years}"
)
conda:
"../envs/environment.yaml"
script:
"../scripts/solve_network.py"
rule make_summary_perfect:
input:
**{
f"networks_{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}": RESULTS
+ f"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in config["scenario"]["simpl"]
for clusters in config["scenario"]["clusters"]
for opts in config["scenario"]["opts"]
for sector_opts in config["scenario"]["sector_opts"]
for ll in config["scenario"]["ll"]
},
costs="data/costs_2020.csv",
output:
nodal_costs=RESULTS + "csvs/nodal_costs.csv",
nodal_capacities=RESULTS + "csvs/nodal_capacities.csv",
nodal_cfs=RESULTS + "csvs/nodal_cfs.csv",
cfs=RESULTS + "csvs/cfs.csv",
costs=RESULTS + "csvs/costs.csv",
capacities=RESULTS + "csvs/capacities.csv",
curtailment=RESULTS + "csvs/curtailment.csv",
energy=RESULTS + "csvs/energy.csv",
supply=RESULTS + "csvs/supply.csv",
supply_energy=RESULTS + "csvs/supply_energy.csv",
prices=RESULTS + "csvs/prices.csv",
weighted_prices=RESULTS + "csvs/weighted_prices.csv",
market_values=RESULTS + "csvs/market_values.csv",
price_statistics=RESULTS + "csvs/price_statistics.csv",
metrics=RESULTS + "csvs/metrics.csv",
co2_emissions=RESULTS + "csvs/co2_emissions.csv",
threads: 2
resources:
mem_mb=10000,
log:
LOGS + "make_summary_perfect.log",
benchmark:
(BENCHMARKS + "make_summary_perfect")
conda:
"../envs/environment.yaml"
script:
"../scripts/make_summary_perfect.py"
ruleorder: add_existing_baseyear > add_brownfield

scripts/_benchmark.py (new file)

@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
"""
from __future__ import absolute_import, print_function
import logging
import os
import sys
import time
logger = logging.getLogger(__name__)
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Pipe, Process
except ImportError:
from multiprocessing.dummy import Process, Pipe
from memory_profiler import _get_memory, choose_backend
# The memory logging facilities have been adapted from memory_profiler
class MemTimer(Process):
"""
Write memory consumption over a time interval to file until signaled to
stop on the pipe.
"""
def __init__(
self, monitor_pid, interval, pipe, filename, max_usage, backend, *args, **kw
):
self.monitor_pid = monitor_pid
self.interval = interval
self.pipe = pipe
self.filename = filename
self.max_usage = max_usage
self.backend = backend
self.timestamps = kw.pop("timestamps", True)
self.include_children = kw.pop("include_children", True)
super(MemTimer, self).__init__(*args, **kw)
def run(self):
# get baseline memory usage
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
n_measurements = 1
mem_usage = cur_mem if self.max_usage else [cur_mem]
if self.filename is not None:
stream = open(self.filename, "w")
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
else:
stream = None
self.pipe.send(0) # we're ready
stop = False
while True:
cur_mem = _get_memory(
self.monitor_pid,
self.backend,
timestamps=self.timestamps,
include_children=self.include_children,
)
if stream is not None:
stream.write("MEM {0:.6f} {1:.4f}\n".format(*cur_mem))
stream.flush()
n_measurements += 1
if not self.max_usage:
mem_usage.append(cur_mem)
else:
mem_usage = max(cur_mem, mem_usage)
if stop:
break
stop = self.pipe.poll(self.interval)
# do one more iteration
if stream is not None:
stream.close()
self.pipe.send(mem_usage)
self.pipe.send(n_measurements)
class memory_logger(object):
"""
Context manager for taking and reporting memory measurements at fixed
intervals from a separate process, for the duration of a context.
Parameters
----------
filename : None|str
Name of the text file to log memory measurements, if None no log is
created (defaults to None)
interval : float
Interval between measurements (defaults to 1.)
max_usage : bool
If True, only store and report the maximum value (defaults to True)
timestamps : bool
Whether to record tuples of memory usage and timestamps; if logging to
a file timestamps are always kept (defaults to True)
include_children : bool
Whether the memory of subprocesses is to be included (default: True)
Arguments
---------
n_measurements : int
Number of measurements that have been taken
mem_usage : (float, float)|[(float, float)]
All memory measurements and timestamps (if timestamps was True) or only
the maximum memory usage and its timestamp
Note
----
The arguments are only set after all the measurements, i.e. outside of the
with statement.
Example
-------
with memory_logger(filename="memory.log", max_usage=True) as mem:
# Do a lot of long running memory intensive stuff
hard_memory_bound_stuff()
max_mem, timestamp = mem.mem_usage
"""
def __init__(
self,
filename=None,
interval=1.0,
max_usage=True,
timestamps=True,
include_children=True,
):
if filename is not None:
timestamps = True
self.filename = filename
self.interval = interval
self.max_usage = max_usage
self.timestamps = timestamps
self.include_children = include_children
def __enter__(self):
backend = choose_backend()
self.child_conn, self.parent_conn = Pipe() # this will store MemTimer's results
self.p = MemTimer(
os.getpid(),
self.interval,
self.child_conn,
self.filename,
backend=backend,
timestamps=self.timestamps,
max_usage=self.max_usage,
include_children=self.include_children,
)
self.p.start()
self.parent_conn.recv() # wait until memory logging in subprocess is ready
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.parent_conn.send(0) # finish timing
self.mem_usage = self.parent_conn.recv()
self.n_measurements = self.parent_conn.recv()
else:
self.p.terminate()
return False
class timer(object):
level = 0
opened = False
def __init__(self, name="", verbose=True):
self.name = name
self.verbose = verbose
def __enter__(self):
if self.verbose:
if self.opened:
sys.stdout.write("\n")
if len(self.name) > 0:
sys.stdout.write((".. " * self.level) + self.name + ": ")
sys.stdout.flush()
self.__class__.opened = True
self.__class__.level += 1
self.start = time.time()
return self
def print_usec(self, usec):
if usec < 1000:
print("%.1f usec" % usec)
else:
msec = usec / 1000
if msec < 1000:
print("%.1f msec" % msec)
else:
sec = msec / 1000
print("%.1f sec" % sec)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.opened and self.verbose:
sys.stdout.write(".. " * self.level)
if exc_type is None:
stop = time.time()
self.usec = usec = (stop - self.start) * 1e6
if self.verbose:
self.print_usec(usec)
elif self.verbose:
print("failed")
sys.stdout.flush()
self.__class__.level -= 1
if self.verbose:
self.__class__.opened = False
return False
class optional(object):
def __init__(self, variable, contextman):
self.variable = variable
self.contextman = contextman
def __enter__(self):
if self.variable:
return self.contextman.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.variable:
return self.contextman.__exit__(exc_type, exc_val, exc_tb)
return False
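A usage sketch for these helpers; the ``heavy_work`` function and the import path are illustrative:

import time

from _benchmark import memory_logger, optional, timer  # i.e. scripts/_benchmark.py

def heavy_work():
    # stand-in for a long-running, memory-intensive step such as solving a network
    time.sleep(2)

track_memory = True  # only profile when desired; `optional` skips the context manager otherwise
with optional(track_memory, memory_logger(filename="memory.log", max_usage=True)) as mem:
    with timer("heavy work"):
        heavy_work()

if track_memory:
    max_mem, timestamp = mem.mem_usage  # peak memory (MiB) and when it occurred
    print(f"peak memory: {max_mem:.1f} MiB")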


@ -303,10 +303,7 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
def parse(l):
if len(l) == 1:
return yaml.safe_load(l[0])
else:
return {l.pop(0): parse(l)}
return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}
def update_config_with_sector_opts(config, sector_opts):


@ -41,12 +41,9 @@ def add_brownfield(n, n_p, year):
# remove assets if their optimized nominal capacity is lower than a threshold
# since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
chp_heat = c.df.index[
(
c.df[attr + "_nom_extendable"]
& c.df.index.str.contains("urban central")
& c.df.index.str.contains("CHP")
& c.df.index.str.contains("heat")
)
(c.df[f"{attr}_nom_extendable"] & c.df.index.str.contains("urban central"))
& c.df.index.str.contains("CHP")
& c.df.index.str.contains("heat")
]
threshold = snakemake.params.threshold_capacity
@ -60,21 +57,20 @@ def add_brownfield(n, n_p, year):
)
n_p.mremove(
c.name,
chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat],
chp_heat[c.df.loc[chp_heat, f"{attr}_nom_opt"] < threshold_chp_heat],
)
n_p.mremove(
c.name,
c.df.index[
c.df[attr + "_nom_extendable"]
& ~c.df.index.isin(chp_heat)
& (c.df[attr + "_nom_opt"] < threshold)
(c.df[f"{attr}_nom_extendable"] & ~c.df.index.isin(chp_heat))
& (c.df[f"{attr}_nom_opt"] < threshold)
],
)
# copy over assets but fix their capacity
c.df[attr + "_nom"] = c.df[attr + "_nom_opt"]
c.df[attr + "_nom_extendable"] = False
c.df[f"{attr}_nom"] = c.df[f"{attr}_nom_opt"]
c.df[f"{attr}_nom_extendable"] = False
n.import_components_from_dataframe(c.df, c.name)


@ -293,24 +293,23 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
l = opsd_load[cntry]
if len(group) == 1:
return pd.DataFrame({group.index[0]: l})
else:
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
gdp_n = pd.Series(
transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
)
pop_n = pd.Series(
transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
)
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
gdp_n = pd.Series(
transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
)
pop_n = pd.Series(
transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame(
factors.values * l.values[:, np.newaxis],
index=l.index,
columns=factors.index,
)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame(
factors.values * l.values[:, np.newaxis],
index=l.index,
columns=factors.index,
)
load = pd.concat(
[
@ -406,6 +405,7 @@ def attach_wind_and_solar(
capital_cost=capital_cost,
efficiency=costs.at[supcar, "efficiency"],
p_max_pu=ds["profile"].transpose("time", "bus").to_pandas(),
lifetime=costs.at[supcar, "lifetime"],
)
@ -434,7 +434,7 @@ def attach_conventional_generators(
ppl = (
ppl.query("carrier in @carriers")
.join(costs, on="carrier", rsuffix="_r")
.rename(index=lambda s: "C" + str(s))
.rename(index=lambda s: f"C{str(s)}")
)
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
@ -511,7 +511,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
ppl = (
ppl.query('carrier == "hydro"')
.reset_index(drop=True)
.rename(index=lambda s: str(s) + " hydro")
.rename(index=lambda s: f"{str(s)} hydro")
)
ror = ppl.query('technology == "Run-Of-River"')
phs = ppl.query('technology == "Pumped Storage"')
@ -608,16 +608,13 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
)
if not missing_countries.empty:
logger.warning(
"Assuming max_hours=6 for hydro reservoirs in the countries: {}".format(
", ".join(missing_countries)
)
f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
)
hydro_max_hours = hydro.max_hours.where(
hydro.max_hours > 0, hydro.country.map(max_hours_country)
).fillna(6)
flatten_dispatch = params.get("flatten_dispatch", False)
if flatten_dispatch:
if flatten_dispatch := params.get("flatten_dispatch", False):
buffer = params.get("flatten_dispatch_buffer", 0.2)
average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
p_max_pu = (average_capacity_factor + buffer).clip(upper=1)


@ -45,7 +45,7 @@ def add_build_year_to_new_assets(n, baseyear):
# add -baseyear to name
rename = pd.Series(c.df.index, c.df.index)
rename[assets] += "-" + str(baseyear)
rename[assets] += f"-{str(baseyear)}"
c.df.rename(index=rename, inplace=True)
# rename time-dependent
@ -252,7 +252,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "m" in snakemake.wildcards.clusters:
for ind in new_capacity.index:
# existing capacities are split evenly among regions in every country
inv_ind = [i for i in inv_busmap[ind]]
inv_ind = list(inv_busmap[ind])
# for offshore the splitting only includes coastal regions
inv_ind = [
@ -305,6 +305,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
if "EU" not in vars(spatial)[carrier[generator]].locations:
bus0 = bus0.intersection(capacity.index + " gas")
# check for missing bus
missing_bus = pd.Index(bus0).difference(n.buses.index)
if not missing_bus.empty:
logger.info(f"add buses {bus0}")
n.madd(
"Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el",
)
already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index)
lifetime_assets = lifetime.loc[grouping_year, generator].dropna()
@ -533,13 +545,17 @@ def add_heating_capacities_installed_before_baseyear(
bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat",
carrier=name + " resistive heater",
efficiency=costs.at[name_type + " resistive heater", "efficiency"],
capital_cost=costs.at[name_type + " resistive heater", "efficiency"]
* costs.at[name_type + " resistive heater", "fixed"],
p_nom=0.5
* nodal_df[f"{heat_type} resistive heater"][nodes[name]]
* ratio
/ costs.at[name_type + " resistive heater", "efficiency"],
efficiency=costs.at[f"{name_type} resistive heater", "efficiency"],
capital_cost=(
costs.at[f"{name_type} resistive heater", "efficiency"]
* costs.at[f"{name_type} resistive heater", "fixed"]
),
p_nom=(
0.5
* nodal_df[f"{heat_type} resistive heater"][nodes[name]]
* ratio
/ costs.at[f"{name_type} resistive heater", "efficiency"]
),
build_year=int(grouping_year),
lifetime=costs.at[costs_name, "lifetime"],
)
@ -552,16 +568,20 @@ def add_heating_capacities_installed_before_baseyear(
bus1=nodes[name] + " " + name + " heat",
bus2="co2 atmosphere",
carrier=name + " gas boiler",
efficiency=costs.at[name_type + " gas boiler", "efficiency"],
efficiency=costs.at[f"{name_type} gas boiler", "efficiency"],
efficiency2=costs.at["gas", "CO2 intensity"],
capital_cost=costs.at[name_type + " gas boiler", "efficiency"]
* costs.at[name_type + " gas boiler", "fixed"],
p_nom=0.5
* nodal_df[f"{heat_type} gas boiler"][nodes[name]]
* ratio
/ costs.at[name_type + " gas boiler", "efficiency"],
capital_cost=(
costs.at[f"{name_type} gas boiler", "efficiency"]
* costs.at[f"{name_type} gas boiler", "fixed"]
),
p_nom=(
0.5
* nodal_df[f"{heat_type} gas boiler"][nodes[name]]
* ratio
/ costs.at[f"{name_type} gas boiler", "efficiency"]
),
build_year=int(grouping_year),
lifetime=costs.at[name_type + " gas boiler", "lifetime"],
lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
)
n.madd(
@ -581,7 +601,7 @@ def add_heating_capacities_installed_before_baseyear(
* ratio
/ costs.at["decentral oil boiler", "efficiency"],
build_year=int(grouping_year),
lifetime=costs.at[name_type + " gas boiler", "lifetime"],
lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
)
# delete links with p_nom=nan corresponding to extra nodes in country
@ -605,6 +625,10 @@ def add_heating_capacities_installed_before_baseyear(
],
)
# drop assets which are at the end of their lifetime
links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
n.mremove("Link", links_i)
# %%
if __name__ == "__main__":
@ -613,13 +637,13 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"add_existing_baseyear",
configfiles="config/test/config.myopic.yaml",
# configfiles="config/test/config.myopic.yaml",
simpl="",
clusters="5",
ll="v1.5",
clusters="37",
ll="v1.0",
opts="",
sector_opts="24H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2030,
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
planning_horizons=2020,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])


@ -151,9 +151,7 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
)
logger.info(
"Removing buses with voltages {}".format(
pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
)
f'Removing buses with voltages {pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])}'
)
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
@ -460,11 +458,7 @@ def _remove_unconnected_components(network):
components_to_remove = component_sizes.iloc[1:]
logger.info(
"Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
len(components_to_remove),
components_to_remove.max(),
components_to_remove.sum(),
)
f"Removing {len(components_to_remove)} unconnected network components with less than {components_to_remove.max()} buses. In total {components_to_remove.sum()} buses."
)
return network[component == component_sizes.index[0]]


@ -7,9 +7,15 @@ Compute biogas and solid biomass potentials for each clustered model region
using data from JRC ENSPRESO.
"""
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import numpy as np
import pandas as pd
AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
def build_nuts_population_data(year=2013):
pop = pd.read_csv(
@ -208,13 +214,41 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5")
snakemake = mock_snakemake(
"build_biomass_potentials",
simpl="",
clusters="5",
planning_horizons=2050,
)
overnight = snakemake.config["foresight"] == "overnight"
params = snakemake.params.biomass
year = params["year"]
investment_year = int(snakemake.wildcards.planning_horizons)
year = params["year"] if overnight else investment_year
scenario = params["scenario"]
enspreso = enspreso_biomass_potentials(year, scenario)
if year > 2050:
logger.info("No biomass potentials for years after 2050, using 2050.")
max_year = max(AVAILABLE_BIOMASS_YEARS)
enspreso = enspreso_biomass_potentials(max_year, scenario)
elif year not in AVAILABLE_BIOMASS_YEARS:
before = int(np.floor(year / 10) * 10)
after = int(np.ceil(year / 10) * 10)
logger.info(
f"No biomass potentials for {year}, interpolating linearly between {before} and {after}."
)
enspreso_before = enspreso_biomass_potentials(before, scenario)
enspreso_after = enspreso_biomass_potentials(after, scenario)
fraction = (year - before) / (after - before)
enspreso = enspreso_before + fraction * (enspreso_after - enspreso_before)
else:
logger.info(f"Using biomass potentials for {year}.")
enspreso = enspreso_biomass_potentials(year, scenario)
enspreso = disaggregate_nuts0(enspreso)


@ -172,8 +172,6 @@ def build_swiss(year):
def idees_per_country(ct, year, base_dir):
ct_totals = {}
ct_idees = idees_rename.get(ct, ct)
fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
@ -183,11 +181,11 @@ def idees_per_country(ct, year, base_dir):
df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year]
ct_totals["total residential space"] = df["Space heating"]
rows = ["Advanced electric heating", "Conventional electric heating"]
ct_totals["electricity residential space"] = df[rows].sum()
ct_totals = {
"total residential space": df["Space heating"],
"electricity residential space": df[rows].sum(),
}
ct_totals["total residential water"] = df.at["Water heating"]
assert df.index[23] == "Electricity"


@ -29,25 +29,25 @@ def diameter_to_capacity(pipe_diameter_mm):
Based on p.15 of
https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
"""
# slopes definitions
m0 = (1500 - 0) / (500 - 0)
m1 = (5000 - 1500) / (600 - 500)
m2 = (11250 - 5000) / (900 - 600)
m3 = (21700 - 11250) / (1200 - 900)
# intercept
a0 = 0
a1 = -16000
a2 = -7500
a3 = -20100
if pipe_diameter_mm < 500:
# slopes definitions
m0 = (1500 - 0) / (500 - 0)
# intercept
a0 = 0
return a0 + m0 * pipe_diameter_mm
elif pipe_diameter_mm < 600:
return a1 + m1 * pipe_diameter_mm
elif pipe_diameter_mm < 900:
return a2 + m2 * pipe_diameter_mm
else:
m3 = (21700 - 11250) / (1200 - 900)
a3 = -20100
return a3 + m3 * pipe_diameter_mm
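As a quick check of the piecewise fit: a 700 mm pipe falls into the 600-900 mm segment, so its capacity is a2 + m2 * 700 = -7500 + (11250 - 5000) / (900 - 600) * 700 ≈ 7083 MW; the breakpoints 600 mm -> 5000 MW and 900 mm -> 11250 MW are reproduced exactly.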


@ -96,6 +96,23 @@ def prepare_hotmaps_database(regions):
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]
# the .sjoin can lead to duplicates if a geom is in two regions
if gdf.index.duplicated().any():
import pycountry
# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
s = df.loc[duplicated_i, "Country"].apply(
lambda x: pycountry.countries.lookup(x).alpha_2
)
# Get a boolean mask where gdf's country column matches s's values for the same index
mask = gdf["country"] == gdf.index.map(s)
# Filter gdf using the mask
gdf_filtered = gdf[mask]
# concat not duplicated and filtered gdf
gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered]).sort_index()
# the .sjoin can lead to duplicates if a geom is in two overlapping regions
if gdf.index.duplicated().any():
# get all duplicated entries
@ -147,6 +164,7 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
return keys
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -154,7 +172,7 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
"build_industrial_distribution_key",
simpl="",
clusters=48,
clusters=128,
)
logging.basicConfig(level=snakemake.config["logging"]["level"])


@ -167,9 +167,7 @@ def industrial_energy_demand(countries, year):
with mp.Pool(processes=nprocesses) as pool:
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
demand = pd.concat(demand_l, keys=countries)
return demand
return pd.concat(demand_l, keys=countries)
if __name__ == "__main__":


@ -41,7 +41,7 @@ The following heat gains and losses are considered:
- heat gain through resistive losses
- heat gain through solar radiation
- heat loss through radiation of the trasnmission line
- heat loss through radiation of the transmission line
- heat loss through forced convection with wind
- heat loss through natural convection
@ -83,8 +83,7 @@ def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
-------
Resistance of at given temperature.
"""
R = R_ref * (1 + alpha * (T - T_ref))
return R
return R_ref * (1 + alpha * (T - T_ref))
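Worked example for the formula above: with the defaults T_ref = 293 K and alpha = 0.00403 1/K, a conductor at T = 353 K (the operating temperature used below) has R = R_ref * (1 + 0.00403 * 60) ≈ 1.24 * R_ref, i.e. roughly 24 % more resistance than at 20 °C.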
def calculate_line_rating(n, cutout):
@ -125,13 +124,12 @@ def calculate_line_rating(n, cutout):
R = calculate_resistance(T=353, R_ref=R)
Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8)
line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3 # in mW
da = xr.DataArray(
return xr.DataArray(
data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1),
attrs=dict(
description="Maximal possible power in MW for given line considering line rating"
),
)
return da
if __name__ == "__main__":


@ -146,8 +146,7 @@ if __name__ == "__main__":
ppl, snakemake.input.custom_powerplants, custom_ppl_query
)
countries_wo_ppl = set(countries) - set(ppl.Country.unique())
if countries_wo_ppl:
if countries_wo_ppl := set(countries) - set(ppl.Country.unique()):
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
substations = n.buses.query("substation_lv")


@ -611,12 +611,11 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
/ x.A_C_Ref
if x.name[3] != "Window"
else (
window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions)
* x.A_element
(window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element)
/ x.A_C_Ref
if x.value > window_limit(float(l), window_assumptions)
else 0
),
)
if x.value > window_limit(float(l), window_assumptions)
else 0,
axis=1,
)
@ -741,12 +740,12 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
# (1) by transmission
# calculate new U values of building elements due to additional insulation
for l in l_strength:
u_values["new_U_{}".format(l)] = calculate_new_u(
u_values[f"new_U_{l}"] = calculate_new_u(
u_values, l, l_weight, window_assumptions
)
# surface area of building components [m^2]
area_element = (
data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]]
data_tabula[[f"A_{e}" for e in u_values.index.levels[3]]]
.rename(columns=lambda x: x[2:])
.stack()
.unstack(-2)
@ -758,7 +757,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
# heat transfer H_tr_e [W/m^2K] through building element
# U_e * A_e / A_C_Ref
columns = ["value"] + ["new_U_{}".format(l) for l in l_strength]
columns = ["value"] + [f"new_U_{l}" for l in l_strength]
heat_transfer = pd.concat(
[u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1
)
@ -877,10 +876,7 @@ def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
alpha = alpha_H_0 + (tau / tau_H_0)
# heat balance ratio
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
# gain utilisation factor
nu = (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))
return nu
return (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))
def calculate_space_heat_savings(


@ -66,11 +66,7 @@ def salt_cavern_potential_by_region(caverns, regions):
"capacity_per_area * share * area_caverns / 1000"
) # TWh
caverns_regions = (
overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
)
return caverns_regions
return overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
if __name__ == "__main__":


@ -119,7 +119,7 @@ def countries(naturalearth, country_list):
fieldnames = (
df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
)
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[:2]
df = df.loc[
df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))


@ -81,14 +81,12 @@ def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
- pop_weighted_energy_totals["electricity rail"]
)
transport = (
return (
(transport_shape.multiply(energy_totals_transport) * 1e6 * nyears)
.divide(efficiency_gain * ice_correction)
.multiply(1 + dd_EV)
)
return transport
def transport_degree_factor(
temperature,
@ -132,14 +130,12 @@ def bev_availability_profile(fn, snapshots, nodes, options):
traffic.mean() - traffic.min()
)
avail_profile = generate_periodic_profiles(
return generate_periodic_profiles(
dt_index=snapshots,
nodes=nodes,
weekly_profile=avail.values,
)
return avail_profile
def bev_dsm_profile(snapshots, nodes, options):
dsm_week = np.zeros((24 * 7,))
@ -148,14 +144,12 @@ def bev_dsm_profile(snapshots, nodes, options):
"bev_dsm_restriction_value"
]
dsm_profile = generate_periodic_profiles(
return generate_periodic_profiles(
dt_index=snapshots,
nodes=nodes,
weekly_profile=dsm_week,
)
return dsm_profile
if __name__ == "__main__":
if "snakemake" not in globals():


@ -322,9 +322,9 @@ def busmap_for_n_clusters(
neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]]
new_country = list(
set(n.buses.loc[neighbor_bus].country) - set([country])
)[0]
new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[
0
]
logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` "


@ -33,10 +33,7 @@ def assign_locations(n):
ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
for i in ifind.unique():
names = ifind.index[ifind == i]
if i == -1:
c.df.loc[names, "location"] = ""
else:
c.df.loc[names, "location"] = names.str[:i]
c.df.loc[names, "location"] = "" if i == -1 else names.str[:i]
def calculate_nodal_cfs(n, label, nodal_cfs):
@ -397,7 +394,7 @@ def calculate_supply_energy(n, label, supply_energy):
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]
if len(items) == 0:
continue
@ -493,7 +490,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier in link_loads:
for carrier, value in link_loads.items():
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
@ -515,15 +512,15 @@ def calculate_weighted_prices(n, label, weighted_prices):
else:
load = n.loads_t.p_set[buses]
for tech in link_loads[carrier]:
for tech in value:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += (
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
if not names.empty:
load += (
n.links_t.p0[names]
.groupby(n.links.loc[names, "bus0"], axis=1)
.sum()
)
# Add H2 Store when charging
# if carrier == "H2":
@ -650,11 +647,7 @@ def make_summaries(networks_dict):
networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns, dtype=float)
df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs}
for label, filename in networks_dict.items():
logger.info(f"Make summary for scenario {label}, using {filename}")


@ -0,0 +1,745 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""
import numpy as np
import pandas as pd
import pypsa
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
from six import iteritems
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
costs.columns.levels[0],
costs.columns.levels[1],
costs.columns.levels[2],
investments,
],
names=costs.columns.names[:3] + ["year"],
)
costs = costs.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
capital_costs = active.mul(capital_costs, axis=0)
discount = (
n.investment_period_weightings["objective"]
/ n.investment_period_weightings["years"]
)
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped.values
if c.name == "Link":
p = (
c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
p_all[p_all < 0.0] = 0.0
p = p_all.groupby(level=0).sum()
else:
p = (
round(c.pnl.p, ndigits=2)
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# correct sequestration cost
if c.name == "Store":
items = c.df.index[
(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.0)
]
c.df.loc[items, "marginal_cost"] = -20.0
marginal_costs = p.mul(c.df.marginal_cost).T
# marginal_costs = active.mul(marginal_costs, axis=0)
marginal_costs_grouped = (
marginal_costs.groupby(c.df.carrier).sum().mul(discount)
)
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index, label] = marginal_costs_grouped.values
# add back in all hydro
# costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
# costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
# costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()
return costs
def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
cumulative_cost = pd.DataFrame(
index=df["costs"].sum().index,
columns=pd.Series(data=np.arange(0, 0.1, 0.01), name="social discount rate"),
)
# discount cost and express them in money value of planning_horizons[0]
for r in cumulative_cost.columns:
cumulative_cost[r] = [
df["costs"].sum()[index] / ((1 + r) ** (index[-1] - planning_horizons[0]))
for index in cumulative_cost.index
]
# integrate cost throughout the transition path
for r in cumulative_cost.columns:
for cluster in cumulative_cost.index.get_level_values(level=0).unique():
for lv in cumulative_cost.index.get_level_values(level=1).unique():
for sector_opts in cumulative_cost.index.get_level_values(
level=2
).unique():
cumulative_cost.loc[
(cluster, lv, sector_opts, "cumulative cost"), r
] = np.trapz(
cumulative_cost.loc[
idx[cluster, lv, sector_opts, planning_horizons], r
].values,
x=planning_horizons,
)
return cumulative_cost
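# Illustrative sketch (hypothetical numbers, not from any scenario run):
# each planning horizon's total system cost is first discounted to the money
# value of the first horizon and the transition path is then integrated with
# the trapezoidal rule, e.g. for a social discount rate of 2%:
#
#   import numpy as np
#   horizons = [2030, 2040, 2050]
#   costs = [800.0, 700.0, 600.0]  # bn EUR/a per horizon
#   discounted = [c / 1.02 ** (h - horizons[0]) for c, h in zip(costs, horizons)]
#   np.trapz(discounted, x=horizons)  # ≈ 11761 bn EUR over 2030-2050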
def calculate_nodal_capacities(n, label, nodal_capacities):
# Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
nodal_capacities_c = c.df.groupby(["location", "carrier"])[
opt_name.get(c.name, "p") + "_nom_opt"
].sum()
index = pd.MultiIndex.from_tuples(
[(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]
)
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index, label] = nodal_capacities_c.values
return nodal_capacities
def calculate_capacities(n, label, capacities):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
capacities.columns.levels[0],
capacities.columns.levels[1],
capacities.columns.levels[2],
investments,
],
names=capacities.columns.names[:3] + ["year"],
)
capacities = capacities.reindex(cols, axis=1)
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
active = pd.concat(
[
get_active_assets(n, c.name, inv_p).rename(inv_p)
for inv_p in investments
],
axis=1,
).astype(int)
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
caps = active.mul(caps, axis=0)
capacities_grouped = (
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
)
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(
capacities_grouped.index.union(capacities.index)
)
capacities.loc[capacities_grouped.index, label] = capacities_grouped.values
return capacities
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
.groupby(n.generators.carrier)
.sum()
)
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
curtailment[label] = (((avail - used) / avail) * 100).round(3)
return curtailment
def calculate_energy(n, label, energy):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
energy.columns.levels[0],
energy.columns.levels[1],
energy.columns.levels[2],
investments,
],
names=energy.columns.names[:3] + ["year"],
)
energy = energy.reindex(cols, axis=1)
for c in n.iterate_components(n.one_port_components | n.branch_components):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier, axis=1)
.sum()
)
else:
c_energies = pd.DataFrame(
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
)
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = (
c.pnl["p" + port]
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""]
totals[no_bus] = float(
n.component_attrs[c.name].loc["p" + port, "default"]
)
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
c_energies = pd.concat([c_energies.T], keys=[c.list_name])
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index, label] = c_energies.values
return energy
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses aggregated by
carrier.
"""
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
c.pnl.p[items]
.max()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
continue
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
return supply
def calculate_supply_energy(n, label, supply_energy):
"""
Calculate the total energy supply/consumption of each component at the buses
aggregated by carrier.
"""
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
supply_energy.columns.levels[0],
supply_energy.columns.levels[1],
supply_energy.columns.levels[2],
investments,
],
names=supply_energy.columns.names[:3] + ["year"],
)
supply_energy = supply_energy.reindex(cols, axis=1)
bus_carriers = n.buses.carrier.unique()
for i in bus_carriers:
bus_map = n.buses.carrier == i
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
if c.name == "Generator":
weightings = n.snapshot_weightings.generators
else:
weightings = n.snapshot_weightings.stores
if i in ["oil", "co2", "H2"]:
if c.name == "Load":
c.df.loc[items, "carrier"] = [
load.split("-202")[0] for load in items
]
if i == "oil" and c.name == "Generator":
c.df.loc[items, "carrier"] = "imported oil"
s = (
c.pnl.p[items]
.multiply(weightings, axis=0)
.groupby(level=0)
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
.T
)
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = (
(-1)
* c.pnl["p" + end]
.reindex(items, axis=1)
.multiply(n.snapshot_weightings.objective, axis=0)
.groupby(level=0)
.sum()
.groupby(c.df.loc[items, "carrier"], axis=1)
.sum()
).T
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(
s.index.union(supply_energy.index, sort=False)
)
supply_energy.loc[s.index, label] = s.values
return supply_energy
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
).union(metrics.index)
)
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
].sum()
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume", label] = metrics.loc[
["line_volume_AC", "line_volume_DC"], label
].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
return prices
def calculate_weighted_prices(n, label, weighted_prices):
# Warning: doesn't include storage units as loads
weighted_prices = weighted_prices.reindex(
pd.Index(
[
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2",
]
)
)
link_loads = {
"electricity": [
"heat pump",
"resistive heater",
"battery charger",
"H2 Electrolysis",
],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier, value in link_loads.items():
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
suffix = carrier[5:]
else:
suffix = " " + carrier
buses = n.buses.index[n.buses.index.str[2:] == suffix]
if buses.empty:
continue
load = (
pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
if carrier in ["H2", "gas"]
else n.loads_t.p_set.reindex(buses, axis=1)
)
for tech in value:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += (
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
# Add H2 Store when charging
# if carrier == "H2":
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
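# Illustrative sketch (hypothetical numbers): in contrast to the time-averaged
# prices above, the load-weighted price divides the total payment by the total
# load, so high-price hours with little load barely move the average:
#
#   import pandas as pd
#   load = pd.Series([100.0, 10.0])   # MWh served in two snapshots
#   price = pd.Series([50.0, 500.0])  # EUR/MWh in the same snapshots
#   (load * price).sum() / load.sum()  # ≈ 90.9 EUR/MWh (time average: 275)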
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
carrier = "AC"
buses = n.buses.index[n.buses.carrier == carrier]
## First do market value of generators ##
generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]
techs = n.generators.loc[generators, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
gens = generators[n.generators.loc[generators, "carrier"] == tech]
dispatch = (
n.generators_t.p[gens]
.groupby(n.generators.loc[gens, "bus"], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
## Now do market value of links ##
for i in ["0", "1"]:
all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]
techs = n.links.loc[all_links, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
links = all_links[n.links.loc[all_links, "carrier"] == tech]
dispatch = (
n.links_t["p" + i][links]
.groupby(n.links.loc[links, "bus" + i], axis=1)
.sum()
.reindex(columns=buses, fill_value=0.0)
)
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
return market_values
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(
price_statistics.index.union(
pd.Index(["zero_hours", "mean", "standard_deviation"])
)
)
buses = n.buses.index[n.buses.carrier == "AC"]
threshold = 0.1 # higher than phoney marginal_cost of wind/solar
df = pd.DataFrame(data=0.0, columns=buses, index=n.snapshots)
df[n.buses_t.marginal_price[buses] < threshold] = 1.0
price_statistics.at["zero_hours", label] = df.sum().sum() / (
df.shape[0] * df.shape[1]
)
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()
price_statistics.at["standard_deviation", label] = (
n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
)
return price_statistics
def calculate_co2_emissions(n, label, df):
carattr = "co2_emissions"
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
return
weightings = n.snapshot_weightings.generators.mul(
n.investment_period_weightings["years"]
.reindex(n.snapshots)
.fillna(method="bfill")
.fillna(1.0),
axis=0,
)
# generators
gens = n.generators.query("carrier in @emissions.index")
if not gens.empty:
em_pu = gens.carrier.map(emissions) / gens.efficiency
em_pu = (
weightings["generators"].to_frame("weightings")
@ em_pu.to_frame("weightings").T
)
emitted = n.generators_t.p[gens.index].mul(em_pu)
emitted_grouped = (
emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
)
df = df.reindex(emitted_grouped.index.union(df.index))
df.loc[emitted_grouped.index, label] = emitted_grouped.values
if any(n.stores.carrier == "co2"):
co2_i = n.stores[n.stores.carrier == "co2"].index
df[label] = n.stores_t.e.groupby(level=0).last()[co2_i].iloc[:, 0]
return df
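# Illustrative sketch (rounded figures, only for orientation): the specific
# emissions per unit of electricity follow from the carrier's emission factor
# divided by the conversion efficiency, e.g. a gas turbine at 58% efficiency
# burning fuel with roughly 0.198 tCO2/MWh_th emits about
#   0.198 / 0.58  # ≈ 0.34 tCO2 per MWh_el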
outputs = [
"nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
"co2_emissions",
]
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["cluster", "lv", "opt"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in iteritems(networks_dict):
print(label, filename)
try:
n = pypsa.Network(filename)
except OSError:
print(label, " not solved yet.")
continue
# del networks_dict[label]
if not hasattr(n, "objective"):
print(label, " not solved correctly. Check log if infeasible or unbounded.")
continue
assign_carriers(n)
assign_locations(n)
for output in outputs:
df[output] = globals()["calculate_" + output](n, label, df[output])
return df
def to_csv(df):
for key in df:
df[key] = df[key].apply(lambda x: pd.to_numeric(x))
df[key].to_csv(snakemake.output[key])
# %%
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("make_summary_perfect")
run = snakemake.config["run"]["name"]
if run != "":
run += "/"
networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
+ run
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]
for lv in snakemake.config["scenario"]["ll"]
}
print(networks_dict)
nyears = 1
costs_db = prepare_costs(
snakemake.input.costs,
snakemake.config["costs"],
nyears,
)
df = make_summaries(networks_dict)
df["metrics"].loc["total costs"] = df["costs"].sum().groupby(level=[0, 1, 2]).sum()
to_csv(df)

View File

@ -24,7 +24,7 @@ from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
plt.style.use(["ggplot", "matplotlibrc"])
plt.style.use(["ggplot"])
def rename_techs_tyndp(tech):
@ -145,12 +145,12 @@ def plot_map(
ac_color = "rosybrown"
dc_color = "darkseagreen"
title = "added grid"
if snakemake.wildcards["ll"] == "v1.0":
# should be zero
line_widths = n.lines.s_nom_opt - n.lines.s_nom
link_widths = n.links.p_nom_opt - n.links.p_nom
title = "added grid"
if transmission:
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
@ -160,8 +160,6 @@ def plot_map(
else:
line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
link_widths = n.links.p_nom_opt - n.links.p_nom_min
title = "added grid"
if transmission:
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
@ -262,12 +260,7 @@ def group_pipes(df, drop_direction=False):
lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
axis=1,
)
# group pipe lines connecting the same buses and rename them for plotting
pipe_capacity = df.groupby(level=0).agg(
{"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
)
return pipe_capacity
return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
def plot_h2_map(network, regions):
@ -766,11 +759,13 @@ def plot_series(network, carrier="AC", name="test"):
supply = pd.concat(
(
supply,
(-1)
* c.pnl["p" + str(i)]
.loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]]
.groupby(c.df.carrier, axis=1)
.sum(),
(
-1
* c.pnl[f"p{str(i)}"]
.loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]]
.groupby(c.df.carrier, axis=1)
.sum()
),
),
axis=1,
)
@ -913,6 +908,159 @@ def plot_series(network, carrier="AC", name="test"):
)
def plot_map_perfect(
network,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10,
):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
# investment periods
investments = n.snapshots.levels[0]
costs = {}
for comp in components:
df_c = n.df(comp)
if df_c.empty:
continue
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
active = pd.concat(
[n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
axis=1,
).astype(int)
capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
capital_cost_t = (
(active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location, n.df(comp).nice_group])
.sum()
)
capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
costs[comp] = capital_cost_t
costs = pd.concat(costs).groupby(level=[1, 2]).sum()
costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
new_columns = preferred_order.intersection(costs.index.levels[1]).append(
costs.index.levels[1].difference(preferred_order)
)
costs = costs.reindex(new_columns, level=1)
for item in new_columns:
if item not in snakemake.config["plotting"]["tech_colors"]:
print(
"Warning!",
item,
"not in config/plotting/tech_colors, assign random color",
)
snakemake.config["plotting"]["tech_colors"] = "pink"
n.links.drop(
n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
inplace=True,
)
# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
# PDF has minimum width, so set these to zero
line_lower_threshold = 500.0
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
dc_color = "m"
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
linewidth_factor = 2e3
line_lower_threshold = 0.0
title = "Today's transmission"
line_widths[line_widths < line_lower_threshold] = 0.0
link_widths[link_widths < line_lower_threshold] = 0.0
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
for year in costs.columns:
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig.suptitle(year)
n.plot(
bus_sizes=costs[year] / bus_size_factor,
bus_colors=snakemake.config["plotting"]["tech_colors"],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax,
**map_opts,
)
sizes = [20, 10, 5]
labels = [f"{s} bEUR/a" for s in sizes]
sizes = [s / bus_size_factor * 1e9 for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.01, 1.06),
labelspacing=0.8,
frameon=False,
handletextpad=0,
title="system cost",
)
add_legend_circles(
ax,
sizes,
labels,
srid=n.srid,
patch_kw=dict(facecolor="lightgrey"),
legend_kw=legend_kw,
)
sizes = [10, 5]
labels = [f"{s} GW" for s in sizes]
scale = 1e3 / linewidth_factor
sizes = [s * scale for s in sizes]
legend_kw = dict(
loc="upper left",
bbox_to_anchor=(0.27, 1.06),
frameon=False,
labelspacing=0.8,
handletextpad=1,
title=title,
)
add_legend_lines(
ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw
)
legend_kw = dict(
bbox_to_anchor=(1.52, 1.04),
frameon=False,
)
fig.savefig(
snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
)
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -921,10 +1069,9 @@ if __name__ == "__main__":
"plot_network",
simpl="",
opts="",
clusters="5",
ll="v1.5",
sector_opts="CO2L0-1H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
clusters="37",
ll="v1.0",
sector_opts="4380H-T-H-B-I-A-solar+p3-dist1",
)
logging.basicConfig(level=snakemake.config["logging"]["level"])
@ -938,16 +1085,23 @@ if __name__ == "__main__":
if map_opts["boundaries"] is None:
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
plot_map(
n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=2e10,
transmission=False,
)
if snakemake.params["foresight"] == "perfect":
plot_map_perfect(
n,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=2e10,
)
else:
plot_map(
n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=2e10,
transmission=False,
)
plot_h2_map(n, regions)
plot_ch4_map(n)
plot_map_without(n)
plot_h2_map(n, regions)
plot_ch4_map(n)
plot_map_without(n)
# plot_series(n, carrier="AC", name=suffix)
# plot_series(n, carrier="heat", name=suffix)

View File

@ -49,6 +49,10 @@ def rename_techs(label):
# "H2 Fuel Cell": "hydrogen storage",
# "H2 pipeline": "hydrogen storage",
"battery": "battery storage",
"H2 for industry": "H2 for industry",
"land transport fuel cell": "land transport fuel cell",
"land transport oil": "land transport oil",
"oil shipping": "shipping oil",
# "CC": "CC"
}
@ -157,11 +161,11 @@ def plot_costs():
df.index.difference(preferred_order)
)
new_columns = df.sum().sort_values().index
# new_columns = df.sum().sort_values().index
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot(
df.loc[new_index].T.plot(
kind="bar",
ax=ax,
stacked=True,
@ -213,17 +217,22 @@ def plot_energy():
logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
if df.empty:
fig, ax = plt.subplots(figsize=(12, 8))
fig.savefig(snakemake.output.energy, bbox_inches="tight")
return
new_index = preferred_order.intersection(df.index).append(
df.index.difference(preferred_order)
)
new_columns = df.columns.sort_values()
# new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
logger.debug(df.loc[new_index, new_columns])
logger.debug(df.loc[new_index])
df.loc[new_index, new_columns].T.plot(
df.loc[new_index].T.plot(
kind="bar",
ax=ax,
stacked=True,
@ -267,8 +276,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers
]
fig, ax = plt.subplots(figsize=(12, 8))
for k, v in balances.items():
df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()
@ -279,7 +286,7 @@ def plot_balances():
# remove trailing link ports
df.index = [
i[:-1]
if ((i not in ["co2", "NH3"]) and (i[-1:] in ["0", "1", "2", "3"]))
if ((i not in ["co2", "NH3", "H2"]) and (i[-1:] in ["0", "1", "2", "3"]))
else i
for i in df.index
]
@ -290,11 +297,7 @@ def plot_balances():
df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10
]
if v[0] in co2_carriers:
units = "MtCO2/a"
else:
units = "TWh/a"
units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a"
logger.debug(
f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
)
@ -313,6 +316,8 @@ def plot_balances():
new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12, 8))
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
@ -345,8 +350,6 @@ def plot_balances():
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
plt.cla()
def historical_emissions(countries):
"""
@ -354,8 +357,7 @@ def historical_emissions(countries):
"""
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221)
fn = "data/bundle-sector/eea/UNFCCC_v23.csv"
df = pd.read_csv(fn, encoding="latin-1")
df = pd.read_csv(snakemake.input.co2, encoding="latin-1", low_memory=False)
df.loc[df["Year"] == "1985-1987", "Year"] = 1986
df["Year"] = df["Year"].astype(int)
df = df.set_index(
@ -379,18 +381,21 @@ def historical_emissions(countries):
e["waste management"] = "5 - Waste management"
e["other"] = "6 - Other Sector"
e["indirect"] = "ind_CO2 - Indirect CO2"
e["total wL"] = "Total (with LULUCF)"
e["total woL"] = "Total (without LULUCF)"
e["other LULUCF"] = "4.H - Other LULUCF"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in countries:
countries.remove("GB")
countries.append("UK")
# remove countries which are not included in eea historical emission dataset
countries_to_remove = {"AL", "BA", "ME", "MK", "RS"}
countries = list(set(countries) - countries_to_remove)
year = np.arange(1990, 2018).tolist()
year = df.index.levels[0][df.index.levels[0] >= 1990]
missing = pd.Index(countries).difference(df.index.levels[2])
if not missing.empty:
logger.warning(
f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
)
countries = pd.Index(df.index.levels[2]).intersection(countries)
idx = pd.IndexSlice
co2_totals = (
@ -453,18 +458,12 @@ def plot_carbon_budget_distribution(input_eurostat):
plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions (Gt per year)", fontsize=22)
ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
path_cb = "results/" + snakemake.params.RDIR + "csvs/"
countries = snakemake.params.countries
emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year
input_co2 = snakemake.input.co2
# historic emissions
countries = snakemake.params.countries
e_1990 = co2_emissions_year(
countries,
input_eurostat,
@ -474,15 +473,37 @@ def plot_carbon_budget_distribution(input_eurostat):
input_co2,
year=1990,
)
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
emissions = historical_emissions(countries)
# add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
emissions.loc[2019] = 2.971372
emissions.loc[2020] = 2.691958
emissions.loc[2021] = 2.869355
if snakemake.config["foresight"] == "myopic":
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
["cb"]
]
co2_cap *= e_1990
else:
supply_energy = pd.read_csv(
snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
)
co2_cap = (
supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
)
co2_cap.rename(index=lambda x: int(x), inplace=True)
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0, 0])
ax1.set_ylabel("CO$_2$ emissions \n [Gt per year]", fontsize=22)
# ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.params.planning_horizons[-1] + 1])
ax1.plot(emissions, color="black", linewidth=3, label=None)
# plot committed and uder-discussion targets
# plot committed and under-discussion targets
# (notice that historical emissions include all countries in the
# network, but targets refer to EU)
ax1.plot(
@ -499,7 +520,7 @@ def plot_carbon_budget_distribution(input_eurostat):
[0.45 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
markerfacecolor="black",
markeredgecolor="black",
)
@ -523,21 +544,7 @@ def plot_carbon_budget_distribution(input_eurostat):
ax1.plot(
[2050],
[0.01 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="white",
linewidth=0,
markeredgecolor="black",
label="EU under-discussion target",
zorder=10,
clip_on=False,
)
ax1.plot(
[2050],
[0.125 * emissions[1990]],
"ro",
[0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
@ -545,14 +552,19 @@ def plot_carbon_budget_distribution(input_eurostat):
label="EU committed target",
)
for col in co2_cap.columns:
ax1.plot(co2_cap[col], linewidth=3, label=col)
ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
)
path_cb_plot = "results/" + snakemake.params.RDIR + "graphs/"
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)
plt.grid(axis="y")
path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
plt.savefig(path, bbox_inches="tight")
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@ -571,6 +583,5 @@ if __name__ == "__main__":
for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-")
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution(snakemake.input.eurostat)
if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect":
plot_carbon_budget_distribution(snakemake.input.eurostat)

View File

@ -84,13 +84,9 @@ def cross_border_time_series(countries, data):
df_neg.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
)
if (axis % 2) == 0:
title = "Historic"
else:
title = "Optimized"
title = "Historic" if (axis % 2) == 0 else "Optimized"
ax[axis].set_title(
title + " Import / Export for " + cc.convert(country, to="name_short")
f"{title} Import / Export for " + cc.convert(country, to="name_short")
)
# Custom legend elements
@ -137,16 +133,12 @@ def cross_border_bar(countries, data):
df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
if (order % 2) == 0:
title = "Historic"
else:
title = "Optimized"
title = "Historic" if (order % 2) == 0 else "Optimized"
df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
{0: f"{title} " + cc.convert(country, to="name_short")}
)
df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
{0: f"{title} " + cc.convert(country, to="name_short")}
)
df_positive = pd.concat([df_positive_new, df_positive])

View File

@ -0,0 +1,549 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Concatenate pypsa networks of single investment periods into one network.
"""
import logging
import re
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
from six import iterkeys
logger = logging.getLogger(__name__)
# helper functions ---------------------------------------------------
def get_missing(df, n, c):
"""
Get the assets of df which are missing in network n for component c.
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
df_final = getattr(n, c)
missing_i = df.index.difference(df_final.index)
return df.loc[missing_i]
def get_social_discount(t, r=0.01):
"""
Calculate the social discount factor for a given time t and social discount
rate r [per unit].
"""
return 1 / (1 + r) ** t
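# Illustrative sketch (not part of the upstream module): the discount factor
# shrinks geometrically with time, e.g. a cost arising 10 years from now is
# weighted with
#   get_social_discount(10)          # 1 / 1.01**10 ≈ 0.905 at the default 1%
#   get_social_discount(10, r=0.02)  # ≈ 0.820 with a 2% social discount rate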
def get_investment_weighting(time_weighting, r=0.01):
"""
Define cost weighting.
Returns cost weightings depending on the time_weighting
(pd.Series) and the social discount rate r.
"""
end = time_weighting.cumsum()
start = time_weighting.cumsum().shift().fillna(0)
return pd.concat([start, end], axis=1).apply(
lambda x: sum(get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))),
axis=1,
)
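# Illustrative sketch (hypothetical period lengths): for three 10-year
# investment periods and r = 0.02, each period's objective weighting is the
# sum of the yearly discount factors it spans:
#
#   import pandas as pd
#   time_w = pd.Series(10, index=[2030, 2040, 2050])
#   get_investment_weighting(time_w, r=0.02)
#   # ≈ 9.16 (years 0-9), 7.52 (years 10-19), 6.17 (years 20-29)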
def add_year_to_constraints(n, baseyear):
"""
Add investment period to global constraints and rename index.
Parameters
----------
n : pypsa.Network
baseyear : int
year in which optimized assets are built
"""
for c in n.iterate_components(["GlobalConstraint"]):
c.df["investment_period"] = baseyear
c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)
def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with line
expansion.
Losses of DC links are assumed to be 3% per 1000 km.
"""
logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
n.madd(
"Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost,
)
# Remove AC lines
logger.info("Removing AC lines")
lines_rm = n.lines.index
n.mremove("Line", lines_rm)
# Set efficiency of all DC links to include losses depending on length
n.links.loc[n.links.carrier == "DC", "efficiency"] = (
1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
)
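# Illustrative sketch: with the assumed 3% losses per 1000 km, a 500 km AC
# line converted to a DC link receives efficiency = 1 - 0.03 * 500 / 1000
# = 0.985, i.e. 1.5% losses.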
def adjust_electricity_grid(n, year, years):
"""
Add carrier to lines. Replace AC lines with DC links in case of line
expansion. Add lifetime to DC links in case of line expansion.
Parameters
----------
n : pypsa.Network
year : int
year in which optimized assets are built
years: list
investment periods
"""
n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier == "DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
hvdc_transport_model(n)
links_i = n.links[n.links.carrier == "DC"].index
n.links.loc[links_i, "lifetime"] = 100
if year != years[0]:
n.links.loc[links_i, "p_nom_min"] = 0
n.links.loc[links_i, "p_nom"] = 0
# --------------------------------------------------------------------
def concat_networks(years):
"""
Concatenate the given pypsa networks and add the build year.
Return:
n : pypsa.Network for the whole planning horizon
"""
# input paths of sector coupling networks
network_paths = [snakemake.input.brownfield_network] + [
snakemake.input[f"network_{year}"] for year in years[1:]
]
# final concatenated network
n = pypsa.Network()
# iterate over single year networks and concat to perfect foresight network
for i, network_path in enumerate(network_paths):
year = years[i]
network = pypsa.Network(network_path)
adjust_electricity_grid(network, year, years)
add_build_year_to_new_assets(network, year)
# static ----------------------------------
# (1) add buses and carriers
for component in network.iterate_components(["Bus", "Carrier"]):
df_year = component.df
# get missing assets
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# (2) add generators, links, stores and loads
for component in network.iterate_components(
["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
):
df_year = component.df.copy()
missing = get_missing(df_year, n, component.list_name)
import_components_from_dataframe(n, missing, component.name)
# time variant --------------------------------------------------
network_sns = pd.MultiIndex.from_product([[year], network.snapshots])
snapshots = n.snapshots.drop("now", errors="ignore").union(network_sns)
n.set_snapshots(snapshots)
for component in network.iterate_components():
pnl = getattr(n, component.list_name + "_t")
for k in iterkeys(component.pnl):
pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
continue
if component.name == "Load":
static_load = network.loads.loc[network.loads.p_set != 0]
static_load_t = expand_series(static_load.p_set, network_sns).T
pnl_year = pd.concat(
[pnl_year.reindex(network_sns), static_load_t], axis=1
)
columns = (pnl[k].columns.union(pnl_year.columns)).unique()
pnl[k] = pnl[k].reindex(columns=columns)
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
else:
# this is to avoid adding assets with infinite lifetime
# (e.g. ror) multiple times
cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values
# (3) global constraints
for component in network.iterate_components(["GlobalConstraint"]):
add_year_to_constraints(network, year)
import_components_from_dataframe(n, component.df, component.name)
# set investment periods
n.investment_periods = n.snapshots.levels[0]
# weighting of the investment period -> assuming last period same weighting as the period before
time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
n.investment_period_weightings["years"] = time_w
# set objective weightings
objective_w = get_investment_weighting(
n.investment_period_weightings["years"], social_discountrate
)
n.investment_period_weightings["objective"] = objective_w
# all former static loads are now time-dependent -> set static = 0
n.loads["p_set"] = 0
n.loads_t.p_set.fillna(0, inplace=True)
return n
def adjust_stores(n):
"""
Make sure that stores still behave cyclically over one year and not over the
whole modelling horizon.
"""
# cyclic constraint
cyclic_i = n.stores[n.stores.e_cyclic].index
n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
n.stores.loc[cyclic_i, "e_cyclic"] = False
# non cyclic store assumptions
non_cyclic_store = ["co2", "co2 stored", "solid biomass", "biogas", "Li ion"]
co2_i = n.stores[n.stores.carrier.isin(non_cyclic_store)].index
n.stores.loc[co2_i, "e_cyclic_per_period"] = False
n.stores.loc[co2_i, "e_cyclic"] = False
# e_initial at beginning of each investment period
e_initial_store = ["solid biomass", "biogas"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
# n.stores.loc[co2_i, "e_initial"] *= 10
# n.stores.loc[co2_i, "e_nom"] *= 10
e_initial_store = ["co2 stored"]
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True
return n
def set_phase_out(n, carrier, ct, phase_out_year):
"""
Set planned phase-outs for a given carrier, country (ct) and planned year of
phase-out (phase_out_year).
"""
df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
# assets which are going to be phased out before end of their lifetime
assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
build_year = n.links.loc[assets_i, "build_year"]
# adjust lifetime
n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)
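# Illustrative sketch (hypothetical asset): a German lignite link with
# build_year 2010 and lifetime 40 would still be active after the 2030
# phase-out, so its lifetime is shortened to 2030 - 2010 = 20 years.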
def set_all_phase_outs(n):
# TODO move this to a csv or to the config
planned = [
(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2030),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025),
(["coal", "lignite"], "DK", 2030),
(["coal", "lignite"], "FI", 2030),
(["coal", "lignite"], "HU", 2030),
(["coal", "lignite"], "SK", 2030),
(["coal", "lignite"], "GR", 2030),
(["coal", "lignite"], "IE", 2030),
(["coal", "lignite"], "NL", 2030),
(["coal", "lignite"], "RS", 2030),
]
for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier, ct, phase_out_year)
# remove assets which are already phased out
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget is not None:
logger.info(f"add carbon budget of {budget}")
n.add(
"GlobalConstraint",
"Budget",
type="Co2Budget",
carrier_attribute="co2_emissions",
sense="<=",
constant=budget,
investment_period=n.investment_periods[-1],
)
# drop other CO2 limits
drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
n.mremove("GlobalConstraint", drop_i)
n.add(
"GlobalConstraint",
"carbon_neutral",
type="co2_limit",
carrier_attribute="co2_emissions",
sense="<=",
constant=0,
investment_period=n.investment_periods[-1],
)
# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45 * emissions_1990
annual_reduction = (emissions_2019 - target_2030) / 11
first_year = n.snapshots.levels[0][0]
time_weightings = n.investment_period_weightings.loc[first_year, "years"]
co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
logger.info(f"add minimum emissions for {first_year} of {co2min} t CO2/a")
n.add(
"GlobalConstraint",
f"Co2Min-{first_year}",
type="Co2min",
carrier_attribute="co2_emissions",
sense=">=",
investment_period=first_year,
constant=co2min * 1e9 * time_weightings,
)
return n
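# Illustrative sketch (hypothetical budget values): a sector_opts token such
# as "1p7" matches r"^\d+p\d$" and selects the corresponding carbon budget
# from the config, converted from Gt to t CO2:
#
#   import re
#   co2_budget = {"1p5": 34.2, "1p7": 45.0, "2p0": 73.9}  # hypothetical Gt CO2
#   opts = "1p7-4380H-T-H-B-I-A-solar+p3-dist1".split("-")
#   for o in opts:
#       if m := re.match(r"^\d+p\d$", o, re.IGNORECASE):
#           budget = co2_budget[m.group(0)] * 1e9  # 4.5e10 t CO2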
def adjust_lvlimit(n):
"""
Convert the global constraints of the single investment periods into one
uniform constraint if all attributes stay the same.
"""
c = "GlobalConstraint"
cols = ["carrier_attribute", "sense", "constant", "type"]
glc_type = "transmission_volume_expansion_limit"
if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
glc.index = pd.Index(["lv_limit"])
remove_i = n.df(c)[n.df(c).type == glc_type].index
n.mremove(c, remove_i)
import_components_from_dataframe(n, glc, c)
return n
def adjust_CO2_glc(n):
c = "GlobalConstraint"
glc_name = "CO2Limit"
glc_type = "primary_energy"
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
n.df(c).loc[mask, "type"] = "co2_limit"
return n
def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2.
Add a retrofitted H2 boiler for every existing gas boiler used for heating.
"""
c = "Link"
logger.info("Add H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
df = n.links.loc[gas_i]
# adjust bus 0
df["bus0"] = df.bus1.map(n.buses.location) + " H2"
# rename carrier and index
df["carrier"] = df.carrier.apply(
lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
)
df.rename(
index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
)
# todo, costs for retrofitting
df["capital_costs"] = 100
# set existing capacity to zero
df["p_nom"] = 0
df["p_nom_extendable"] = True
# add H2 boilers to network
import_components_from_dataframe(n, df, c)
def apply_time_segmentation_perfect(
n, segments, solver_name="cbc", overwrite_time_dependent=True
):
"""
Aggregating time series to segments with different lengths.
Input:
n: pypsa Network
segments: (int) number of segments in which the typical period should be
subdivided
solver_name: (str) name of solver
overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network
with typical time series created by tsam
"""
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found. Install via 'pip install tsam'."
)
# get all time-dependent data
columns = pd.MultiIndex.from_tuples([], names=["component", "key", "asset"])
raw = pd.DataFrame(index=n.snapshots, columns=columns)
for c in n.iterate_components():
for attr, pnl in c.pnl.items():
# exclude e_min_pu which is used for SOC of EVs in the morning
if not pnl.empty and attr != "e_min_pu":
df = pnl.copy()
df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
raw = pd.concat([raw, df], axis=1)
raw = raw.dropna(axis=1)
sn_weightings = {}
for year in raw.index.levels[0]:
logger.info(f"Find representative snapshots for {year}.")
raw_t = raw.loc[year]
# normalise all time-dependent data
annual_max = raw_t.max().replace(0, 1)
raw_t = raw_t.div(annual_max, level=0)
# get representative segments
agg = tsam.TimeSeriesAggregation(
raw_t,
hoursPerPeriod=len(raw_t),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
snapshots = pd.DatetimeIndex(timesteps)
sn_weightings[year] = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
sn_weightings = pd.concat(sn_weightings)
n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
return n
def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
"""
for o in opts:
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"Use temporal segmentation with {segments} segments")
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll="v1.5",
sector_opts="1p7-4380H-T-H-B-I-A-solar+p3-dist1",
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr", "")) / 100
logger.info(
f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%"
)
# concat prenetworks of planning horizon to single network ------------
n = concat_networks(years)
# temporal aggregate
opts = snakemake.wildcards.sector_opts.split("-")
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation_SEG(n, opts, solver_name)
# adjust global constraints lv limit if the same for all years
n = adjust_lvlimit(n)
# adjust global constraints CO2 limit
n = adjust_CO2_glc(n)
# adjust stores to multi period investment
n = adjust_stores(n)
# set phase outs
set_all_phase_outs(n)
# add H2 boiler
add_H2_boilers(n)
# set carbon constraints
opts = snakemake.wildcards.sector_opts.split("-")
n = set_carbon_constraints(n, opts)
# export network
n.export_to_netcdf(snakemake.output[0])

View File

@ -184,10 +184,7 @@ def get(item, investment_year=None):
"""
Check whether item depends on investment year.
"""
if isinstance(item, dict):
return item[investment_year]
else:
return item
return item[investment_year] if isinstance(item, dict) else item
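# Illustrative sketch (hypothetical values): planning-horizon dependent
# parameters are given as dicts keyed by year, plain scalars apply to all
# years:
#   get({2030: 0.6, 2040: 0.3, 2050: 0.0}, 2040)  # -> 0.3
#   get(0.5, 2040)                                # -> 0.5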
def co2_emissions_year(
@ -220,7 +217,7 @@ def co2_emissions_year(
# TODO: move to own rule with sector-opts wildcard?
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year, input_co2):
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
"""
Distribute carbon budget following beta or exponential transition path.
"""
@ -413,11 +410,9 @@ def update_wind_solar_costs(n, costs):
# e.g. clusters == 37m means that VRE generators are left
# at clustering of simplified network, but that they are
# connected to 37-node network
if snakemake.wildcards.clusters[-1:] == "m":
genmap = busmap_s
else:
genmap = clustermaps
genmap = (
busmap_s if snakemake.wildcards.clusters[-1:] == "m" else clustermaps
)
connection_cost = (connection_cost * weight).groupby(
genmap
).sum() / weight.groupby(genmap).sum()
@ -505,8 +500,7 @@ def remove_non_electric_buses(n):
"""
Remove buses from pypsa-eur with carriers which are not AC buses.
"""
to_drop = list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique())
if to_drop:
if to_drop := list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()):
logger.info(f"Drop buses from PyPSA-Eur with carrier: {to_drop}")
n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])]
@ -577,6 +571,7 @@ def add_co2_tracking(n, options):
capital_cost=options["co2_sequestration_cost"],
carrier="co2 stored",
bus=spatial.co2.nodes,
lifetime=options["co2_sequestration_lifetime"],
)
n.add("Carrier", "co2 stored")
@ -1231,11 +1226,9 @@ def add_storage_and_grids(n, costs):
# apply k_edge_augmentation weighted by length of complement edges
k_edge = options.get("gas_network_connectivity_upgrade", 3)
augmentation = list(
if augmentation := list(
k_edge_augmentation(G, k_edge, avail=complement_edges.values)
)
if augmentation:
):
new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"])
new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1)
@ -1399,7 +1392,7 @@ def add_storage_and_grids(n, costs):
lifetime=costs.at["coal", "lifetime"],
)
if options["SMR"]:
if options["SMR_cc"]:
n.madd(
"Link",
spatial.nodes,
@ -1417,6 +1410,7 @@ def add_storage_and_grids(n, costs):
lifetime=costs.at["SMR CC", "lifetime"],
)
if options["SMR"]:
n.madd(
"Link",
nodes + " SMR",
@ -2893,6 +2887,30 @@ def add_industry(n, costs):
p_set=p_set,
)
primary_steel = get(
snakemake.config["industry"]["St_primary_fraction"], investment_year
)
dri_steel = get(snakemake.config["industry"]["DRI_fraction"], investment_year)
bof_steel = primary_steel - dri_steel
if bof_steel > 0:
add_carrier_buses(n, "coal")
mwh_coal_per_mwh_coke = 1.366 # from eurostat energy balance
p_set = (
industrial_demand["coal"].sum()
+ mwh_coal_per_mwh_coke * industrial_demand["coke"].sum()
) / nhours
n.madd(
"Load",
spatial.coal.nodes,
suffix=" for industry",
bus=spatial.coal.nodes,
carrier="coal for industry",
p_set=p_set,
)
def add_waste_heat(n):
# TODO options?
@ -3325,7 +3343,7 @@ if __name__ == "__main__":
spatial = define_spatial(pop_layout.index, options)
if snakemake.params.foresight == "myopic":
if snakemake.params.foresight in ["myopic", "perfect"]:
add_lifetime_wind_solar(n, costs)
conventional = snakemake.params.conventional_carriers
@ -3402,7 +3420,7 @@ if __name__ == "__main__":
if "cb" not in o:
continue
limit_type = "carbon budget"
fn = "results/" + snakemake.params.RDIR + "csvs/carbon_budget_distribution.csv"
fn = "results/" + snakemake.params.RDIR + "/csvs/carbon_budget_distribution.csv"
if not os.path.exists(fn):
emissions_scope = snakemake.params.emissions_scope
report_year = snakemake.params.eurostat_report_year
@ -3446,7 +3464,7 @@ if __name__ == "__main__":
if options["electricity_grid_connection"]:
add_electricity_grid_connection(n, costs)
first_year_myopic = (snakemake.params.foresight == "myopic") and (
first_year_myopic = (snakemake.params.foresight in ["myopic", "perfect"]) and (
snakemake.params.planning_horizons[0] == investment_year
)

View File

@ -152,22 +152,20 @@ def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_fact
if n.links.empty:
return {}
connection_costs_per_link = {}
for tech in renewable_carriers:
if tech.startswith("offwind"):
connection_costs_per_link[tech] = (
n.links.length
* length_factor
* (
n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"]
+ (1.0 - n.links.underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"]
)
return {
tech: (
n.links.length
* length_factor
* (
n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"]
+ (1.0 - n.links.underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"]
)
return connection_costs_per_link
)
for tech in renewable_carriers
if tech.startswith("offwind")
}
def _compute_connection_costs_to_bus(

View File

@ -33,7 +33,9 @@ import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _benchmark import memory_logger
from _helpers import configure_logging, update_config_with_sector_opts
from pypsa.descriptors import get_activity_mask
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
@ -47,6 +49,69 @@ def add_land_use_constraint(n, planning_horizons, config):
_add_land_use_constraint(n)
def add_land_use_constraint_perfect(n):
"""
Add global constraints for tech capacity limit.
"""
logger.info("Add land-use constraint for perfect foresight")
def compress_series(s):
def process_group(group):
if group.nunique() == 1:
return pd.Series(group.iloc[0], index=[None])
else:
return group
return s.groupby(level=[0, 1]).apply(process_group)
def new_index_name(t):
# Convert all elements to string and filter out None values
parts = [str(x) for x in t if x is not None]
# Join with space, but use a dash for the last item if not None
return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
def check_p_min_p_max(p_nom_max):
p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
p_nom_min = p_nom_min.reindex(p_nom_max.index)
check = (
p_nom_min.groupby(level=[0, 1]).sum()
> p_nom_max.groupby(level=[0, 1]).min()
)
if check.sum():
logger.warning(
f"summed p_min_pu values at node larger than technical potential {check[check].index}"
)
grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
ext_i = n.generators.p_nom_extendable
# get technical limit per node and investment period
p_nom_max = n.generators[ext_i].groupby(grouper).min().p_nom_max
# drop carriers without tech limit
p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
# carrier
carriers = p_nom_max.index.get_level_values(0).unique()
gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
n.generators.loc[gen_i, "p_nom_min"] = 0
# check minimum capacities
check_p_min_p_max(p_nom_max)
# drop multi entries in case p_nom_max stays constant in different periods
# p_nom_max = compress_series(p_nom_max)
# adjust name to fit syntax of nominal constraint per bus
df = p_nom_max.reset_index()
df["name"] = df.apply(
lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row["build_year"] is not None else ""),
axis=1,
)
for name in df.name.unique():
df_carrier = df[df.name == name]
bus = df_carrier.bus
n.buses.loc[bus, name] = df_carrier.p_nom_max.values
return n
def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
@ -82,19 +147,13 @@ def _add_land_use_constraint(n):
def _add_land_use_constraint_m(n, planning_horizons, config):
# if generators clustering is lower than network clustering, land_use accounting is at generators clusters
planning_horizons = param["planning_horizons"]
grouping_years = config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(
set(
[
i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
for i in existing.index
]
)
{i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index}
)
previous_years = [
@ -116,7 +175,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
n.generators.p_nom_max.clip(lower=0, inplace=True)
def add_co2_sequestration_limit(n, limit=200):
def add_co2_sequestration_limit(n, config, limit=200):
"""
Add a global constraint on the amount of Mt CO2 that can be sequestered.
"""
@ -130,16 +189,146 @@ def add_co2_sequestration_limit(n, limit=200):
limit = float(o[o.find("seq") + 3 :]) * 1e6
break
n.add(
if not n.investment_periods.empty:
periods = n.investment_periods
names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
else:
periods = [np.nan]
names = pd.Index(["co2_sequestration_limit"])
n.madd(
"GlobalConstraint",
"co2_sequestration_limit",
names,
sense="<=",
constant=limit,
type="primary_energy",
carrier_attribute="co2_absorptions",
investment_period=periods,
)
def add_carbon_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "co2_limit"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_valid = int(glc.loc["investment_period"])
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
rhs = glc.constant
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
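# In words: for each "co2_limit" global constraint, the expression above takes the
# end-of-period level of every non-cyclic store whose carrier emits (or absorbs) CO2,
# subtracts the level at the end of the previous period (the shift along the snapshot
# dimension), and requires this net increase within the constraint's investment
# period to stay below glc.constant.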
def add_carbon_budget_constraint(n, snapshots):
glcs = n.global_constraints.query('type == "Co2Budget"')
if glcs.empty:
return
for name, glc in glcs.iterrows():
carattr = glc.carrier_attribute
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
if emissions.empty:
continue
# stores
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
if not stores.empty:
last = n.snapshot_weightings.reset_index().groupby("period").last()
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_valid = int(glc.loc["investment_period"])
time_i = pd.IndexSlice[time_valid, :]
weighting = n.investment_period_weightings.loc[time_valid, "years"]
lhs = final_e.loc[time_i, :] * weighting
rhs = glc.constant
n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
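# In words: a "Co2Budget" constraint instead bounds the end-of-period store level
# itself, weighted by the length of the investment period in years
# (n.investment_period_weightings), so the CO2 accumulated by the end of the period
# counts against the budget constant.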
def add_max_growth(n, config):
"""
Add maximum growth rates for different carriers.
"""
opts = snakemake.params["sector"]["limit_max_growth"]
    # scale by the maximum number of years between investment periods, since the historic growth rates are given per year
factor = n.investment_period_weightings.years.max() * opts["factor"]
for carrier in opts["max_growth"].keys():
max_per_period = opts["max_growth"][carrier] * factor
logger.info(
f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
)
n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
for carrier in opts["max_relative_growth"].keys():
max_r_per_period = opts["max_relative_growth"][carrier]
logger.info(
f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
)
n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
return n
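# Worked example (numbers assumed, mirroring the config defaults): with 5-year
# investment periods and factor = 1.3, a carrier with max_growth = 16 GW/a is limited
# to 16 * 5 * 1.3 = 104 GW of new capacity per investment period, stored on
# n.carriers.max_growth as 104000 MW; max_relative_growth entries are passed through
# unchanged.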
def add_retrofit_gas_boiler_constraint(n, snapshots):
"""
Allow retrofitting of existing gas boilers to H2 boilers.
"""
c = "Link"
logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
# existing gas boilers
mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
gas_i = n.links[mask].index
mask = n.links.carrier.str.contains("retrofitted H2 boiler")
h2_i = n.links[mask].index
n.links.loc[gas_i, "p_nom_extendable"] = True
p_nom = n.links.loc[gas_i, "p_nom"]
n.links.loc[gas_i, "p_nom"] = 0
    # normalised heat demand profile per bus (industry and agriculture excluded)
cols = n.loads_t.p_set.columns[
n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")
]
profile = n.loads_t.p_set[cols].div(
n.loads_t.p_set[cols].groupby(level=0).max(), level=0
)
    # handle columns where the maximum value is zero (division above yields NaN)
profile.fillna(0, inplace=True)
profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
profile.columns = gas_i
rhs = profile.mul(p_nom)
dispatch = n.model["Link-p"]
active = get_activity_mask(n, c, snapshots, gas_i)
rhs = rhs[active]
p_gas = dispatch.sel(Link=gas_i)
p_h2 = dispatch.sel(Link=h2_i)
lhs = p_gas + p_h2
n.model.add_constraints(lhs == rhs, name="gas_retrofit")
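# In words (summary of the constraint above): every existing gas boiler is made
# extendable with its p_nom reset to zero, and for each active snapshot the sum of
# its dispatch and the dispatch of the corresponding retrofitted H2 boiler must equal
# the original p_nom times the normalised building-heat profile of its bus.
# The former gas-fired heat supply can therefore be met by any mix of the remaining
# gas boiler and its H2 retrofit, but its time profile is fixed.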
def prepare_network(
n,
solve_opts=None,
@ -156,8 +345,7 @@ def prepare_network(
):
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
load_shedding = solve_opts.get("load_shedding")
if load_shedding:
if load_shedding := solve_opts.get("load_shedding"):
        # intersect between macroeconomic and survey-based willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
# TODO: retrieve color and nice name from config
@ -200,9 +388,14 @@ def prepare_network(
if foresight == "myopic":
add_land_use_constraint(n, planning_horizons, config)
if foresight == "perfect":
n = add_land_use_constraint_perfect(n)
if snakemake.params["sector"]["limit_max_growth"]["enable"]:
n = add_max_growth(n, config)
if n.stores.carrier.eq("co2 stored").any():
limit = co2_sequestration_potential
add_co2_sequestration_limit(n, limit=limit)
add_co2_sequestration_limit(n, config, limit=limit)
return n
@ -594,12 +787,17 @@ def extra_functionality(n, snapshots):
add_EQ_constraints(n, o)
add_battery_constraints(n)
add_pipe_retrofit_constraint(n)
if n._multi_invest:
add_carbon_constraint(n, snapshots)
add_carbon_budget_constraint(n, snapshots)
add_retrofit_gas_boiler_constraint(n, snapshots)
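# Note: the three constraints above are only added when the network is solved with
# multiple investment periods (perfect foresight); otherwise this block is skipped.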
def solve_network(n, config, solving, opts="", **kwargs):
set_of_options = solving["solver"]["options"]
cf_solving = solving["options"]
kwargs["multi_investment_periods"] = config["foresight"] == "perfect"
kwargs["solver_options"] = (
solving["solver_options"][set_of_options] if set_of_options else {}
)
@ -646,18 +844,20 @@ def solve_network(n, config, solving, opts="", **kwargs):
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"solve_network",
"solve_sector_network_perfect",
configfiles="../config/test/config.perfect.yaml",
simpl="",
opts="Ept",
clusters="37",
ll="v1.0",
sector_opts="",
planning_horizons="2020",
opts="",
clusters="5",
ll="v1.5",
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
)
configure_logging(snakemake)
if "sector_opts" in snakemake.wildcards.keys():
@ -684,13 +884,18 @@ if __name__ == "__main__":
co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
)
n = solve_network(
n,
config=snakemake.config,
solving=snakemake.params.solving,
opts=opts,
log_fn=snakemake.log.solver,
)
with memory_logger(
filename=getattr(snakemake.log, "memory", None), interval=30.0
) as mem:
n = solve_network(
n,
config=snakemake.config,
solving=snakemake.params.solving,
opts=opts,
log_fn=snakemake.log.solver,
)
logger.info(f"Maximum memory usage: {mem.mem_usage}")
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

View File

@ -7,6 +7,7 @@ Solves linear optimal dispatch in hourly resolution using the capacities of
previous capacity expansion in rule :mod:`solve_network`.
"""
import logging
import numpy as np
@ -35,7 +36,7 @@ if __name__ == "__main__":
configure_logging(snakemake)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
opts = (snakemake.wildcards.opts + "-" + snakemake.wildcards.sector_opts).split("-")
opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-")
opts = [o for o in opts if o != ""]
solve_opts = snakemake.params.options