Merge pull request #651 from PyPSA/validation

Validation
Fabian Neumann 2023-08-06 09:22:43 +02:00 committed by GitHub
commit e34cec427b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 1626 additions and 190 deletions

.sync-send Normal file

@ -0,0 +1,11 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
rules
scripts
config
config/test
envs
matplotlibrc
Snakefile


@ -1,21 +0,0 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
.snakemake
.git
.pytest_cache
.ipynb_checkpoints
.vscode
.DS_Store
__pycache__
*.pyc
*.pyo
*.ipynb
notebooks
doc
cutouts
data
benchmarks
*.nc
configs


@ -1,23 +0,0 @@
# SPDX-FileCopyrightText: : 2021-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
.snakemake
.git
.pytest_cache
.ipynb_checkpoints
.vscode
.DS_Store
__pycache__
*.pyc
*.pyo
*.ipynb
notebooks
benchmarks
logs
resources*
results
networks*
cutouts
data/bundle
doc


@ -40,7 +40,7 @@ localrules:
wildcard_constraints:
simpl="[a-zA-Z0-9]*",
clusters="[0-9]+m?|all",
clusters="[0-9]+(m|c)?|all",
ll="(v|c)([0-9\.]+|opt)",
opts="[-+a-zA-Z0-9\.]*",
sector_opts="[-+a-zA-Z0-9\.\s]*",
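The widened ``clusters`` wildcard above now also accepts a ``c`` suffix. A quick sanity check of the new pattern, as a minimal sketch in plain Python (values chosen for illustration):

import re

# new pattern from the wildcard_constraints above
pattern = re.compile(r"[0-9]+(m|c)?|all")

# "37c" is newly accepted alongside "37", "37m" and "all"
for value in ["37", "37m", "37c", "all"]:
    assert pattern.fullmatch(value), f"{value} should match"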
@ -53,6 +53,7 @@ include: "rules/build_electricity.smk"
include: "rules/build_sector.smk"
include: "rules/solve_electricity.smk"
include: "rules/postprocess.smk"
include: "rules/validate.smk"
if config["foresight"] == "overnight":
@ -98,3 +99,14 @@ rule doc:
directory("doc/_build"),
shell:
"make -C doc html"
rule sync:
params:
cluster=f"{config['remote']['ssh']}:{config['remote']['path']}",
shell:
"""
rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster}
rsync -uvarh --no-g {params.cluster}/results . || echo "No results directory, skipping rsync"
rsync -uvarh --no-g {params.cluster}/logs . || echo "No logs directory, skipping rsync"
"""


@ -10,6 +10,14 @@ logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
private:
keys:
entsoe_api:
remote:
ssh: ""
path: ""
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#run
run:
name: ""
@ -209,10 +217,14 @@ renewable:
carriers: [ror, PHS, hydro]
PHS_max_hours: 6
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
flatten_dispatch: false
flatten_dispatch_buffer: 0.2
clip_min_inflow: 1.0
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#conventional
conventional:
unit_commitment: false
dynamic_fuel_price: false
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name
@ -574,16 +586,12 @@ clustering:
algorithm: kmeans
feature: solar+onwind-time
exclude_carriers: []
consider_efficiency_classes: false
aggregation_strategies:
generators:
p_nom_max: sum
p_nom_min: sum
p_min_pu: mean
marginal_cost: mean
committable: any
ramp_limit_up: max
ramp_limit_down: max
efficiency: mean
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#solving
solving:
@ -591,13 +599,17 @@ solving:
options:
clip_p_max_pu: 1.e-2
load_shedding: false
transmission_losses: 0
noisy_costs: true
skip_iterations: true
rolling_horizon: false
seed: 123
# options that go into the optimize function
track_iterations: false
min_iterations: 4
max_iterations: 6
seed: 123
transmission_losses: 0
linearized_unit_commitment: true
horizon: 365
solver:
name: gurobi
@ -625,7 +637,6 @@ solving:
AggFill: 0
PreDual: 0
GURO_PAR_BARDENSETHRESH: 200
seed: 10 # Consistent seed for all platforms
gurobi-numeric-focus:
name: gurobi
NumericFocus: 3 # Favour numeric stability over speed
@ -658,6 +669,7 @@ solving:
glpk-default: {} # Used in CI
mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2
walltime: "12:00:00"
# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#plotting
plotting:
@ -688,6 +700,8 @@ plotting:
H2: "Hydrogen Storage"
lines: "Transmission Lines"
ror: "Run of River"
ac: "AC"
dc: "DC"
tech_colors:
# wind


@ -0,0 +1,98 @@
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
run:
name: "validation"
scenario:
ll:
- v1.0
clusters:
- 37
opts:
- 'Ept'
snapshots:
start: "2019-01-01"
end: "2020-01-01"
inclusive: 'left'
enable:
retrieve_cutout: false
electricity:
co2limit: 1e9
extendable_carriers:
Generator: []
StorageUnit: []
Store: []
Link: []
powerplants_filter: not (DateOut < 2019)
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
estimate_renewable_capacities:
year: 2019
atlite:
default_cutout: europe-2019-era5
cutouts:
europe-2019-era5:
module: era5
x: [-12., 35.]
y: [33., 72.]
dx: 0.3
dy: 0.3
time: ['2019', '2019']
renewable:
onwind:
cutout: europe-2019-era5
offwind-ac:
cutout: europe-2019-era5
offwind-dc:
cutout: europe-2019-era5
solar:
cutout: europe-2019-era5
hydro:
cutout: europe-2019-era5
flatten_dispatch: 0.01
conventional:
unit_commitment: false
dynamic_fuel_price: true
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv"
biomass:
p_max_pu: 0.65
load:
power_statistics: false
lines:
s_max_pu: 0.23
under_construction: 'remove'
links:
include_tyndp: false
costs:
year: 2020
emission_prices:
co2: 25
clustering:
simplify_network:
exclude_carriers: [oil, coal, lignite, OCGT, CCGT]
cluster_network:
consider_efficiency_classes: true
solving:
options:
load_shedding: true
rolling_horizon: false
horizon: 1000
overlap: 48


@ -1,16 +1,16 @@
country,factor
BE,0.65
BG,0.89
CZ,0.82
FI,0.92
FR,0.70
DE,0.88
HU,0.90
NL,0.86
RO,0.92
SK,0.89
SI,0.94
ES,0.89
SE,0.82
CH,0.86
GB,0.67
BE,0.796
BG,0.894
CZ,0.827
FI,0.936
FR,0.71
DE,0.871
HU,0.913
NL,0.868
RO,0.909
SK,0.9
SI,0.913
ES,0.897
SE,0.851
CH,0.87
GB,0.656


data/unit_commitment.csv Normal file

@ -0,0 +1,8 @@
attribute,OCGT,CCGT,coal,lignite,nuclear
ramp_limit_up,1,1,1,1,0.3
ramp_limit_start_up,0.2,0.45,0.38,0.4,0.5
ramp_limit_shut_down,0.2,0.45,0.38,0.4,0.5
p_min_pu,0.2,0.45,0.325,0.4,0.5
min_up_time,,3,5,7,6
min_down_time,,2,6,6,10
start_up_cost,9.6,34.2,35.64,19.14,16.5
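A minimal sketch of how these per-carrier values can be attached to PyPSA generators, assuming an in-memory network ``n`` and the column layout above (this mirrors, in simplified form, the ``attach_conventional_generators`` changes in ``scripts/add_electricity.py`` further below):

import pandas as pd
import pypsa

n = pypsa.Network("elec.nc")  # hypothetical pre-built network
uc = pd.read_csv("data/unit_commitment.csv", index_col=0)

# generators whose carrier appears in the table become committable
committable = n.generators.carrier.isin(uc.columns)
n.generators.loc[committable, "committable"] = True

for attr in uc.index:  # ramp_limit_up, p_min_pu, min_up_time, ...
    # empty cells (e.g. min_up_time for OCGT) fall back to the PyPSA default
    default = pypsa.components.component_attrs["Generator"].default[attr]
    values = n.generators.carrier.map(uc.loc[attr]).fillna(default)
    n.generators.loc[committable, attr] = values[committable]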


@ -1,17 +1,18 @@
,Unit,Values,Description
simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- remove_stubs,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated. Defaults to true."
-- remove_stubs_across_borders,bool,"true/false","Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true."
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."
,Unit,Values,Description
simplify_network,,,
-- to_substations,bool,"{'true','false'}","Aggregates all nodes without power injection (positive or negative, i.e. demand or generation) to electrically closest ones"
-- algorithm,str,"One of {kmeans, hac, modularity}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- remove_stubs,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated. Defaults to true.
-- remove_stubs_across_borders,bool,"{'true','false'}",Controls whether radial parts of the network should be recursively aggregated across borders. Defaults to true.
cluster_network,,,
-- algorithm,str,"One of {kmeans, hac}",
-- feature,str,"Str in the format carrier1+carrier2+...+carrierN-X, where CarrierI can be from {solar, onwind, offwind, ror} and X is one of {cap, time}.",
-- exclude_carriers,list,"List of Str like [ 'solar', 'onwind'] or empty list []","List of carriers which will not be aggregated. If empty, all carriers will be aggregated."
-- consider_efficiency_classes,bool,"{'true','false'}","Aggregates each carrier into the top 10-quantile (high), the bottom 90-quantile (low), and everything in between (medium)."
aggregation_strategies,,,
-- generators,,,
-- -- {key},str,"{key} can be any of the component of the generator (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new generator."
-- buses,,,
-- -- {key},str,"{key} can be any of the component of the bus (str). Its value can be any that can be converted to pandas.Series using getattr(). For example one of {min, max, sum}.","Aggregates the component according to the given strategy. For example, if sum, then all values within each cluster are summed to represent the new bus."



@ -1,3 +1,5 @@
,Unit,Values,Description
{name},--,"string","For any carrier/technology overwrite attributes as listed below."
-- {attribute},--,"string or float","For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."
,Unit,Values,Description
unit_commitment,bool,"{true, false}","Allow overwriting ramp_limit_up, ramp_limit_start_up, ramp_limit_shut_down, p_min_pu, min_up_time, min_down_time, and start_up_cost of conventional generators. Refer to the CSV file ""data/unit_commitment.csv""."
dynamic_fuel_price,bool,"{true, false}","Consider monthly fluctuating fuel prices for each conventional generator. Refer to the CSV file ""data/validation/monthly_fuel_price.csv""."
{name},--,string,For any carrier/technology overwrite attributes as listed below.
-- {attribute},--,string or float,"For any attribute, can specify a float or reference to a file path to a CSV file giving floats for each country (2-letter code)."



@ -1,6 +1,8 @@
,Unit,Values,Description
cutout,--,"Must be 'europe-2013-era5'","Specifies the directory where the relevant weather data ist stored."
carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams."
PHS_max_hours,h,float,"Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}","Maximum state of charge capacity of hydro dams in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero."
,Unit,Values,Description
cutout,--,Must be 'europe-2013-era5',Specifies the directory where the relevant weather data is stored.
carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams."
PHS_max_hours,h,float,Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}",Maximum state of charge capacity of hydro dams in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
flatten_dispatch,bool,"{true, false}","Consider an upper limit for the hydro dispatch. The limit is given by the average capacity factor plus the buffer given in ``flatten_dispatch_buffer``."
flatten_dispatch_buffer,--,float,"If ``flatten_dispatch`` is true, specify the value added above the average capacity factor."
clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero."



@ -3,6 +3,7 @@ Trigger, Description, Definition, Status
``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use
``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended, an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`__, In active use
``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L24>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L158>`__, In active use
``Ept``, Add monthly cost for a carbon-dioxide price based on historical values built by the rule ``build_monthly_prices``, , In active use
``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use
``EQ``, "Require each country or node to on average produce a minimal share of its total consumption itself. Example: ``EQ0.5c`` demands each country to produce on average at least 50% of its consumption; ``EQ0.5`` demands each node to produce on average at least 50% of its consumption.", ``solve_network``, In active use
``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use

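The new ``Ept`` trigger applies a time-varying carbon price instead of the static one used by ``Ep``. A hedged sketch of the idea (hypothetical helper; the actual implementation in ``prepare_network`` is not part of this diff, and the single-column layout of ``co2_price.csv`` is an assumption):

import pandas as pd
import pypsa

def add_dynamic_emission_prices(n: pypsa.Network, co2_price_fn: str) -> None:
    # monthly CO2 prices (EUR/tCO2), forward-filled onto the snapshots
    co2_price = pd.read_csv(co2_price_fn, index_col=0, parse_dates=True)
    co2_price = co2_price.reindex(n.snapshots, method="ffill").iloc[:, 0]

    # specific emissions in tCO2/MWh_el = carrier emissions / efficiency
    em = n.generators.carrier.map(n.carriers.co2_emissions) / n.generators.efficiency

    # cost surcharge per generator and snapshot on top of the static cost
    surcharge = pd.DataFrame(
        co2_price.values[:, None] * em.values[None, :],
        index=n.snapshots,
        columns=n.generators.index,
    )
    n.generators_t.marginal_cost = surcharge.add(n.generators.marginal_cost)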


@ -1,17 +1,19 @@
,Unit,Values,Description
options,,,
-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
-- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
-- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
-- nhours,--,int,"Specifies the :math:`n` first snapshots to take into account. Must be less than the total number of snapshots. Recommended only for debugging."
-- clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
-- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
-- track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)."
-- seed,--,int,"Random seed for increased deterministic behaviour."
solver,,,
-- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible","Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow."
-- options,--,"Key listed under ``solver_options``.","Link to specific parameter settings."
solver_options,,"dict","Dictionaries with solver-specific parameter settings."
mem,MB,"int","Estimated maximum memory requirement for solving networks."
,Unit,Values,Description
options,,,
-- clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables' per-unit availability time series, values below this threshold are set to zero."
-- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
-- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
-- skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches. Defaults to true."
-- rolling_horizon,bool,"{'true','false'}","Whether to optimize the network in a rolling horizon manner, where the snapshot range is split into slices of size ``horizon`` which are solved consecutively."
-- seed,--,int,Random seed for increased deterministic behaviour.
-- track_iterations,bool,"{'true','false'}",Flag whether to store the intermediate branch capacities and objective function values for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)
-- min_iterations,--,int,Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run.
-- max_iterations,--,int,Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run.
-- transmission_losses,int,[0-9],"Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
-- linearized_unit_commitment,bool,"{'true','false'}",Whether to optimise using the linearized unit commitment formulation.
-- horizon,--,int,Number of snapshots to consider in each iteration. Defaults to 100.
solver,,,
-- name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible",Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow.
-- options,--,Key listed under ``solver_options``.,Link to specific parameter settings.
solver_options,,dict,Dictionaries with solver-specific parameter settings.
mem,MB,int,Estimated maximum memory requirement for solving networks.



@ -1,6 +1,12 @@
,Unit,Values,Description
version,--,0.x.x,"Version of PyPSA-Eur. Descriptive only."
tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set."
logging,,,
-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to infos, warnings, or errors only"
-- format,--,"","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes."
,Unit,Values,Description
version,--,0.x.x,Version of PyPSA-Eur. Descriptive only.
tutorial,bool,"{true, false}",Switch to retrieve the tutorial data set instead of the full data set.
logging,,,
-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to infos, warnings, or errors only"
-- format,--,,Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes.
private,,,
-- keys,,,
-- -- entsoe_api,--,,Optionally specify the ENTSO-E API key. See the guidelines for obtaining an `ENTSO-E API key <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_.
remote,,,
-- ssh,--,,Optionally specify the SSH host of a remote cluster to synchronize with.
-- path,--,,Optionally specify the file path within the remote cluster to be synchronized.



@ -16,12 +16,13 @@ PyPSA-Eur has several configuration options which are documented in this section
Top-level configuration
=======================
"Private" refers to local, machine-specific settings or data meant for personal use, not to be shared. "Remote" indicates the address of a server used for data exchange, often for clusters and data pushing/pulling.
.. literalinclude:: ../config/config.default.yaml
:language: yaml
:start-at: version:
:end-before: # docs
.. csv-table::
:header-rows: 1
:widths: 22,7,22,33


@ -280,6 +280,7 @@ The PyPSA-Eur workflow is continuously tested for Linux, macOS and Windows (WSL
release_notes
licenses
validation
limitations
contributing
support


@ -83,7 +83,7 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already
Rule ``retrieve_electricity_demand``
====================================
This rule downloads hourly electric load data for each country from the `OPSD platform <data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.
This rule downloads hourly electric load data for each country from the `OPSD platform <https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv>`_.
**Relevant Settings**

doc/validation.rst Normal file

@ -0,0 +1,53 @@
..
SPDX-FileCopyrightText: 2019-2023 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0
##########################################
Validation
##########################################
The PyPSA-Eur model workflow provides a built-in mechanism for validation. This allows users to contrast the outcomes of network optimization against the historical behaviour of the European power system. The snakemake rule ``validate_elec_networks`` enables this by generating comparative figures that cover key quantities such as dispatch per carrier, cross-border flows, and market prices per price zone.
These comparisons utilize data from the 2019 ENTSO-E Transparency Platform. To enable this, an ENTSO-E API key must be inserted into the ``config.yaml`` file. Detailed steps for this process can be found in the user guide `here <https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html>`_.
Once the API key is set, the validation workflow can be triggered by running the following command:
snakemake validate_elec_networks --configfile config/config.validation.yaml -c8
The configuration file ``config/config.validation.yaml`` contains the following parameters:
.. literalinclude:: ../config/config.validation.yaml
:language: yaml
The setup uses monthly varying fuel prices for gas, lignite, coal and oil as well as CO2 prices, which are created by the script ``build_monthly_prices``. Upon completion of the validation process, the resulting network and generated figures will be stored in the ``results/validation`` directory for further analysis.
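A minimal, self-contained sketch of how such monthly fuel prices translate into time-varying marginal costs (illustrative numbers; the actual logic lives in the ``attach_conventional_generators`` changes in ``scripts/add_electricity.py``):

.. code-block:: python

    import pandas as pd

    # hypothetical monthly fuel prices in EUR/MWh_th, one column per carrier
    fuel_price = pd.DataFrame(
        {"coal": [10.2, 9.8], "lignite": [4.5, 4.5]},
        index=pd.to_datetime(["2019-01-01", "2019-02-01"]),
    )
    snapshots = pd.date_range("2019-01-01", "2019-03-01", freq="h", inclusive="left")

    # forward-fill the monthly values onto hourly snapshots
    fuel_price_t = fuel_price.reindex(snapshots, method="ffill")

    # hypothetical plant table with thermal efficiencies
    ppl = pd.DataFrame(
        {"carrier": ["coal", "lignite"], "efficiency": [0.45, 0.4]},
        index=["C1", "C2"],
    )

    # per-plant, per-snapshot marginal cost in EUR/MWh_el (VOM omitted here)
    marginal_cost = fuel_price_t.reindex(ppl.carrier, axis=1)
    marginal_cost.columns = ppl.index
    marginal_cost = marginal_cost.div(ppl.efficiency)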
Results
=======
At the time of writing, the comparison with historical data shows partially accurate, partially improvable results. The following figures compare the dispatch of the different carriers.
.. image:: ../graphics/validation_seasonal_operation_area_elec_s_37_ec_lv1.0_Ept.png
:width: 100%
:align: center
.. image:: ../graphics/validation_production_bar_elec_s_37_ec_lv1.0_Ept.png
:width: 100%
:align: center
Issues and possible improvements
--------------------------------
**Overestimated dispatch of wind and solar:** Renewable potentials of wind and solar are slightly overestimated in the model. This leads to a higher dispatch of these carriers than in the historical data. In particular, the solar dispatch during winter is overestimated.
**Coal - Lignite fuel switch:** The model exhibits a fuel switch from coal to lignite. This might result from subsidies for lignite and coal that are not captured in the model. To correct the fuel switch from coal to lignite, a manual cost correction was added to the script ``build_monthly_prices``.
**Planned outages of nuclear power plants:** Planned outages of nuclear power plants are not captured in the model. This leads to an underestimated dispatch of nuclear power plants in winter and an overestimated dispatch in summer. This point is hard to fix, since the planned outages are not published on the ENTSO-E Transparency Platform.
**False classification of run-of-river power plants:** Some run-of-river power plants are classified as hydro power plants in the model. This leads to a general overestimation of the hydro power dispatch. In particular, Swedish hydro power plants are overestimated.
**Load shedding:** Due to constrained NTCs (cross-border capacities), the model has to shed load in some regions. This leads to high market prices in those regions, which drive up the average market price. Further fine-tuning of the NTCs is needed to avoid load shedding.
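For orientation, a rough sketch of the kind of per-carrier comparison behind these observations (paths and the column layout of the historical data are assumptions here; the actual figures are produced by ``plot_validation_electricity_production``):

.. code-block:: python

    import pandas as pd
    import pypsa

    n = pypsa.Network("results/validation/networks/elec_s_37_ec_lv1.0_Ept.nc")

    # assumed layout: hourly MW with a (country, carrier) column MultiIndex
    historical = pd.read_csv(
        "resources/validation/historical_electricity_production.csv",
        index_col=0, parse_dates=True, header=[0, 1],
    )

    # total dispatch per carrier in TWh, modelled vs. historical
    modelled = n.generators_t.p.sum().groupby(n.generators.carrier).sum().div(1e6)
    observed = historical.sum().groupby(level=1).sum().div(1e6)
    print((modelled - observed).dropna().sort_values())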

Binary files not shown: two validation figures added under ``graphics/`` (99 KiB and 801 KiB).


@ -4,3 +4,4 @@
font.family: sans-serif
font.sans-serif: Ubuntu, DejaVu Sans
image.cmap: viridis
figure.autolayout : True


@ -62,6 +62,9 @@ rule base_network:
params:
countries=config["countries"],
snapshots=config["snapshots"],
lines=config["lines"],
links=config["links"],
transformers=config["transformers"],
input:
eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv",
@ -254,6 +257,24 @@ rule build_renewable_profiles:
"../scripts/build_renewable_profiles.py"
rule build_monthly_prices:
input:
co2_price_raw="data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
fuel_price_raw="data/validation/energy-price-trends-xlsx-5619002.xlsx",
output:
co2_price=RESOURCES + "co2_price.csv",
fuel_price=RESOURCES + "monthly_fuel_price.csv",
log:
LOGS + "build_monthly_prices.log",
threads: 1
resources:
mem_mb=5000,
conda:
"../envs/environment.yaml"
script:
"../scripts/build_monthly_prices.py"
rule build_hydro_profile:
params:
hydro=config["renewable"]["hydro"],
@ -305,7 +326,7 @@ rule add_electricity:
countries=config["countries"],
renewable=config["renewable"],
electricity=config["electricity"],
conventional=config.get("conventional", {}),
conventional=config["conventional"],
costs=config["costs"],
input:
**{
@ -315,6 +336,7 @@ rule add_electricity:
**{
f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get("conventional", {None: {}}).items()
if carrier in config["electricity"]["conventional_carriers"]
for attr, fn in d.items()
if str(fn).startswith("data/")
},
@ -327,6 +349,8 @@ rule add_electricity:
powerplants=RESOURCES + "powerplants.csv",
hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
unit_commitment="data/unit_commitment.csv",
fuel_price=RESOURCES + "monthly_fuel_price.csv",
load=RESOURCES + "load.csv",
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
output:
@ -337,7 +361,7 @@ rule add_electricity:
BENCHMARKS + "add_electricity"
threads: 1
resources:
mem_mb=5000,
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
@ -412,7 +436,7 @@ rule cluster_network:
BENCHMARKS + "cluster_network/elec_s{simpl}_{clusters}"
threads: 1
resources:
mem_mb=6000,
mem_mb=10000,
conda:
"../envs/environment.yaml"
script:
@ -435,7 +459,7 @@ rule add_extra_components:
BENCHMARKS + "add_extra_components/elec_s{simpl}_{clusters}_ec"
threads: 1
resources:
mem_mb=3000,
mem_mb=4000,
conda:
"../envs/environment.yaml"
script:
@ -454,6 +478,7 @@ rule prepare_network:
input:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
tech_costs=COSTS,
co2_price=RESOURCES + "co2_price.csv",
output:
RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:


@ -73,3 +73,18 @@ rule plot_networks:
+ "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config["scenario"]
),
rule validate_elec_networks:
input:
expand(
RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"]
),
expand(
RESULTS
+ "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
**config["scenario"],
kind=["production", "prices", "cross_border"]
),


@ -15,8 +15,8 @@ def memory(w):
if m is not None:
factor *= int(m.group(1)) / 8760
break
if w.clusters.endswith("m"):
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
if w.clusters.endswith("m") or w.clusters.endswith("c"):
return int(factor * (55000 + 600 * int(w.clusters[:-1])))
elif w.clusters == "all":
return int(factor * (18000 + 180 * 4000))
else:
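A worked example of the adjusted memory heuristic above, as a minimal sketch (``factor`` of 1 assumed, i.e. no time-resolution scaling):

# for an "m"- or "c"-suffixed clusters wildcard, e.g. "37c":
factor = 1.0
clusters = "37c"
assert int(factor * (55000 + 600 * int(clusters[:-1]))) == 77200  # MB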


@ -128,3 +128,34 @@ rule plot_summary:
"../envs/environment.yaml"
script:
"../scripts/plot_summary.py"
STATISTICS_BARPLOTS = [
"capacity_factor",
"installed_capacity",
"optimal_capacity",
"capital_expenditure",
"operational_expenditure",
"curtailment",
"supply",
"withdrawal",
"market_value",
]
rule plot_elec_statistics:
params:
plotting=config["plotting"],
barplots=STATISTICS_BARPLOTS,
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
**{
f"{plot}_bar": RESULTS
+ f"figures/statistics_{plot}_bar_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in STATISTICS_BARPLOTS
},
barplots_touch=RESULTS
+ "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_statistics.py"


@ -158,7 +158,11 @@ if config["enable"]["retrieve"]:
rule retrieve_electricity_demand:
input:
HTTP.remote(
"data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
"data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
version="2019-06-05"
if config["snapshots"]["end"] < "2019"
else "2020-10-06"
),
keep_local=True,
static=True,
),
@ -191,3 +195,39 @@ if config["enable"]["retrieve"]:
retries: 2
run:
move(input[0], output[0])
if config["enable"]["retrieve"]:
rule retrieve_monthly_co2_prices:
input:
HTTP.remote(
"https://www.eex.com/fileadmin/EEX/Downloads/EUA_Emission_Spot_Primary_Market_Auction_Report/Archive_Reports/emission-spot-primary-market-auction-report-2019-data.xls",
keep_local=True,
static=True,
),
output:
"data/validation/emission-spot-primary-market-auction-report-2019-data.xls",
log:
LOGS + "retrieve_monthly_co2_prices.log",
resources:
mem_mb=5000,
retries: 2
run:
move(input[0], output[0])
if config["enable"]["retrieve"]:
rule retrieve_monthly_fuel_prices:
output:
"data/validation/energy-price-trends-xlsx-5619002.xlsx",
log:
LOGS + "retrieve_monthly_fuel_prices.log",
resources:
mem_mb=5000,
retries: 2
conda:
"../envs/environment.yaml"
script:
"../scripts/retrieve_monthly_fuel_prices.py"


@ -27,6 +27,7 @@ rule solve_network:
threads: 4
resources:
mem_mb=memory,
walltime=config["solving"].get("walltime", "12:00:00"),
shadow:
"minimal"
conda:
@ -56,7 +57,8 @@ rule solve_operations_network:
)
threads: 4
resources:
mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
mem_mb=(lambda w: 10000 + 372 * int(w.clusters)),
walltime=config["solving"].get("walltime", "12:00:00"),
shadow:
"minimal"
conda:


@ -106,6 +106,7 @@ rule solve_sector_network_myopic:
threads: 4
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
benchmark:
(
BENCHMARKS


@ -28,6 +28,7 @@ rule solve_sector_network:
threads: config["solving"]["solver"].get("threads", 4)
resources:
mem_mb=config["solving"]["mem"],
walltime=config["solving"].get("walltime", "12:00:00"),
benchmark:
(
RESULTS

rules/validate.smk Normal file

@ -0,0 +1,117 @@
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
PRODUCTION_PLOTS = [
"production_bar",
"production_deviation_bar",
"seasonal_operation_area",
]
CROSS_BORDER_PLOTS = ["trade_time_series", "cross_border_bar"]
PRICES_PLOTS = ["price_bar", "price_line"]
rule build_electricity_production:
"""
This rule builds the electricity production for each country and technology from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
output:
RESOURCES + "historical_electricity_production.csv",
log:
LOGS + "build_electricity_production.log",
resources:
mem_mb=5000,
script:
"../scripts/build_electricity_production.py"
rule build_cross_border_flows:
"""
This rule builds the cross-border flows from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
input:
network=RESOURCES + "networks/base.nc",
output:
RESOURCES + "historical_cross_border_flows.csv",
log:
LOGS + "build_cross_border_flows.log",
resources:
mem_mb=5000,
script:
"../scripts/build_cross_border_flows.py"
rule build_electricity_prices:
"""
This rule builds the electricity prices from ENTSO-E data.
The data is used for validation of the optimization results.
"""
params:
snapshots=config["snapshots"],
countries=config["countries"],
output:
RESOURCES + "historical_electricity_prices.csv",
log:
LOGS + "build_electricity_prices.log",
resources:
mem_mb=5000,
script:
"../scripts/build_electricity_prices.py"
rule plot_validation_electricity_production:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_production=RESOURCES + "historical_electricity_production.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in PRODUCTION_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_production_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_production.py"
rule plot_validation_cross_border_flows:
params:
countries=config["countries"],
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
cross_border_flows=RESOURCES + "historical_cross_border_flows.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in CROSS_BORDER_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_cross_border_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_cross_border_flows.py"
rule plot_validation_electricity_prices:
input:
network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
electricity_prices=RESOURCES + "historical_electricity_prices.csv",
output:
**{
plot: RESULTS
+ f"figures/validation_{plot}_elec_s{{simpl}}_{{clusters}}_ec_l{{ll}}_{{opts}}.pdf"
for plot in PRICES_PLOTS
},
plots_touch=RESULTS
+ "figures/.validation_prices_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
script:
"../scripts/plot_validation_electricity_prices.py"


@ -204,7 +204,6 @@ def load_costs(tech_costs, config, max_hours, Nyears=1.0):
* costs["investment"]
* Nyears
)
costs.at["OCGT", "fuel"] = costs.at["gas", "fuel"]
costs.at["CCGT", "fuel"] = costs.at["gas", "fuel"]
@ -359,12 +358,12 @@ def update_transmission_costs(n, costs, length_factor=1.0):
def attach_wind_and_solar(
n, costs, input_profiles, carriers, extendable_carriers, line_length_factor=1
):
add_missing_carriers(n, carriers)
for car in carriers:
if car == "hydro":
continue
n.add("Carrier", car)
with xr.open_dataset(getattr(input_profiles, "profile_" + car)) as ds:
if ds.indexes["bus"].empty:
continue
@ -419,6 +418,8 @@ def attach_conventional_generators(
extendable_carriers,
conventional_params,
conventional_inputs,
unit_commitment=None,
fuel_price=None,
):
carriers = list(set(conventional_carriers) | set(extendable_carriers["Generator"]))
add_missing_carriers(n, carriers)
@ -437,15 +438,34 @@ def attach_conventional_generators(
.rename(index=lambda s: "C" + str(s))
)
ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
ppl["marginal_cost"] = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
logger.info(
"Adding {} generators with capacities [GW] \n{}".format(
len(ppl), ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
if unit_commitment is not None:
committable_attrs = ppl.carrier.isin(unit_commitment).to_frame("committable")
for attr in unit_commitment.index:
default = pypsa.components.component_attrs["Generator"].default[attr]
committable_attrs[attr] = ppl.carrier.map(unit_commitment.loc[attr]).fillna(
default
)
else:
committable_attrs = {}
if fuel_price is not None:
fuel_price = fuel_price.assign(
OCGT=fuel_price["gas"], CCGT=fuel_price["gas"]
).drop("gas", axis=1)
missing_carriers = list(set(carriers) - set(fuel_price))
fuel_price = fuel_price.assign(**costs.fuel[missing_carriers])
fuel_price = fuel_price.reindex(ppl.carrier, axis=1)
fuel_price.columns = ppl.index
marginal_cost = fuel_price.div(ppl.efficiency).add(ppl.carrier.map(costs.VOM))
else:
marginal_cost = (
ppl.carrier.map(costs.VOM) + ppl.carrier.map(costs.fuel) / ppl.efficiency
)
)
# Define generators using modified ppl DataFrame
caps = ppl.groupby("carrier").p_nom.sum().div(1e3).round(2)
logger.info(f"Adding {len(ppl)} generators with capacities [GW] \n{caps}")
n.madd(
"Generator",
@ -456,13 +476,14 @@ def attach_conventional_generators(
p_nom=ppl.p_nom.where(ppl.carrier.isin(conventional_carriers), 0),
p_nom_extendable=ppl.carrier.isin(extendable_carriers["Generator"]),
efficiency=ppl.efficiency,
marginal_cost=ppl.marginal_cost,
marginal_cost=marginal_cost,
capital_cost=ppl.capital_cost,
build_year=ppl.datein.fillna(0).astype(int),
lifetime=(ppl.dateout - ppl.datein).fillna(np.inf),
**committable_attrs,
)
for carrier in conventional_params:
for carrier in set(conventional_params) & set(carriers):
# Generators with technology affected
idx = n.generators.query("carrier == @carrier").index
@ -596,6 +617,14 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
hydro.max_hours > 0, hydro.country.map(max_hours_country)
).fillna(6)
flatten_dispatch = params.get("flatten_dispatch", False)
if flatten_dispatch:
buffer = params.get("flatten_dispatch_buffer", 0.2)
average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
else:
p_max_pu = 1
n.madd(
"StorageUnit",
hydro.index,
@ -605,7 +634,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
max_hours=hydro_max_hours,
capital_cost=costs.at["hydro", "capital_cost"],
marginal_cost=costs.at["hydro", "marginal_cost"],
p_max_pu=1.0, # dispatch
p_max_pu=p_max_pu, # dispatch
p_min_pu=0.0, # store
efficiency_dispatch=costs.at["hydro", "efficiency"],
efficiency_store=0.0,
@ -695,13 +724,14 @@ def attach_OPSD_renewables(n, tech_map):
{"Solar": "PV"}
)
df = df.query("Fueltype in @tech_map").powerplant.convert_country_to_alpha2()
df = df.dropna(subset=["lat", "lon"])
for fueltype, carriers in tech_map.items():
gens = n.generators[lambda df: df.carrier.isin(carriers)]
buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby("bus").p_nom.count()
caps = map_country_bus(df.query("Fueltype == @fueltype and lat == lat"), buses)
caps = map_country_bus(df.query("Fueltype == @fueltype"), buses)
caps = caps.groupby(["bus"]).Capacity.sum()
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
@ -811,6 +841,20 @@ if __name__ == "__main__":
conventional_inputs = {
k: v for k, v in snakemake.input.items() if k.startswith("conventional_")
}
if params.conventional["unit_commitment"]:
unit_commitment = pd.read_csv(snakemake.input.unit_commitment, index_col=0)
else:
unit_commitment = None
if params.conventional["dynamic_fuel_price"]:
fuel_price = pd.read_csv(
snakemake.input.fuel_price, index_col=0, header=0, parse_dates=True
)
fuel_price = fuel_price.reindex(n.snapshots).fillna(method="ffill")
else:
fuel_price = None
attach_conventional_generators(
n,
costs,
@ -819,6 +863,8 @@ if __name__ == "__main__":
extendable_carriers,
params.conventional,
conventional_inputs,
unit_commitment=unit_commitment,
fuel_price=fuel_price,
)
attach_wind_and_solar(
@ -831,15 +877,16 @@ if __name__ == "__main__":
)
if "hydro" in renewable_carriers:
para = params.renewable["hydro"]
p = params.renewable["hydro"]
carriers = p.pop("carriers", [])
attach_hydro(
n,
costs,
ppl,
snakemake.input.profile_hydro,
snakemake.input.hydro_capacities,
para.pop("carriers", []),
**para,
carriers,
**p,
)
estimate_renewable_caps = params.electricity["estimate_renewable_capacities"]


@ -337,7 +337,7 @@ def _load_lines_from_eg(buses, eg_lines):
)
lines["length"] /= 1e3
lines["carrier"] = "AC"
lines = _remove_dangling_branches(lines, buses)
return lines


@ -0,0 +1,65 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
import pypsa
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import InvalidBusinessParameterError, NoMatchingDataError
from requests import HTTPError
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
n = pypsa.Network(snakemake.input.network)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
branches = n.branches().query("carrier in ['AC', 'DC']")
c = n.buses.country
branch_countries = pd.concat([branches.bus0.map(c), branches.bus1.map(c)], axis=1)
branch_countries = branch_countries.query("bus0 != bus1")
branch_countries = branch_countries.apply(sorted, axis=1, result_type="broadcast")
country_pairs = branch_countries.drop_duplicates().reset_index(drop=True)
flows = []
unavailable_borders = []
for from_country, to_country in country_pairs.values:
try:
flow_directed = client.query_crossborder_flows(
from_country, to_country, start=start, end=end
)
flow_reverse = client.query_crossborder_flows(
to_country, from_country, start=start, end=end
)
flow = (flow_directed - flow_reverse).rename(
f"{from_country} - {to_country}"
)
flow = flow.tz_localize(None).resample("1h").mean()
flow = flow.loc[start.tz_localize(None) : end.tz_localize(None)]
flows.append(flow)
except (HTTPError, NoMatchingDataError, InvalidBusinessParameterError):
unavailable_borders.append(f"{from_country}-{to_country}")
if unavailable_borders:
logger.warning(
"Historical electricity cross-border flows for countries"
f" {', '.join(unavailable_borders)} not available."
)
flows = pd.concat(flows, axis=1)
flows.to_csv(snakemake.output[0])


@ -80,11 +80,9 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
def rename(s):
return s[: -len(pattern)]
def date_parser(x):
return dateutil.parser.parse(x, ignoretz=True)
return (
pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
pd.read_csv(fn, index_col=0, parse_dates=[0])
.tz_localize(None)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
@ -168,6 +166,7 @@ def manual_adjustment(load, fn_load, powerstatistics):
by the corresponding ratio of total energy consumption reported by the
IEA Data Browser [0] for the year 2013.
2. For the ENTSO-E transparency load data (if powerstatistics is False)
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
@ -176,6 +175,9 @@ def manual_adjustment(load, fn_load, powerstatistics):
[0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons
Bosnia and Herzegovina (BA) does not exist in the data set for 2019. It gets the
electricity consumption data from Croatia (HR) for the year 2019, scaled by the
factors derived from https://energy.at-site.be/eurostat-2021/
Parameters
----------
@ -264,9 +266,17 @@ def manual_adjustment(load, fn_load, powerstatistics):
load["AL"] = load.ME * (5.7 / 2.9)
if "MK" not in load and "MK" in countries:
load["MK"] = load.ME * (6.7 / 2.9)
if "BA" not in load and "BA" in countries:
load["BA"] = load.HR * (11.0 / 16.2)
copy_timeslice(
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
)
copy_timeslice(
load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)
)
copy_timeslice(
load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)
)
return load
@ -291,6 +301,9 @@ if __name__ == "__main__":
if snakemake.params.load["manual_adjustments"]:
load = manual_adjustment(load, snakemake.input[0], powerstatistics)
if load.empty:
logger.warning("Build electricity demand time series is empty.")
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method="linear", limit=interpolate_limit)
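copy_timeslice itself is defined elsewhere in this script; a hedged sketch of the idea behind the calls above, assuming it overwrites a faulty window with the same window shifted by the given offset:

import pandas as pd

def copy_timeslice_sketch(load, country, start, stop, delta):
    # Overwrite the broken window [start, stop] with data shifted by delta,
    # e.g. Delta(weeks=1) patches the window with the following week's values.
    window = pd.date_range(start, stop, freq="h")
    load.loc[window, country] = load.loc[window + delta, country].values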

View File

@ -0,0 +1,52 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_cross_border_flows")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
countries = snakemake.params.countries
prices = []
unavailable_countries = []
for country in countries:
country_code = country
try:
gen = client.query_day_ahead_prices(country, start=start, end=end)
gen = gen.tz_localize(None).resample("1h").mean()
gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
prices.append(gen)
except NoMatchingDataError:
unavailable_countries.append(country)
if unavailable_countries:
logger.warning(
f"Historical electricity prices for countries {', '.join(unavailable_countries)} not available."
)
keys = [c for c in countries if c not in unavailable_countries]
prices = pd.concat(prices, keys=keys, axis=1)
prices.to_csv(snakemake.output[0])

View File

@ -0,0 +1,73 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import logging
import pandas as pd
from _helpers import configure_logging
from entsoe import EntsoePandasClient
from entsoe.exceptions import NoMatchingDataError
logger = logging.getLogger(__name__)
carrier_grouper = {
"Waste": "Biomass",
"Hydro Pumped Storage": "Hydro",
"Hydro Water Reservoir": "Hydro",
"Hydro Run-of-river and poundage": "Run of River",
"Fossil Coal-derived gas": "Gas",
"Fossil Gas": "Gas",
"Fossil Oil": "Oil",
"Fossil Oil shale": "Oil",
"Fossil Brown coal/Lignite": "Lignite",
"Fossil Peat": "Lignite",
"Fossil Hard coal": "Coal",
"Wind Onshore": "Onshore Wind",
"Wind Offshore": "Offshore Wind",
"Other renewable": "Other",
"Marine": "Other",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_electricity_production")
configure_logging(snakemake)
api_key = snakemake.config["private"]["keys"]["entsoe_api"]
client = EntsoePandasClient(api_key=api_key)
start = pd.Timestamp(snakemake.params.snapshots["start"], tz="Europe/Brussels")
end = pd.Timestamp(snakemake.params.snapshots["end"], tz="Europe/Brussels")
countries = snakemake.params.countries
generation = []
unavailable_countries = []
for country in countries:
country_code = country
try:
gen = client.query_generation(country, start=start, end=end, nett=True)
gen = gen.tz_localize(None).resample("1h").mean()
gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
generation.append(gen)
except NoMatchingDataError:
unavailable_countries.append(country)
if unavailable_countries:
logger.warning(
f"Historical electricity production for countries {', '.join(unavailable_countries)} not available."
)
keys = [c for c in countries if c not in unavailable_countries]
generation = pd.concat(generation, keys=keys, axis=1)
generation.to_csv(snakemake.output[0])
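A toy sketch of the column grouping above: rename maps several ENTSO-E production types onto one carrier, and the groupby over the column axis sums the now-duplicated labels.

import pandas as pd

df = pd.DataFrame(
    [[1.0, 2.0, 3.0]],
    columns=["Fossil Gas", "Fossil Coal-derived gas", "Waste"],
)
df = df.rename(
    columns={"Fossil Gas": "Gas", "Fossil Coal-derived gas": "Gas", "Waste": "Biomass"}
)
merged = df.groupby(level=0, axis=1).sum()  # columns: Biomass (3.0), Gas (1.0 + 2.0)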

View File

@ -0,0 +1,122 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Created on Tue May 16 10:37:35 2023.
This script extracts monthly fuel prices for oil, gas, coal and lignite,
as well as CO2 prices.
Inputs
------
- ``data/energy-price-trends-xlsx-5619002.xlsx``: energy price index of fossil fuels
- ``emission-spot-primary-market-auction-report-2019-data.xls``: CO2 prices from the EEX spot primary auction
Outputs
-------
- ``data/validation/monthly_fuel_price.csv``
- ``data/validation/CO2_price_2019.csv``
Description
-----------
The rule :mod:`build_monthly_prices` collects monthly fuel prices and CO2 prices
and translates them from their different input sources into PyPSA syntax.
Data sources:
[1] Fuel price index, Destatis
https://www.destatis.de/EN/Home/_node.html
[2] Average annual lignite fuel price, ENTSO-E
https://2020.entsos-tyndp-scenarios.eu/fuel-commodities-and-carbon-prices/
[3] CO2 Prices, Emission spot primary auction, EEX
https://www.eex.com/en/market-data/environmental-markets/eua-primary-auction-spot-download
The data was accessed on 16.05.2023.
"""
import logging
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
# keywords in datasheet
keywords = {
"coal": " GP09-051 Hard coal",
"lignite": " GP09-052 Lignite and lignite briquettes",
"oil": " GP09-0610 10 Mineral oil, crude",
"gas": "GP09-062 Natural gas",
}
# sheet names to pypsa syntax
sheet_name_map = {
"coal": "5.1 Hard coal and lignite",
"lignite": "5.1 Hard coal and lignite",
"oil": "5.2 Mineral oil",
"gas": "5.3.1 Natural gas - indices",
}
# fuel prices for 2020 in Eur/MWh
# source for lignite: average annual price for 2020, scaled by price index, ENTSO-E [2]
price_2020 = (
pd.Series({"coal": 3.0, "oil": 10.6, "gas": 5.6, "lignite": 1.1}) * 3.6
) # Eur/MWh
# manual adjustment of coal and lignite prices
price_2020["coal"] = 2.4 * 3.6
price_2020["lignite"] = 1.6 * 3.6
def get_fuel_price():
price = {}
for carrier, keyword in keywords.items():
sheet_name = sheet_name_map[carrier]
df = pd.read_excel(
snakemake.input.fuel_price_raw,
sheet_name=sheet_name,
index_col=0,
skiprows=6,
nrows=18,
)
df = df.dropna(axis=0).iloc[:, :12]
start, end = df.index[0], str(int(df.index[-1][:4]) + 1)
df = df.stack()
df.index = pd.date_range(start=start, end=end, freq="MS", inclusive="left")
scale = price_2020[carrier] / df["2020"].mean() # scale to 2020 price
df = df.mul(scale)
price[carrier] = df
return pd.concat(price, axis=1)
def get_co2_price():
# emission price
co2_price = pd.read_excel(snakemake.input.co2_price_raw, index_col=1, header=5)
return co2_price["Auction Price €/tCO2"]
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_monthly_prices")
configure_logging(snakemake)
fuel_price = get_fuel_price()
fuel_price.to_csv(snakemake.output.fuel_price)
co2_price = get_co2_price()
co2_price.to_csv(snakemake.output.co2_price)
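A worked sketch of the rescaling in get_fuel_price: Destatis publishes a dimensionless price index, so dividing the known absolute 2020 price by the 2020 index mean yields a scale factor that converts the whole index series to EUR/MWh (toy index values below).

import pandas as pd

idx = pd.date_range("2019-01-01", "2020-12-01", freq="MS")
index_series = pd.Series(95.0, index=idx)
index_series.loc["2020"] = 105.0  # toy: the index sits higher in 2020
scale = (5.6 * 3.6) / index_series.loc["2020"].mean()  # known 2020 gas price, EUR/MWh
absolute = index_series * scale  # EUR/MWh for every month of the series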

View File

@ -54,6 +54,23 @@ logger = logging.getLogger(__name__)
def determine_cutout_xXyY(cutout_name):
"""
Determine the full extent of a cutout.
Since the coordinates of the cutout data are given as the
center of the grid cells, the extent of the cutout is
calculated by adding/subtracting half of the grid cell size.
Parameters
----------
cutout_name : str
Path to the cutout.
Returns
-------
A list of extent coordinates in the order [x, X, y, Y].
"""
cutout = atlite.Cutout(cutout_name)
assert cutout.crs.to_epsg() == 4326
x, X, y, Y = cutout.extent
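Since the hunk above is truncated, here is a hedged sketch of the half-grid-cell padding the docstring describes (dx, dy and the return line are assumptions, not the function's actual tail):

# toy numbers; atlite cutouts store cell-center coordinates
dx, dy = 0.25, 0.25                  # assumed grid cell size
x, X, y, Y = 5.0, 15.0, 45.0, 55.0   # cell-center extent from cutout.extent
extent = [x - dx / 2, X + dx / 2, y - dy / 2, Y + dy / 2]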

View File

@ -461,7 +461,7 @@ if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("cluster_network", simpl="", clusters="37c")
snakemake = mock_snakemake("cluster_network", simpl="", clusters="37")
configure_logging(snakemake)
params = snakemake.params
@ -483,6 +483,23 @@ if __name__ == "__main__":
else:
n_clusters = int(snakemake.wildcards.clusters)
if params.cluster_network.get("consider_efficiency_classes", False):
carriers = []
for c in aggregate_carriers:
gens = n.generators.query("carrier == @c")
low = gens.efficiency.quantile(0.10)
high = gens.efficiency.quantile(0.90)
if low >= high:
carriers += [c]
else:
labels = ["low", "medium", "high"]
suffix = pd.cut(
gens.efficiency, bins=[0, low, high, 1], labels=labels
).astype(str)
carriers += [f"{c} {label} efficiency" for label in labels]
n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
aggregate_carriers = carriers
if n_clusters == len(n.buses):
# Fast-path if no clustering is necessary
busmap = n.buses.index.to_series()
@ -524,6 +541,11 @@ if __name__ == "__main__":
update_p_nom_max(clustering.network)
if params.cluster_network.get("consider_efficiency_classes"):
labels = [f" {label} efficiency" for label in ["low", "medium", "high"]]
nc = clustering.network
nc.generators["carrier"] = nc.generators.carrier.replace(labels, "", regex=True)
clustering.network.meta = dict(
snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
)
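A worked toy example of the quantile binning introduced above: generators of one carrier are split at the 10th and 90th efficiency percentiles, their carrier names gain a class suffix, and the suffix is stripped again after clustering.

import pandas as pd

eff = pd.Series([0.35, 0.38, 0.45, 0.52, 0.58])
low, high = eff.quantile(0.10), eff.quantile(0.90)
labels = ["low", "medium", "high"]
suffix = pd.cut(eff, bins=[0, low, high, 1], labels=labels).astype(str)
# e.g. carrier "CCGT" becomes "CCGT low efficiency", "CCGT medium efficiency", ...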

116
scripts/plot_statistics.py Normal file
View File

@ -0,0 +1,116 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pypsa
import seaborn as sns
from _helpers import configure_logging
sns.set_theme("paper", style="whitegrid")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_elec_statistics",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
n.carriers.loc["load", ["nice_name", "color"]] = "Load", "darkred"
colors = n.carriers.set_index("nice_name").color.where(
lambda s: s != "", "lightgrey"
)
# %%
def rename_index(ds):
specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})")
generic = ds.index.get_level_values("carrier")
duplicated = generic.duplicated(keep=False)
index = specific.where(duplicated, generic)
return ds.set_axis(index)
def plot_static_per_carrier(ds, ax, drop_zero=True):
if drop_zero:
ds = ds[ds != 0]
ds = ds.dropna()
c = colors[ds.index.get_level_values("carrier")]
ds = ds.pipe(rename_index)
label = f"{ds.attrs['name']} [{ds.attrs['unit']}]"
ds.plot.barh(color=c.values, xlabel=label, ax=ax)
ax.grid(axis="y")
fig, ax = plt.subplots()
ds = n.statistics.capacity_factor().dropna()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.capacity_factor_bar)
fig, ax = plt.subplots()
ds = n.statistics.installed_capacity().dropna()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.installed_capacity_bar)
fig, ax = plt.subplots()
ds = n.statistics.optimal_capacity()
ds = ds.drop("Line")
ds = ds.drop(("Generator", "Load"))
ds = ds / 1e3
ds.attrs["unit"] = "GW"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.optimal_capacity_bar)
fig, ax = plt.subplots()
ds = n.statistics.capex()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.capital_expenditure_bar)
fig, ax = plt.subplots()
ds = n.statistics.opex()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.operational_expenditure_bar)
fig, ax = plt.subplots()
ds = n.statistics.curtailment()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.curtailment_bar)
fig, ax = plt.subplots()
ds = n.statistics.supply()
ds = ds.drop("Line")
ds = ds / 1e6
ds.attrs["unit"] = "TWh"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.supply_bar)
fig, ax = plt.subplots()
ds = n.statistics.withdrawal()
ds = ds.drop("Line")
ds = ds / -1e6
ds.attrs["unit"] = "TWh"
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.withdrawal_bar)
fig, ax = plt.subplots()
ds = n.statistics.market_value()
plot_static_per_carrier(ds, ax)
fig.savefig(snakemake.output.market_value_bar)
# touch file
with open(snakemake.output.barplots_touch, "a"):
pass
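A toy illustration of rename_index above: a carrier keeps its bare name unless it appears under several components, in which case the component is appended for disambiguation.

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("Generator", "Hydro"), ("StorageUnit", "Hydro"), ("Generator", "Solar")],
    names=["component", "carrier"],
)
ds = pd.Series([1.0, 2.0, 3.0], index=idx)
specific = ds.index.map(lambda x: f"{x[1]}\n({x[0]})")
generic = ds.index.get_level_values("carrier")
duplicated = generic.duplicated(keep=False)
ds = ds.set_axis(specific.where(duplicated, generic))
# index: "Hydro\n(Generator)", "Hydro\n(StorageUnit)", "Solar"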

View File

@ -0,0 +1,242 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import country_converter as coco
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
sns.set_theme("paper", style="whitegrid")
cc = coco.CountryConverter()
color_country = {
"AL": "#440154",
"AT": "#482677",
"BA": "#43398e",
"BE": "#3953a4",
"BG": "#2c728e",
"CH": "#228b8d",
"CZ": "#1f9d8a",
"DE": "#29af7f",
"DK": "#3fbc73",
"EE": "#5ec962",
"ES": "#84d44b",
"FI": "#addc30",
"FR": "#d8e219",
"GB": "#fde725",
"GR": "#f0f921",
"HR": "#f1c25e",
"HU": "#f4a784",
"IE": "#f78f98",
"IT": "#f87ea0",
"LT": "#f87a9a",
"LU": "#f57694",
"LV": "#f3758d",
"ME": "#f37685",
"MK": "#f37b7c",
"NL": "#FF6666",
"NO": "#FF3333",
"PL": "#eb0000",
"PT": "#d70000",
"RO": "#c00000",
"RS": "#a50000",
"SE": "#8a0000",
"SI": "#6f0000",
"SK": "#550000",
}
def sort_one_country(country, df):
indices = [link for link in df.columns if country in link]
df_country = df[indices].copy()
for link in df_country.columns:
if country in link[5:]:
df_country[link] = -df_country[link]
link_reverse = str(link[5:] + " - " + link[:2])
df_country = df_country.rename(columns={link: link_reverse})
return df_country.reindex(sorted(df_country.columns), axis=1)
def cross_border_time_series(countries, data):
fig, ax = plt.subplots(2 * len(countries), 1, figsize=(15, 10 * len(countries)))
axis = 0
for country in countries:
ymin = 0
ymax = 0
for df in data:
df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
color = [color_country[link[5:]] for link in df_country.columns]
df_pos.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
)
df_neg.plot.area(
ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
)
if (axis % 2) == 0:
title = "Historic"
else:
title = "Optimized"
ax[axis].set_title(
title + " Import / Export for " + cc.convert(country, to="name_short")
)
# Custom legend elements
legend_elements = []
for link in df_country.columns:
legend_elements = legend_elements + [
plt.fill_between(
[],
[],
color=color_country[link[5:]],
label=cc.convert(link[5:], to="name_short"),
)
]
# Create the legend
ax[axis].legend(handles=legend_elements, loc="upper right")
# rescale the y axis
neg_min = df_neg.sum(axis=1).min() * 1.2
if neg_min < ymin:
ymin = neg_min
pos_max = df_pos.sum(axis=1).max() * 1.2
if pos_max > ymax:
ymax = pos_max
axis = axis + 1
for x in range(axis - 2, axis):
ax[x].set_ylim([ymin, ymax])
fig.savefig(snakemake.output.trade_time_series, bbox_inches="tight")
def cross_border_bar(countries, data):
df_positive = pd.DataFrame()
df_negative = pd.DataFrame()
color = []
for country in countries:
order = 0
for df in data:
df_country = sort_one_country(country, df)
df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)
if (order % 2) == 0:
title = "Historic"
else:
title = "Optimized"
df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
)
df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
{0: title + " " + cc.convert(country, to="name_short")}
)
df_positive = pd.concat([df_positive_new, df_positive])
df_negative = pd.concat([df_negative_new, df_negative])
order = order + 1
color = [color_country[link[5:]] for link in df_positive.columns]
fig, ax = plt.subplots(figsize=(15, 60))
df_positive.plot.barh(ax=ax, stacked=True, color=color, zorder=2)
df_negative.plot.barh(ax=ax, stacked=True, color=color, zorder=2)
plt.grid(axis="x", zorder=0)
plt.grid(axis="y", zorder=0)
# Custom legend elements
legend_elements = []
for country in list(color_country.keys()):
legend_elements = legend_elements + [
plt.fill_between(
[],
[],
color=color_country[country],
label=cc.convert(country, to="name_short"),
)
]
# Create the legend
plt.legend(handles=legend_elements, loc="upper right")
fig.savefig(snakemake.output.cross_border_bar, bbox_inches="tight")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
countries = snakemake.params.countries
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.cross_border_flows,
index_col=0,
header=0,
parse_dates=True,
)
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
# Prepare network data to match the shape of the ENTSO-E data structure
optimized_links = n.links_t.p0.rename(
columns=dict(n.links.bus0.str[:2] + " - " + n.links.bus1.str[:2])
)
optimized_lines = n.lines_t.p0.rename(
columns=dict(n.lines.bus0.str[:2] + " - " + n.lines.bus1.str[:2])
)
optimized = pd.concat([optimized_links, optimized_lines], axis=1)
# Drop connections internal to a single country
optimized.drop(
[c for c in optimized.columns if c[:2] == c[5:]], axis=1, inplace=True
)
# Align column names so both directions of a border share one label
for c1 in optimized.columns:
for c2 in optimized.columns:
if c1[:2] == c2[5:] and c2[:2] == c1[5:]:
optimized = optimized.rename(columns={c1: c2})
optimized = optimized.groupby(lambda x: x, axis=1).sum()
cross_border_bar(countries, [historic, optimized])
cross_border_time_series(countries, [historic, optimized])
# touch file
with open(snakemake.output.plots_touch, "a"):
pass
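A toy walk-through of sort_one_country's column flipping: every connection is rewritten so that the plotted country comes first and positive values consistently mean exports from it.

import pandas as pd

df = pd.DataFrame({"DE - FR": [100.0], "AT - DE": [30.0]})
country = "DE"
for link in list(df.columns):
    if country in link[5:]:  # country is on the receiving side of the label
        df[link] = -df[link]
        df = df.rename(columns={link: link[5:] + " - " + link[:2]})
# columns are now "DE - FR" (+100) and "DE - AT" (-30): one export, one import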

View File

@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_electricity_prices",
simpl="",
opts="Ept-12h",
clusters="37",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.electricity_prices,
index_col=0,
header=0,
parse_dates=True,
)
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
optimized = n.buses_t.marginal_price.groupby(n.buses.country, axis=1).mean()
data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
data.columns.names = ["Kind", "Country"]
fig, ax = plt.subplots(figsize=(6, 6))
df = data.mean().unstack().T
df.plot.barh(ax=ax, xlabel="Electricity Price [€/MWh]", ylabel="")
ax.grid(axis="y")
fig.savefig(snakemake.output.price_bar, bbox_inches="tight")
fig, ax = plt.subplots()
df = data.groupby(level="Kind", axis=1).mean()
df.plot(ax=ax, xlabel="", ylabel="Electricity Price [€/MWh]", alpha=0.8)
ax.grid(axis="x")
fig.savefig(snakemake.output.price_line, bbox_inches="tight")
# touch file
with open(snakemake.output.plots_touch, "a"):
pass
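A toy sketch of the two-level column handling above: averaging over the "Country" level collapses the (Kind, Country) frame to one Historic and one Optimized series for the comparison plot.

import pandas as pd

cols = pd.MultiIndex.from_product(
    [["Historic", "Optimized"], ["DE", "FR"]], names=["Kind", "Country"]
)
data = pd.DataFrame([[40.0, 40.0, 40.0, 44.0]], columns=cols)
kind_mean = data.groupby(level="Kind", axis=1).mean()
# kind_mean: Historic 40.0, Optimized 42.0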

View File

@ -0,0 +1,144 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")
carrier_groups = {
"Offshore Wind (AC)": "Offshore Wind",
"Offshore Wind (DC)": "Offshore Wind",
"Open-Cycle Gas": "Gas",
"Combined-Cycle Gas": "Gas",
"Reservoir & Dam": "Hydro",
"Pumped Hydro Storage": "Hydro",
}
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_validation_electricity_production",
simpl="",
opts="Ept",
clusters="37c",
ll="v1.0",
)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
n.loads.carrier = "load"
historic = pd.read_csv(
snakemake.input.electricity_production,
index_col=0,
header=[0, 1],
parse_dates=True,
)
colors = n.carriers.set_index("nice_name").color.where(
lambda s: s != "", "lightgrey"
)
colors["Offshore Wind"] = colors["Offshore Wind (AC)"]
colors["Gas"] = colors["Combined-Cycle Gas"]
colors["Hydro"] = colors["Reservoir & Dam"]
colors["Other"] = "lightgray"
if len(historic.index) > len(n.snapshots):
historic = historic.resample(n.snapshots.inferred_freq).mean().loc[n.snapshots]
optimized = n.statistics.dispatch(
groupby=get_bus_and_carrier, aggregate_time=False
).T
optimized = optimized[["Generator", "StorageUnit"]].droplevel(0, axis=1)
optimized = optimized.rename(columns=n.buses.country, level=0)
optimized = optimized.rename(columns=carrier_groups, level=1)
optimized = optimized.groupby(axis=1, level=[0, 1]).sum()
data = pd.concat([historic, optimized], keys=["Historic", "Optimized"], axis=1)
data.columns.names = ["Kind", "Country", "Carrier"]
data = data.mul(n.snapshot_weightings.generators, axis=0)
# total production per carrier
fig, ax = plt.subplots(figsize=(6, 6))
df = data.groupby(level=["Kind", "Carrier"], axis=1).sum().sum().unstack().T
df = df / 1e6 # TWh
df.plot.barh(ax=ax, xlabel="Electricity Production [TWh]", ylabel="")
ax.grid(axis="y")
fig.savefig(snakemake.output.production_bar, bbox_inches="tight")
# highest diffs
fig, ax = plt.subplots(figsize=(6, 10))
df = data.sum() / 1e6 # TWh
df = df["Optimized"] - df["Historic"]
df = df.dropna().sort_values()
df = pd.concat([df.iloc[:5], df.iloc[-5:]])
c = colors[df.index.get_level_values(1)]
df.plot.barh(
xlabel="Optimized Production - Historic Production [TWh]", ax=ax, color=c.values
)
ax.set_title("Strongest Deviations")
ax.grid(axis="y")
fig.savefig(snakemake.output.production_deviation_bar, bbox_inches="tight")
# seasonal operation
fig, axes = plt.subplots(3, 1, figsize=(9, 9))
df = (
data.groupby(level=["Kind", "Carrier"], axis=1)
.sum()
.resample("1W")
.mean()
.clip(lower=0)
)
df = df / 1e3
order = (
(df["Historic"].diff().abs().sum() / df["Historic"].sum()).sort_values().index
)
c = colors[order]
optimized = df["Optimized"].reindex(order, axis=1, level=1)
historical = df["Historic"].reindex(order, axis=1, level=1)
kwargs = dict(color=c, legend=False, ylabel="Production [GW]", xlabel="")
optimized.plot.area(ax=axes[0], **kwargs, title="Optimized")
historical.plot.area(ax=axes[1], **kwargs, title="Historic")
diff = optimized - historical
diff.clip(lower=0).plot.area(
ax=axes[2], **kwargs, title=r"$\Delta$ (Optimized - Historic)"
)
lim = axes[2].get_ylim()[1]
diff.clip(upper=0).plot.area(ax=axes[2], **kwargs)
axes[2].set_ylim(bottom=-lim, top=lim)
h, l = axes[0].get_legend_handles_labels()
fig.legend(
h[::-1],
l[::-1],
loc="center left",
bbox_to_anchor=(1, 0.5),
ncol=1,
frameon=False,
labelspacing=1,
)
fig.savefig(snakemake.output.seasonal_operation_area, bbox_inches="tight")
# touch file
with open(snakemake.output.plots_touch, "a"):
pass
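A toy check of the stacking order used for the seasonal area plots: carriers are sorted by their summed week-to-week change relative to total production, so the flattest profiles end up at the bottom of the stack.

import pandas as pd

df = pd.DataFrame({"Nuclear": [10.0, 10.0, 10.0], "Wind": [5.0, 1.0, 8.0]})
order = (df.diff().abs().sum() / df.sum()).sort_values().index
# order: ["Nuclear", "Wind"] -- zero variability sorts first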

View File

@ -65,6 +65,7 @@ import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs
from pypsa.descriptors import expand_series
idx = pd.IndexSlice
@ -103,10 +104,30 @@ def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
).sum(axis=1)
gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
n.generators["marginal_cost"] += gen_ep
n.generators_t["marginal_cost"] += gen_ep[n.generators_t["marginal_cost"].columns]
su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
n.storage_units["marginal_cost"] += su_ep
def add_dynamic_emission_prices(n):
co2_price = pd.read_csv(snakemake.input.co2_price, index_col=0, parse_dates=True)
co2_price = co2_price[~co2_price.index.duplicated()]
co2_price = (
co2_price.reindex(n.snapshots).fillna(method="ffill").fillna(method="bfill")
)
emissions = (
n.generators.carrier.map(n.carriers.co2_emissions) / n.generators.efficiency
)
co2_cost = expand_series(emissions, n.snapshots).T.mul(co2_price.iloc[:, 0], axis=0)
static = n.generators.marginal_cost
dynamic = n.get_switchable_as_dense("Generator", "marginal_cost")
marginal_cost = dynamic + co2_cost.reindex(columns=dynamic.columns, fill_value=0)
n.generators_t.marginal_cost = marginal_cost.loc[:, marginal_cost.ne(static).any()]
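A small sketch of the broadcasting step inside add_dynamic_emission_prices: expand_series turns the static per-generator emission factors into a snapshots x generators frame, which is then scaled row-wise by the CO2 price (toy numbers).

import pandas as pd
from pypsa.descriptors import expand_series

snapshots = pd.date_range("2019-01-01", periods=2, freq="h")
emissions = pd.Series({"gas plant": 0.4, "coal plant": 0.9})  # tCO2 per MWh_el
co2_price = pd.Series([25.0, 30.0], index=snapshots)          # EUR per tCO2
co2_cost = expand_series(emissions, snapshots).T.mul(co2_price, axis=0)
# co2_cost.loc[snapshots[1], "coal plant"] == 0.9 * 30.0 == 27.0 EUR/MWh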
def set_line_s_max_pu(n, s_max_pu=0.7):
n.lines["s_max_pu"] = s_max_pu
logger.info(f"N-1 security margin of lines set to {s_max_pu}")
@ -253,12 +274,13 @@ def set_line_nom_max(
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H"
"prepare_network", simpl="", clusters="37", ll="v1.0", opts="Ept"
)
configure_logging(snakemake)
@ -332,7 +354,12 @@ if __name__ == "__main__":
c.df.loc[sel, attr] *= factor
for o in opts:
if "Ep" in o:
if "Ept" in o:
logger.info(
"Setting time dependent emission prices according spot market price"
)
add_dynamic_emission_prices(n)
elif "Ep" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
logger.info("Setting emission prices according to wildcard value.")

View File

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve monthly fuel prices from Destatis.
"""
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from _helpers import configure_logging, progress_retrieve
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_monthly_fuel_prices")
rootpath = ".."
else:
rootpath = "."
configure_logging(snakemake)
url = "https://www.destatis.de/EN/Themes/Economy/Prices/Publications/Downloads-Energy-Price-Trends/energy-price-trends-xlsx-5619002.xlsx?__blob=publicationFile"
to_fn = Path(rootpath) / Path(snakemake.output[0])
logger.info(f"Downloading monthly fuel prices from '{url}'.")
disable_progress = snakemake.config["run"].get("disable_progressbar", False)
progress_retrieve(url, to_fn, disable=disable_progress)
logger.info(f"Monthly fuel prices available at {to_fn}")

View File

@ -595,47 +595,45 @@ def extra_functionality(n, snapshots):
def solve_network(n, config, solving, opts="", **kwargs):
set_of_options = solving["solver"]["options"]
solver_options = solving["solver_options"][set_of_options] if set_of_options else {}
solver_name = solving["solver"]["name"]
cf_solving = solving["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
max_iterations = cf_solving.get("max_iterations", 6)
transmission_losses = cf_solving.get("transmission_losses", 0)
assign_all_duals = cf_solving.get("assign_all_duals", False)
kwargs["solver_options"] = (
solving["solver_options"][set_of_options] if set_of_options else {}
)
kwargs["solver_name"] = solving["solver"]["name"]
kwargs["extra_functionality"] = extra_functionality
kwargs["transmission_losses"] = cf_solving.get("transmission_losses", False)
kwargs["linearized_unit_commitment"] = cf_solving.get(
"linearized_unit_commitment", False
)
kwargs["assign_all_duals"] = cf_solving.get("assign_all_duals", False)
rolling_horizon = cf_solving.pop("rolling_horizon", False)
skip_iterations = cf_solving.pop("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
# add to network for extra_functionality
n.config = config
n.opts = opts
skip_iterations = cf_solving.get("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
if skip_iterations:
status, condition = n.optimize(
solver_name=solver_name,
transmission_losses=transmission_losses,
assign_all_duals=assign_all_duals,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
if rolling_horizon:
kwargs["horizon"] = cf_solving.get("horizon", 365)
kwargs["overlap"] = cf_solving.get("overlap", 0)
n.optimize.optimize_with_rolling_horizon(**kwargs)
status, condition = "", ""
elif skip_iterations:
status, condition = n.optimize(**kwargs)
else:
kwargs["track_iterations"] = (cf_solving.get("track_iterations", False),)
kwargs["min_iterations"] = (cf_solving.get("min_iterations", 4),)
kwargs["max_iterations"] = (cf_solving.get("max_iterations", 6),)
status, condition = n.optimize.optimize_transmission_expansion_iteratively(
solver_name=solver_name,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
transmission_losses=transmission_losses,
assign_all_duals=assign_all_duals,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
**kwargs
)
if status != "ok":
if status != "ok" and not rolling_horizon:
logger.warning(
f"Solving status '{status}' with termination condition '{condition}'"
)
@ -650,14 +648,13 @@ if __name__ == "__main__":
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"solve_sector_network",
configfiles="test/config.overnight.yaml",
"solve_network",
simpl="",
opts="",
clusters="5",
ll="v1.5",
sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2030",
opts="Ept",
clusters="37",
ll="v1.0",
sector_opts="",
planning_horizons="2020",
)
configure_logging(snakemake)
if "sector_opts" in snakemake.wildcards.keys():