add unresolved merge conflict to be addressed

Fabian Neumann 2021-04-27 16:14:52 +02:00
commit 70078d03b2
41 changed files with 739 additions and 253 deletions


@ -24,7 +24,7 @@ before_install:
- conda activate pypsa-eur
# install open-source solver
- mamba install -c conda-forge glpk ipopt
- mamba install -c conda-forge glpk ipopt'<3.13.3'
# list packages for easier debugging
- conda list
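Note that the added version constraint is quoted (``'<3.13.3'``) so that the shell does not interpret ``<`` as an input redirection when the install command runs.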


@ -7,7 +7,7 @@ SPDX-License-Identifier: CC-BY-4.0
[![Build Status](https://travis-ci.org/PyPSA/pypsa-eur.svg?branch=master)](https://travis-ci.org/PyPSA/pypsa-eur)
[![Documentation](https://readthedocs.org/projects/pypsa-eur/badge/?version=latest)](https://pypsa-eur.readthedocs.io/en/latest/?badge=latest)
![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur)
[![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg)](https://doi.org/10.5281/zenodo.3520875)
[![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874)
[![Gitter](https://badges.gitter.im/PyPSA/community.svg)](https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
[![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)
[![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur)
@ -42,7 +42,7 @@ discussion in Section 3.4 "Model validation" of the paper.
![PyPSA-Eur Grid Model Simplified](doc/img/elec_s_X.png)
The model is designed to be imported into the open toolbox
The model building routines are defined through a snakemake workflow. The model is designed to be imported into the open toolbox
[PyPSA](https://github.com/PyPSA/PyPSA) for operational studies as
well as generation and transmission expansion planning studies.
@ -61,7 +61,7 @@ The dataset consists of:
- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [vresutils library](https://github.com/FRESNA/vresutils) and the [glaes library](https://github.com/FZJ-IEK3-VSA/glaes).
Already-built versions of the model can be found in the accompanying [Zenodo
repository](https://doi.org/10.5281/zenodo.3601882).
repository](https://doi.org/10.5281/zenodo.3601881).
A version of the model that adds building heating, transport and
industry sectors to the model, as well as gas networks, can be found


@ -51,8 +51,8 @@ datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls',
'eez/World_EEZ_v8_2014.shp', 'EIA_hydro_generation_2000_2014.csv',
'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp',
'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz',
'nama_10r_3gdp.tsv.gz', 'time_series_60min_singleindex_filtered.csv',
'corine/g250_clc06_V18_5.tif']
'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif']
if not config.get('tutorial', False):
@ -66,6 +66,12 @@ if config['enable'].get('retrieve_databundle', True):
script: 'scripts/retrieve_databundle.py'
rule build_load_data:
output: "resources/load.csv"
log: "logs/build_load_data.log"
script: 'scripts/build_load_data.py'
rule build_powerplants:
input:
base_network="networks/base.nc",
@ -211,7 +217,7 @@ rule add_electricity:
powerplants='resources/powerplants.csv',
hydro_capacities='data/bundle/hydro_capacities.csv',
geth_hydro_capacities='data/geth2015_hydro_capacities.csv',
opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
load='resources/load.csv',
nuts3_shapes='resources/nuts3_shapes.geojson',
**{f"profile_{tech}": "resources/profile{year}_" + f"{tech}.nc"
for tech in config['renewable']}
@ -290,6 +296,11 @@ def memory(w):
if m is not None:
factor /= int(m.group(1))
break
for o in w.opts.split('-'):
m = re.match(r'^(\d+)seg$', o, re.IGNORECASE)
if m is not None:
factor *= int(m.group(1)) / 8760
break
if w.clusters.endswith('m'):
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
else:
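
For orientation, a self-contained sketch of how the added segmentation factor scales the memory estimate alongside the existing ``nH`` resampling factor; the function name and the base figures are placeholders, not the exact values used in the Snakefile::

    import re

    def estimated_memory_mb(opts, clusters, base=10000, per_cluster=195):
        # base and per_cluster are illustrative placeholders
        factor = 3.
        for o in opts.split('-'):
            m = re.match(r'^(\d+)h$', o, re.IGNORECASE)    # e.g. "3H": temporal resampling
            if m is not None:
                factor /= int(m.group(1))                  # fewer snapshots, less memory
                break
        for o in opts.split('-'):
            m = re.match(r'^(\d+)seg$', o, re.IGNORECASE)  # e.g. "4380SEG": segmentation
            if m is not None:
                factor *= int(m.group(1)) / 8760           # scale by the share of a full year
                break
        return int(factor * (base + per_cluster * int(clusters)))

    print(estimated_memory_mb("Co2L-4380SEG", "37"))       # half of the unsegmented estimate
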


@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: CC0-1.0
version: 0.2.0
version: 0.3.0
tutorial: false
logging:
@ -32,17 +32,18 @@ enable:
retrieve_cutout: true
build_natura_raster: false
retrieve_natura_raster: true
custom_busmap: false
electricity:
voltages: [220., 300., 380.]
co2limit: 7.75e+7 # 0.05 * 3.1e9*0.5
co2base: 3.1e+9 # 1 * 3.1e9*0.5
co2base: 1.487e9
agg_p_nom_limits: data/agg_p_nom_minmax.csv
extendable_carriers:
Generator: []
StorageUnit: [battery, H2]
Store: [] # battery, H2
StorageUnit: [] # battery, H2
Store: [battery, H2]
Link: []
max_hours:
@ -52,6 +53,7 @@ electricity:
powerplants_filter: false # use pandas query strings here, e.g. Country not in ['Germany']
custom_powerplants: false # use pandas query strings here, e.g. Country in ['Germany']
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
renewable_capacities_from_OPSD: [] # onwind, offwind, solar
# estimate_renewable_capacities_from_capacity_stats:
# # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur
@ -142,8 +144,7 @@ renewable:
cutout: europe-2013-era5
carriers: [ror, PHS, hydro]
PHS_max_hours: 6
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country,
# estimate_by_large_installations or a float
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
clip_min_inflow: 1.0
lines:
@ -168,6 +169,11 @@ transformers:
type: ''
load:
url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # larger data gaps are filled by copying time-slices of this period length
manual_adjustments: true # false
scaling_factor: 1.0
fallback_year: 2013
@ -175,13 +181,16 @@ costs:
year: 2030
discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016
USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html
marginal_cost:
marginal_cost: # EUR/MWh
solar: 0.01
onwind: 0.015
offwind: 0.015
hydro: 0.
H2: 0.
electrolysis: 0.
fuel cell: 0.
battery: 0.
battery inverter: 0.
emission_prices: # in currency per tonne emission, only used with the option Ep
co2: 0.


@ -2,8 +2,9 @@
#
# SPDX-License-Identifier: CC0-1.0
version: 0.2.0
version: 0.3.0
tutorial: true
logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
@ -31,6 +32,7 @@ enable:
retrieve_cutout: true
build_natura_raster: false
retrieve_natura_raster: true
custom_busmap: false
electricity:
voltages: [220., 300., 380.]
@ -38,8 +40,8 @@ electricity:
extendable_carriers:
Generator: [OCGT]
StorageUnit: [battery, H2]
Store: [] #battery, H2
StorageUnit: [] #battery, H2
Store: [battery, H2]
Link: []
max_hours:
@ -146,6 +148,11 @@ transformers:
type: ''
load:
url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # larger data gaps are filled by copying time-slices of this period length
manual_adjustments: true # false
scaling_factor: 1.0
fallback_year: 2013


@ -114,7 +114,7 @@ DAC,2030,lifetime,30,years,Fasihi
DAC,2030,FOM,4,%/year,Fasihi
battery inverter,2030,investment,411,USD/kWel,budischak2013
battery inverter,2030,lifetime,20,years,budischak2013
battery inverter,2030,efficiency,0.81,per unit,budischak2013; Lund and Kempton (2008) http://dx.doi.org/10.1016/j.enpol.2008.06.007
battery inverter,2030,efficiency,0.9,per unit charge/discharge,budischak2013; Lund and Kempton (2008) http://dx.doi.org/10.1016/j.enpol.2008.06.007
battery inverter,2030,FOM,3,%/year,budischak2013
battery storage,2030,investment,192,USD/kWh,budischak2013
battery storage,2030,lifetime,15,years,budischak2013



@ -6,8 +6,8 @@ Italy-Montenegro,Villanova (IT),Latsva (MT),445,,1200,under construction,Link.14
NordLink,Tonstad (NO),Wilster (DE),514,,1400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/37,6.716948,58.662631,9.373979,53.922479
COBRA cable,Endrup (DK),Eemshaven (NL),325,,700,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/71,8.718392,55.523115,6.835494,53.438589
Thames Estuary Cluster (NEMO-Link),Richborough (GB),Gezelle (BE),140,,1000,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/74,1.324854,51.295891,3.23043,51.24902
Anglo-Scottish -1,Hunterston (UK),Deeside (UK),422,,2400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/77,-4.898329,55.723331,-3.032972,53.199735
ALEGrO,Lixhe (BE),Oberzier (DE),100,,1000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/92,5.67933,50.7567965,6.474704,50.867532
Anglo-Scottish -1,Hunterston (UK),Deeside (UK),422,,2400,built,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/77,-4.898329,55.723331,-3.032972,53.199735
ALEGrO,Lixhe (BE),Oberzier (DE),100,,1000,built,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/92,5.67933,50.7567965,6.474704,50.867532
North Sea Link,Kvilldal (NO),Blythe (GB),720,,1400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/110,6.637527,59.515096,-1.510277,55.126957
HVDC SuedOstLink,Wolmirstedt (DE),Isar (DE),,557,2000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/130,11.629014,52.252137,12.091596,48.080837
HVDC Line A-North,Emden East (DE),Osterath (DE),,284,2000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/132,7.206009,53.359403,6.619451,51.272935



@ -33,12 +33,13 @@ Link:
"14559": "6240" # fix wrong bus allocation from 6241
"12998": "1333" # combine link 12998 + 12997 in 12998
"5627": '2309' # combine link 5627 + 5628 in 5627
"8068": "5819" # fix GB location of Anglo-Scottish interconnector
length:
index:
"12998": 409.0
"5627": 26.39
bus0:
index:
# set bus0 == bus1 for removing the link in remove_unconnected_components
"5628": "7276"
"12997": "7276"
"14552": "5819" # fix GB location of GB-IE interconnector
"5628": "7276" # bus0 == bus1 to remove link in remove_unconnected_components
"12997": "7276" # bus0 == bus1 to remove link in remove_unconnected_components


@ -2,21 +2,72 @@
SPDX-License-Identifier: GPL-3.0-or-later
*/
/* override table width restrictions */
@media screen and (min-width: 767px) {
.wy-side-nav-search {
background-color: #eeeeee;
}
.wy-side-nav-search .wy-dropdown>a,
.wy-side-nav-search>a {
color: rgb(34, 97, 156)
}
.wy-side-nav-search>div.version {
color: rgb(34, 97, 156)
}
.wy-menu-vertical header,
.wy-menu-vertical p.caption,
.rst-versions a {
color: #999999;
}
.wy-menu-vertical a.reference:hover,
.wy-menu-vertical a.reference.internal:hover {
background: #dddddd;
color: #fff;
}
.wy-nav-side {
background: #efefef;
}
.wy-menu-vertical a.reference {
color: #000;
}
.rst-versions .rst-current-version,
.wy-nav-top,
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a:hover {
background: #002221;
}
.wy-nav-content .highlight {
background: #ffffff;
}
.rst-content code.literal,
.rst-content tt.literal {
color: rgb(34, 97, 156)
}
.wy-nav-content a.reference {
color: rgb(34, 97, 156);
}
/* override table width restrictions */
@media screen and (min-width: 767px) {
.wy-table-responsive table td {
/* !important prevents the common CSS stylesheets from overriding
this as on RTD they are loaded after this stylesheet */
white-space: normal !important;
/* background: #eeeeee !important; */
background: rgb(250, 250, 250) !important;
}
.wy-table-responsive {
max-width: 100%;
overflow: visible !important;
}
.wy-nav-content {
max-width: 910px !important;
}


@ -60,7 +60,7 @@ Now a window with the machine details will open. You have to configure the follo
You can edit your machine configuration later, so use a cheap machine type to transfer data and
switch to your expensive machine type (for instance a custom 8 vCPU with 160 GB memory) only when everything is ready and tested.
Solvers do not parallelise well, so we recommend not to choose more than 8 vCPU.
Check ``snakemake -j -n 1 solve_all_networks`` as a dry run to see how much memory is required.
Check ``snakemake -n -j 1 solve_all_networks`` as a dry run to see how much memory is required.
The memory requirements will vary depending on the spatial and temporal resolution of your optimisation.
Example: for an hourly, 181-node full European network, set 8 vCPU and 150 GB memory since the dry-run calculated a 135 GB memory requirement.
- Boot disk: As default, your VM is created with 10 GB. Depending on how much you want to handle on one VM you should increase the disk size.
@ -85,7 +85,7 @@ Step 3 - Installation of Cloud SDK
sudo apt-get update
sudo apt-get install bzip2 libxml2-dev
sudo apt-get install wget
wget https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh (Check the link. To be up to date with anaconda, check the Anaconda website https://www.anaconda.com/products/individual )
wget https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh
ls (to see what anaconda file to bash)
bash Anaconda3-2020.07-Linux-x86_64.sh
source ~/.bashrc


@ -74,9 +74,9 @@ author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberge
# built documents.
#
# The short X.Y version.
version = u'0.2'
version = u'0.3'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
release = u'0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.


@ -1,16 +1,19 @@
,Unit,Values,Description
voltages,kV,"Any subset of {220., 300., 380.}","Voltage levels to consider when"
co2limit,:math:`t_{CO_2-eq}/a`,float,"Cap on total annual system carbon dioxide emissions"
co2base,:math:`t_{CO_2-eq}/a`,float,"Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard."
agg_p_nom_limits,--,file path,"Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``."
extendable_carriers,,,
-- Generator,--,"Any subset of {'OCGT','CCGT'}","Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits."
-- StorageUnit,--,"Any subset of {'battery','H2'}","Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity."
-- Store,--,"Any subset of {'battery','H2'}","Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity."
-- Link,--,"Any subset of {'H2 pipeline'}","Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``."
max_hours,,,
-- battery,h,float,"Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
-- H2,h,float,"Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_."
powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country not in ['Germany']","Filter query for the default powerplant database."
custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country in ['Germany']","Filter query for the custom powerplant database."
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``."
,Unit,Values,Description,
voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider when,
co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions,
co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard.,
agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``.
extendable_carriers,,,,
-- Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits.
-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity.
-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``.
max_hours,,,,
-- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
-- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation <https://pypsa.readthedocs.io/en/latest/components.html#storage-unit>`_.
powerplants_filter,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database.,
custom_powerplants,--,"use `pandas.query <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.query.html>`_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database.,
conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``.,
renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list <https://data.open-power-system-data.org/renewable_power_plants/>`_,
estimate_renewable_capacities_from_capacity_stats,,,,
"-- Fueltype [ppm], e.g. Wind",,"list of fueltypes strings in PyPSA-Eur, e.g. [onwind, offwind-ac, offwind-dc]",converts ppm Fueltype to PyPSA-EUR Fueltype,



@ -1,2 +1,7 @@
,Unit,Values,Description
url,--,string,"Link to open power system data time series data."
power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards).
interpolate_limit,hours,integer,"Maximum gap size (consecutive NaNs) that is interpolated linearly."
time_shift_for_large_gaps,string,string,"Periods used for copying time-slices in order to fill large gaps of NaNs. Have to be valid ``pandas`` period strings."
manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`."
scaling_factor,--,float,"Global correction factor for the load time series."
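
As a minimal illustration (not the actual ``build_load_data.py`` code) of how ``interpolate_limit`` and ``scaling_factor`` act on a load time series, assuming a pandas Series with hourly values and made-up numbers; larger gaps are instead filled by copying time-slices shifted by ``time_shift_for_large_gaps``::

    import numpy as np
    import pandas as pd

    index = pd.date_range("2013-01-01", periods=8, freq="H")
    load = pd.Series([50., 52., np.nan, np.nan, 55., 56., 54., 53.], index=index)

    interpolate_limit = 3   # config: load: interpolate_limit
    scaling_factor = 1.0    # config: load: scaling_factor

    # short gaps (up to `interpolate_limit` consecutive NaNs) are filled linearly,
    # then the global correction factor is applied
    filled = load.interpolate(limit=interpolate_limit) * scaling_factor
    print(filled)
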



@ -1,5 +1,6 @@
Trigger, Description, Definition, Status
``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L110>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L146>`_), In active use
``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use
``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended, an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L19>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L154>`_, In active use
``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L24>`_ and its `caller <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/prepare_network.py#L158>`_, In active use
``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use
@ -7,4 +8,4 @@ Trigger, Description, Definition, Status
``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use
``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L66>`_, Untested
``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L73>`_, Untested
``carrier+factor``, "Alter the capital cost of a carrier by a factor. Example: ``solar+0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use
``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use



@ -3,7 +3,7 @@ version,--,0.x.x,"Version of PyPSA-Eur"
tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set."
logging,,,
-- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only"
-- format,--,"e.g. ``%(levelname)s:%(name)s:%(message)s``","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes."
-- format,--,"","Custom format for log messages. See `LogRecord <https://docs.python.org/3/library/logging.html#logging.LogRecord>`_ attributes."
summary_dir,--,"e.g. 'results'","Directory into which results are written."
countries,--,"Subset of {'AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'}","European countries defined by their `Two-letter country codes (ISO 3166-1) <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2>`_ which should be included in the energy system model."
focus_weights,--,"Keys should be two-digit country codes (e.g. DE) and values should range between 0 and 1","Ratio of total clusters for particular countries. The remaining weight is distributed according to mean load. An example: ``focus_weights: DE: 0.6 FR: 0.2``."
@ -14,3 +14,4 @@ enable,,,
-- retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`."
-- build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`."
-- retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`."
-- custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/custom_busmap_elec{year}_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec{year}_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``."



@ -18,7 +18,7 @@ Top-level configuration
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 5-12,21,28-34
:lines: 5-12,20,27-34
.. csv-table::
:header-rows: 1
@ -50,7 +50,7 @@ An exemplary dependency graph (starting from the simplification rules) then look
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 14-19
:lines: 14-18
.. csv-table::
:header-rows: 1
@ -66,7 +66,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 23-26
:lines: 22-25
.. csv-table::
:header-rows: 1
@ -80,7 +80,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 36-54
:lines: 36-60
.. csv-table::
:header-rows: 1
@ -97,7 +97,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 61-74
:lines: 62-75
.. csv-table::
:header-rows: 1
@ -114,7 +114,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 76-93
:lines: 77-94
.. csv-table::
:header-rows: 1
@ -126,7 +126,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 76,94-106
:lines: 77,95-107
.. csv-table::
:header-rows: 1
@ -138,7 +138,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 76,107-120
:lines: 77,108-121
.. csv-table::
:header-rows: 1
@ -150,7 +150,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 76,121-140
:lines: 77,122-141
.. csv-table::
:header-rows: 1
@ -162,7 +162,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 76,141-147
:lines: 77,142-147
.. csv-table::
:header-rows: 1
@ -218,7 +218,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 170-171
:lines: 170-176
.. csv-table::
:header-rows: 1
@ -232,7 +232,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 173-185
:lines: 178-190
.. csv-table::
:header-rows: 1
@ -241,7 +241,6 @@ Specifies the temporal range to build an energy system model for as arguments to
.. note::
To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file.
You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``.
.. _solving_cf:
@ -254,7 +253,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 187-197
:lines: 192-202
.. csv-table::
:header-rows: 1
@ -266,7 +265,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 187,198-214
:lines: 192,203-219
.. csv-table::
:header-rows: 1
@ -280,7 +279,7 @@ Specifies the temporal range to build an energy system model for as arguments to
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 216-355
:lines: 221-299
.. csv-table::
:header-rows: 1


@ -12,7 +12,7 @@ be it with new ideas, suggestions, by filing bug reports or contributing code
to our `GitHub repository <https://github.com/PyPSA/PyPSA-Eur>`_.
* If you already have some code changes, you can submit them directly as a `pull request <https://github.com/PyPSA/pypsa-eur/pulls>`_.
* If you are wondering where we would greatly appreciate your efforts, check out the ``help wanted`` tag in the `issues list <https://github.com/PyPSA/pypsa-eur/issues`_ and initiate a discussion there..
* If you are wondering where we would greatly appreciate your efforts, check out the ``help wanted`` tag in the `issues list <https://github.com/PyPSA/pypsa-eur/issues>`_ and initiate a discussion there.
* If you start working on a feature in the code, let us know by opening an issue or a draft pull request.
This helps all of us to keep an overview on what is being done and helps to avoid a situation where we
are doing the same work twice in parallel.


@ -32,7 +32,7 @@ Based on the parameters above the ``marginal_cost`` and ``capital_cost`` of the
.. note::
Another great resource for `cost assumptions <https://ens.dk/en/our-services/projections-and-models/technology-data`_ is the cost database from the Danish Energy Agency.
Another great resource for cost assumptions is the `cost database from the Danish Energy Agency <https://ens.dk/en/our-services/projections-and-models/technology-data>`_.
Modifying Cost Assumptions
==========================


@ -19,8 +19,8 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System
.. image:: https://img.shields.io/github/repo-size/pypsa/pypsa-eur
:alt: GitHub repo size
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg
:target: https://doi.org/10.5281/zenodo.3520875
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg
:target: https://doi.org/10.5281/zenodo.3520874
.. image:: https://badges.gitter.im/PyPSA/community.svg
:target: https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge
@ -42,6 +42,8 @@ It contains alternating current lines at and above 220 kV voltage level and all
The model is suitable both for operational studies and generation and transmission expansion planning studies. The continental scope and highly resolved spatial scale enables a proper description of the long-range smoothing effects for renewable power generation and their varying resource availability.
.. image:: img/base.png
:width: 50%
:align: center
The restriction to freely available and open data encourages the open exchange of model data developments and eases the comparison of model results. It provides a full, automated software pipeline to assemble the load-flow-ready model from the original datasets, which enables easy replacement and improvement of the individual parts.
@ -169,16 +171,16 @@ Please use the following BibTeX: ::
If you want to cite a specific PyPSA-Eur version, each release of PyPSA-Eur is stored on Zenodo with a release-specific DOI.
This can be found linked from the overall PyPSA-Eur Zenodo DOI:
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg
:target: https://doi.org/10.5281/zenodo.3520875
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg
:target: https://doi.org/10.5281/zenodo.3520874
Pre-Built Networks as a Dataset
===============================
There are pre-built networks available as a dataset on Zenodo as well for every release of PyPSA-Eur.
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601882.svg
:target: https://doi.org/10.5281/zenodo.3601882
.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601881.svg
:target: https://doi.org/10.5281/zenodo.3601881
The included ``.nc`` files are PyPSA network files which can be imported with PyPSA via:


@ -17,6 +17,7 @@ Clone the Repository
First of all, clone the `PyPSA-Eur repository <https://github.com/PyPSA/pypsa-eur>`_ using the version control system ``git``.
The path to the directory into which the ``git repository`` is cloned must **not** have any spaces!
If you do not have ``git`` installed, follow installation instructions `here <https://git-scm.com/book/en/v2/Getting-Started-Installing-Git>`_.
.. code:: bash
@ -24,8 +25,6 @@ The path to the directory into which the ``git repository`` is cloned, must **no
/some/path/without/spaces % git clone https://github.com/PyPSA/pypsa-eur.git
.. note::
If you do not have ``git`` installed, follow installation instructions `here <https://git-scm.com/book/en/v2/Getting-Started-Installing-Git>`_.
.. _deps:
@ -46,7 +45,6 @@ The environment can be installed and activated using
.../pypsa-eur % conda activate pypsa-eur
.. note::
Note that activation is local to the currently open shell!
After opening a new terminal window, one needs to reissue the second command!
@ -74,25 +72,23 @@ PyPSA is known to work with the free software
- `Cbc <https://projects.coin-or.org/Cbc#DownloadandInstall>`_
- `GLPK <https://www.gnu.org/software/glpk/>`_ (`WinGLPK <http://winglpk.sourceforge.net/>`_)
and the non-free, commercial software (for which free academic licenses are available)
and the non-free, commercial software (for some of which free academic licenses are available)
- `Gurobi <https://www.gurobi.com/documentation/quickstart.html>`_
- `CPLEX <https://www.ibm.com/products/ilog-cplex-optimization-studio>`_
- `FICO® Xpress Solver <https://www.fico.com/de/products/fico-xpress-solver>`_
and any other solver that works with the underlying modelling framework `Pyomo <http://www.pyomo.org/>`_.
For installation instructions of these solvers for your operating system, follow the links above.
Commercial solvers such as Gurobi and CPLEX currently significantly outperform open-source solvers for large-scale problems.
It might be the case that you can only retrieve solutions by using a commercial solver.
.. seealso::
`Getting a solver in the PyPSA documentation <https://pypsa.readthedocs.io/en/latest/installation.html#getting-a-solver-for-linear-optimisation>`_
.. note::
Commercial solvers such as Gurobi and CPLEX currently significantly outperform open-source solvers for large-scale problems.
It might be the case that you can only retrieve solutions by using a commercial solver.
.. note::
The rules :mod:`cluster_network` and :mod:`simplify_network` solve a quadratic optimisation problem for clustering.
The open-source solvers Cbc and GLPK cannot handle this. A fallback to Ipopt is implemented in this case, but requires
also Ipopt to be installed. For an open-source solver setup install in your `conda` environment on OSX/Linux
also Ipopt to be installed. For an open-source solver setup install in your ``conda`` environment on OSX/Linux
.. code:: bash


@ -64,4 +64,6 @@ Folder Structure
System Requirements
===================
Building the model with the scripts in this repository uses up to 20 GB of memory. Computing optimal investment and operation scenarios requires a strong interior-point solver compatible with the modelling library `Pyomo <https://www.pyomo.org>`_ like `Gurobi <http://www.gurobi.com/>`_ or `CPLEX <https://www.ibm.com/analytics/cplex-optimizer>`_ with up to 100 GB of memory.
Building the model with the scripts in this repository runs on a normal computer.
But computing optimal investment and operation scenarios requires a strong interior-point solver
like `Gurobi <http://www.gurobi.com/>`_ or `CPLEX <https://www.ibm.com/analytics/cplex-optimizer>`_ with more memory.


@ -39,6 +39,7 @@ together into a detailed PyPSA network stored in ``networks/elec.nc``.
preparation/retrieve
preparation/build_shapes
preparation/build_load_data
preparation/build_cutout
preparation/build_natura_raster
preparation/prepare_links_p_nom


@ -0,0 +1,12 @@
..
SPDX-FileCopyrightText: 2020-2021 The PyPSA-Eur Authors
SPDX-License-Identifier: CC-BY-4.0
.. _load_data:
Rule ``build_load_data``
=============================
.. automodule:: build_load_data


@ -11,43 +11,118 @@ Release Notes
Upcoming Release
================
* Fix: Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK. The old value was roughly twice too high and is only relevant when a relative emission reduction target is set via the ``{opts}`` wildcard.
* Add option to include marginal costs of links representing fuel cells, electrolysis, and battery inverters
[`#232 <https://github.com/PyPSA/pypsa-eur/pull/232>`_].
PyPSA-Eur 0.3.0 (7th December 2020)
==================================
**New Features**
Using the ``{opts}`` wildcard for scenarios:
* An option is introduced which adds constraints such that each country or node produces on average a minimal share of its total consumption itself.
For example ``EQ0.5c`` set in the ``{opts}`` wildcard requires each country to produce on average at least 50% of its consumption. Additionally,
the option ``ATK`` requires autarky at each node and removes all means of power transmission through lines and links. ``ATKc`` only removes
cross-border transfer capacities. Moreover, line and link capacities can be capped in the ``config.yaml`` at
``lines: s_nom_max:`` and ``links: p_nom_max`` (`#166 <https://github.com/PyPSA/pypsa-eur/pull/166>`_).
cross-border transfer capacities.
[`#166 <https://github.com/PyPSA/pypsa-eur/pull/166>`_].
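Roughly, ``EQ0.5c`` requires that the sum of a country's generation over all snapshots is at least 0.5 times the sum of its demand (a sketch of the idea; the exact formulation is part of the extra functionality applied when solving).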
* Added an option to alter the capital cost of carriers by a factor via ``carrier+factor`` in the ``{opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+0.5`` reduces the capital cost of solar to 50% of original values (`#167 <https://github.com/PyPSA/pypsa-eur/pull/167>`_).
* Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard.
This can be useful for exploring uncertain cost parameters.
Example: ``solar+c0.5`` reduces the capital cost of solar to 50% of original values
[`#167 <https://github.com/PyPSA/pypsa-eur/pull/167>`_, `#207 <https://github.com/PyPSA/pypsa-eur/pull/207>`_].
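For instance, several ``{opts}`` triggers can be chained with dashes, e.g. ``Co2L0.05-EQ0.7c-solar+c0.5`` combines an emission limit, a per-country self-sufficiency requirement and a solar capital cost reduction (an illustrative combination, not a recommended scenario).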
* Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network`.
* Added an option to the ``{opts}`` wildcard that applies a time series segmentation algorithm based on renewables, hydro inflow and load time series
to produce a given total number of adjacent snapshots of varying lengths.
This feature is an alternative to downsampling the temporal resolution by simply averaging and
uses the `tsam <https://tsam.readthedocs.io/en/latest/index.html>`_ package
[`#186 <https://github.com/PyPSA/pypsa-eur/pull/186>`_].
* Raise a warning if `tech_colors` in the config are not defined for all carriers.
More OPSD integration:
* Corrected HVDC link connections (a) between Norway and Denmark and (b) mainland Italy, Corsica (FR) and Sardinia (IT) (`#181 <https://github.com/PyPSA/pypsa-eur/pull/181>`_)
* Add renewable power plants from `OPSD <https://data.open-power-system-data.org/renewable_power_plants/2020-08-25>`_ to the network for specified technologies.
This will overwrite the capacities calculated from the heuristic approach in :func:`estimate_renewable_capacities()`
[`#212 <https://github.com/PyPSA/pypsa-eur/pull/212>`_].
* Added Google Cloud Platform tutorial (for Windows users).
* Electricity consumption data is now retrieved directly from the `OPSD website <https://data.open-power-system-data.org/time_series/2019-06-05>`_ using the rule :mod:`build_load_data`.
The user can decide whether to take the ENTSO-E power statistics data (default) or the ENTSO-E transparency data
[`#211 <https://github.com/PyPSA/pypsa-eur/pull/211>`_].
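The new input can also be (re)built on its own by targeting its declared output, e.g. ``snakemake -j 1 resources/load.csv`` (standard Snakemake file targeting; adjust the core count as needed).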
* Corrected setting of exogenous emission price (in config -> cost -> emission price). This was not weighted by the efficiency and effective emission of the generators. Fixed in `#171 <https://github.com/PyPSA/pypsa-eur/pull/171>`_.
Other:
* Don't remove capital costs from lines and links, when imposing a line volume limit (wildcard ``lv``) or a line cost limit (``lc``). Previously, these were removed to move the expansion in direction of the limit.
* Added an option to use custom busmaps in rule :mod:`cluster_network`. To use this feature set ``enable: custom_busmap: true``.
Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``,
which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``,
i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``
[`#193 <https://github.com/PyPSA/pypsa-eur/pull/193>`_].
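
As a sketch of the expected format, assuming the busmap is read as a two-column CSV mapping original bus ids to cluster bus names (the column names and bus ids below are made up)::

    import pandas as pd

    # hypothetical mapping: bus ids of networks/elec_s.nc -> clustered bus names
    busmap = pd.Series({"5231": "DE0 0", "5232": "DE0 0", "6140": "FR0 1"}, name="busmap")
    busmap.index.name = "Bus"

    # written to data/custom_busmap_elec_s_37.csv (here simpl="" and clusters=37),
    # it is picked up by cluster_network when `enable: custom_busmap: true` is set
    print(busmap.to_csv())
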
* Fix bug of clustering offwind-{ac,dc} sites in the option of high-resolution sites for renewables. Now, there are more sites for offwind-{ac,dc} available than network nodes. Before, they were clustered to the resolution of the network. (e.g. elec_s1024_37m.nc: 37 network nodes, 1024 sites)
* Line and link capacities can be capped in the ``config.yaml`` at ``lines: s_nom_max:`` and ``links: p_nom_max``:
[`#166 <https://github.com/PyPSA/pypsa-eur/pull/166>`_].
* Use `mamba` (https://github.com/mamba-org/mamba) for faster Travis CI builds (`#196 <https://github.com/PyPSA/pypsa-eur/pull/196>`_)
* Added Google Cloud Platform tutorial (for Windows users)
[`#177 <https://github.com/PyPSA/pypsa-eur/pull/177>`_].
* The N-1 security margin for transmission lines is now fixed to a provided value in ``config.yaml``, removing an undocumented linear interpolation between 0.5 and 0.7 in the range between 37 and 200 nodes.
**Changes**
* The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 <https://github.com/PyPSA/pypsa-eur/pull/198>`_)
* Don't remove capital costs from lines and links, when imposing a line volume limit (``lv``) or a line cost limit (``lc``).
Previously, these were removed to move the expansion in direction of the limit
[`#183 <https://github.com/PyPSA/pypsa-eur/pull/183>`_].
* The mappings for clustered lines and buses produced by the :mod:`simplify_network` and :mod:`cluster_network` rules
changed from Hierarchical Data Format (``.h5``) to Comma-Separated Values format (``.csv``) for ease of use.
[`#198 <https://github.com/PyPSA/pypsa-eur/pull/198>`_]
* The N-1 security margin for transmission lines is now fixed to a provided value in ``config.yaml``,
removing an undocumented linear interpolation between 0.5 and 0.7 in the range between 37 and 200 nodes.
[`#199 <https://github.com/PyPSA/pypsa-eur/pull/199>`_].
* Modelling hydrogen and battery storage with Store and Link components is now the default,
rather than using StorageUnit components with fixed power-to-energy ratio
[`#205 <https://github.com/PyPSA/pypsa-eur/pull/205>`_].
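
For orientation, a minimal PyPSA sketch of the two representations (bus names, the 0.9 efficiencies and the six-hour battery are illustrative): a ``StorageUnit`` ties energy to power via ``max_hours``, whereas the ``Store`` plus ``Link`` setup sizes charging power, discharging power and energy capacity independently::

    import pypsa

    n = pypsa.Network()
    n.add("Bus", "electricity")

    # previous default: storage unit with a fixed power-to-energy ratio
    n.add("StorageUnit", "battery unit", bus="electricity", max_hours=6,
          efficiency_store=0.9, efficiency_dispatch=0.9, p_nom_extendable=True)

    # new default: independent energy (Store) and power (Link) capacities
    n.add("Bus", "battery bus")
    n.add("Store", "battery", bus="battery bus", e_nom_extendable=True)
    n.add("Link", "battery charger", bus0="electricity", bus1="battery bus",
          efficiency=0.9, p_nom_extendable=True)
    n.add("Link", "battery discharger", bus0="battery bus", bus1="electricity",
          efficiency=0.9, p_nom_extendable=True)
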
* Use ``mamba`` (https://github.com/mamba-org/mamba) for faster Travis CI builds
[`#196 <https://github.com/PyPSA/pypsa-eur/pull/196>`_].
* Multiple smaller changes: Removed unused ``{network}`` wildcard, moved environment files to dedicated ``envs`` folder,
removed sector-coupling components from configuration files, updated documentation colors, minor refactoring and code cleaning
[`#190 <https://github.com/PyPSA/pypsa-eur/pull/190>`_].
**Bugs and Compatibility**
* Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network`
[`#172 <https://github.com/PyPSA/pypsa-eur/pull/172>`_].
* Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies.
In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip
[`#202 <https://github.com/PyPSA/pypsa-eur/pull/202>`_].
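For the battery inverter this means: with a round-trip efficiency of 0.81, the per charge/discharge figure is the square root of 0.81, i.e. 0.9, which is the value now entered in ``data/costs.csv``.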
* Corrected exogenous emission price setting (in ``config: cost: emission price:``),
which now correctly accounts for the efficiency and effective emission of the generators
[`#171 <https://github.com/PyPSA/pypsa-eur/pull/171>`_].
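In essence, the ``marginal_cost`` adder per generator is roughly the emission price times the carrier's CO2 intensity per MWh of fuel divided by the generator efficiency, so less efficient plants face a higher effective carbon cost per MWh of electricity (a sketch of the corrected weighting).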
* Corrected HVDC link connections (a) between Norway and Denmark and (b) mainland Italy, Corsica (FR) and Sardinia (IT)
as well as for East-Western and Anglo-Scottish interconnectors
[`#181 <https://github.com/PyPSA/pypsa-eur/pull/181>`_, `#206 <https://github.com/PyPSA/pypsa-eur/pull/206>`_].
* Fix bug of clustering ``offwind-{ac,dc}`` generators in the option of high-resolution generators for renewables.
Now, there are more sites for ``offwind-{ac,dc}`` available than network nodes.
Before, they were clustered to the resolution of the network (``elec_s1024_37m.nc``: 37 network nodes, 1024 generators)
[`#191 <https://github.com/PyPSA/pypsa-eur/pull/191>`_].
* Raise a warning if ``tech_colors`` in the config are not defined for all carriers
[`#178 <https://github.com/PyPSA/pypsa-eur/pull/178>`_].
PyPSA-Eur 0.2.0 (8th June 2020)
==================================
* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints were adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled (`#116 <https://github.com/PyPSA/pypsa-eur/pull/116>`_).
* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints was adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as a store-link combination are now coupled [`#116 <https://github.com/PyPSA/pypsa-eur/pull/116>`_].
* An updated extract of the `ENTSO-E Transmission System Map <https://www.entsoe.eu/data/map/>`_ (including Malta) was added to the repository using the `GridKit <https://github.com/PyPSA/GridKit>`_ tool. This tool has been updated to retrieve up-to-date map extracts using a single `script <https://github.com/PyPSA/GridKit/blob/master/entsoe/runall_in_docker.sh>`_. The updated extract features 5322 buses, 6574 lines, 46 links. (`#118 <https://github.com/PyPSA/pypsa-eur/pull/118>`_).
* An updated extract of the `ENTSO-E Transmission System Map <https://www.entsoe.eu/data/map/>`_ (including Malta) was added to the repository using the `GridKit <https://github.com/PyPSA/GridKit>`_ tool. This tool has been updated to retrieve up-to-date map extracts using a single `script <https://github.com/PyPSA/GridKit/blob/master/entsoe/runall_in_docker.sh>`_. The updated extract features 5322 buses, 6574 lines, 46 links. [`#118 <https://github.com/PyPSA/pypsa-eur/pull/118>`_].
* Added `FSFE REUSE <https://reuse.software>`_ compliant license information. Documentation now licensed under CC-BY-4.0 (`#160 <https://github.com/PyPSA/pypsa-eur/pull/160>`_).
* Added `FSFE REUSE <https://reuse.software>`_ compliant license information. Documentation now licensed under CC-BY-4.0 [`#160 <https://github.com/PyPSA/pypsa-eur/pull/160>`_].
* Added a 30 minute `video introduction <https://pypsa-eur.readthedocs.io/en/latest/introduction.html>`_ and a 20 minute `video tutorial <https://pypsa-eur.readthedocs.io/en/latest/tutorial.html>`_
@ -55,19 +130,19 @@ PyPSA-Eur 0.2.0 (8th June 2020)
* Added an option to skip iterative solving usually performed to update the line impedances of expanded lines at ``solving: options: skip_iterations:``.
* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` (`#136 <https://github.com/PyPSA/pypsa-eur/pull/136>`_).
* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` [`#136 <https://github.com/PyPSA/pypsa-eur/pull/136>`_].
* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus (`#131 <https://github.com/PyPSA/pypsa-eur/pull/131>`_).
* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 <https://github.com/PyPSA/pypsa-eur/pull/131>`_].
* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where less than 1 MW can be installed (`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_).
* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where less than 1 MW can be installed [`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_].
* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` (`#128 <https://github.com/PyPSA/pypsa-eur/pull/128>`_).
* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 <https://github.com/PyPSA/pypsa-eur/pull/128>`_].
* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry (`#143 <https://github.com/PyPSA/pypsa-eur/pull/143>`_).
* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry [`#143 <https://github.com/PyPSA/pypsa-eur/pull/143>`_].
* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links (`#149 <https://github.com/PyPSA/pypsa-eur/pull/149>`_).
* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links [`#149 <https://github.com/PyPSA/pypsa-eur/pull/149>`_].
* Added information on pull requests to contribution guidelines (`#151 <https://github.com/PyPSA/pypsa-eur/pull/151>`_).
* Added information on pull requests to contribution guidelines [`#151 <https://github.com/PyPSA/pypsa-eur/pull/151>`_].
* Improved documentation on open-source solver setup and added usage warnings.
@ -78,31 +153,31 @@ PyPSA-Eur 0.1.0 (9th January 2020)
This is the first release of PyPSA-Eur, a model of the European power system at the transmission network level. Recent changes include:
* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io <pypsa-eur.readthedocs.io>`_ (`#65 <https://github.com/PyPSA/pypsa-eur/pull/65>`_).
* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io <pypsa-eur.readthedocs.io>`_ [`#65 <https://github.com/PyPSA/pypsa-eur/pull/65>`_].
* The ``conda`` environment files were updated and extended (`#81 <https://github.com/PyPSA/pypsa-eur/pull/81>`_).
* The ``conda`` environment files were updated and extended [`#81 <https://github.com/PyPSA/pypsa-eur/pull/81>`_].
* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality (`#84 <https://github.com/PyPSA/pypsa-eur/pull/84>`_ and `#94 <https://github.com/PyPSA/pypsa-eur/pull/94>`_).
* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality [`#84 <https://github.com/PyPSA/pypsa-eur/pull/84>`_ and `#94 <https://github.com/PyPSA/pypsa-eur/pull/94>`_].
* Continuous integration testing with `Travis CI <https://travis-ci.org>`_ is now included for Linux, Mac and Windows (`#82 <https://github.com/PyPSA/pypsa-eur/pull/82>`_).
* Continuous integration testing with `Travis CI <https://travis-ci.org>`_ is now included for Linux, Mac and Windows [`#82 <https://github.com/PyPSA/pypsa-eur/pull/82>`_].
* Data dependencies were moved to `zenodo <https://zenodo.org/>`_ and are now versioned (`#60 <https://github.com/PyPSA/pypsa-eur/issues/60>`_).
* Data dependencies were moved to `zenodo <https://zenodo.org/>`_ and are now versioned [`#60 <https://github.com/PyPSA/pypsa-eur/issues/60>`_].
* Data dependencies are now retrieved directly from within the snakemake workflow (`#86 <https://github.com/PyPSA/pypsa-eur/pull/86>`_).
* Data dependencies are now retrieved directly from within the snakemake workflow [`#86 <https://github.com/PyPSA/pypsa-eur/pull/86>`_].
* Emission prices can be added to marginal costs of generators through the keyword ``Ep`` in the ``{opts}`` wildcard (`#100 <https://github.com/PyPSA/pypsa-eur/pull/100>`_).
* Emission prices can be added to marginal costs of generators through the keyword ``Ep`` in the ``{opts}`` wildcard [`#100 <https://github.com/PyPSA/pypsa-eur/pull/100>`_].
* An option is introduced to add extendable nuclear power plants to the network (`#98 <https://github.com/PyPSA/pypsa-eur/pull/98>`_).
* An option is introduced to add extendable nuclear power plants to the network [`#98 <https://github.com/PyPSA/pypsa-eur/pull/98>`_].
* Focus weights can now be specified for particular countries in the network clustering, which allows setting the proportion of the total number of clusters assigned to those countries (`#87 <https://github.com/PyPSA/pypsa-eur/pull/87>`_).
* Focus weights can now be specified for particular countries in the network clustering, which allows setting the proportion of the total number of clusters assigned to those countries [`#87 <https://github.com/PyPSA/pypsa-eur/pull/87>`_].
* A new rule :mod:`add_extra_components` allows adding additional components to the network only after clustering. It is thereby possible to model storage units (e.g. battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements (`#97 <https://github.com/PyPSA/pypsa-eur/pull/97>`_).
* A new rule :mod:`add_extra_components` allows adding additional components to the network only after clustering. It is thereby possible to model storage units (e.g. battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements [`#97 <https://github.com/PyPSA/pypsa-eur/pull/97>`_].
* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage (`#108 <https://github.com/PyPSA/pypsa-eur/pull/108>`_).
* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage [`#108 <https://github.com/PyPSA/pypsa-eur/pull/108>`_].
* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` (`#102 <https://github.com/PyPSA/pypsa-eur/pull/102>`_).
* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` [`#102 <https://github.com/PyPSA/pypsa-eur/pull/102>`_].
* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by the workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within an (I)Python terminal (or just by calling ``python <script-name>``) and thereby facilitates developing and debugging scripts significantly (`#107 <https://github.com/PyPSA/pypsa-eur/pull/107>`_).
* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by the workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within an (I)Python terminal (or just by calling ``python <script-name>``) and thereby facilitates developing and debugging scripts significantly [`#107 <https://github.com/PyPSA/pypsa-eur/pull/107>`_].
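For illustration, the typical usage pattern at the bottom of a script looks roughly like this (a sketch mirroring the scripts changed further below; ``build_load_data`` is just an example rule name):

.. code:: python

    from _helpers import configure_logging

    if __name__ == "__main__":
        if 'snakemake' not in globals():
            from _helpers import mock_snakemake
            snakemake = mock_snakemake('build_load_data')  # example rule name
        configure_logging(snakemake)
        # snakemake.input, snakemake.output and snakemake.config are now available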
Release Process
===============
@ -122,10 +197,10 @@ Release Process
* Tag a release on Github via ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message.
* Upload code to `zenodo code repository <https://doi.org/10.5281/zenodo.3520875>`_ with `GNU GPL 3.0 <https://www.gnu.org/licenses/gpl-3.0.en.html>`_ license.
* Upload code to `zenodo code repository <https://doi.org/10.5281/zenodo.3520874>`_ with `GNU GPL 3.0 <https://www.gnu.org/licenses/gpl-3.0.en.html>`_ license.
* Create pre-built networks for ``config.default.yaml`` by running ``snakemake -j 1 extra_components_all_networks``.
* Upload pre-built networks to `zenodo data repository <https://doi.org/10.5281/zenodo.3601882>`_ with `CC BY 4.0 <https://creativecommons.org/licenses/by/4.0/>`_ license.
* Upload pre-built networks to `zenodo data repository <https://doi.org/10.5281/zenodo.3601881>`_ with `CC BY 4.0 <https://creativecommons.org/licenses/by/4.0/>`_ license.
* Send announcement on the `PyPSA and PyPSA-Eur mailing list <https://groups.google.com/forum/#!forum/pypsa>`_.

View File

@ -59,35 +59,35 @@ It is also possible to allow less or more carbon-dioxide emissions. Here, we lim
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 35,37
:lines: 36,38
PyPSA-Eur also includes a database of existing conventional powerplants.
We can select which types of powerplants we would like to include with fixed capacities:
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 35,51
:lines: 36,52
To accurately model the temporal and spatial availability of renewables such as wind and solar energy, we rely on historical weather data.
It is advisable to adapt the required range of coordinates to the selection of countries.
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 53-61
:lines: 54-62
We can also decide which weather data source should be used to calculate potentials and capacity factor time-series for each carrier.
For example, we may want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset.
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 63,106-107
:lines: 64,107-108
Finally, it is possible to pick a solver. For instance, this tutorial uses the open-source solvers CBC and Ipopt and does not rely
on the commercial solvers Gurobi or CPLEX (for which free academic licenses are available).
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 164,175-176
:lines: 170,180-181
.. note::
@ -271,7 +271,7 @@ the wildcards given in ``scenario`` in the configuration file ``config.yaml`` ar
.. literalinclude:: ../config.tutorial.yaml
:language: yaml
:lines: 13-18
:lines: 14-18
In this example we would not only solve a 6-node model of Germany but also a 2-node model.
@ -286,12 +286,4 @@ The solved networks can be analysed just like any other PyPSA network (e.g. in J
network = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc")
...
For inspiration, read the `examples section in the PyPSA documentation <https://pypsa.readthedocs.io/en/latest/examples.html>`_.
.. note::
There are rules for summaries and plotting available in the repository of PyPSA-Eur.
They are currently under revision and therefore not yet documented.

View File

@ -27,9 +27,6 @@ The ``{simpl}`` wildcard specifies number of buses a detailed
network model should be pre-clustered to in the rule
:mod:`simplify_network` (before :mod:`cluster_network`).
.. seealso::
:mod:`simplify_network`
.. _clusters:
The ``{clusters}`` wildcard
@ -45,9 +42,6 @@ If an `m` is placed behind the number of clusters (e.g. ``100m``),
generators are only moved to the clustered buses but not aggregated
by carrier; i.e. the clustered bus may have more than one e.g. wind generator.
.. seealso::
:mod:`cluster_network`
.. _ll:
The ``{ll}`` wildcard
@ -79,9 +73,6 @@ The wildcard, in general, consists of two parts:
(c) ``c1.25`` will allow to build a transmission network that
costs no more than 25 % more than the current system.
.. seealso::
:mod:`prepare_network`
.. _opts:
The ``{opts}`` wildcard
@ -98,16 +89,13 @@ It may hold multiple triggers separated by ``-``, i.e. ``Co2L-3H`` contains the
:widths: 10,20,10,10
:file: configtables/opts.csv
.. seealso::
:mod:`prepare_network`, :mod:`solve_network`
.. _country:
The ``{country}`` wildcard
==========================
The rules ``make_summary`` and ``plot_summary`` (generating summaries of all or a subselection
of the solved networks) as well as ``plot_p_nom_max`` (for plotting the cumulative
The rules :mod:`make_summary` and :mod:`plot_summary` (generating summaries of all or a subselection
of the solved networks) as well as :mod:`plot_p_nom_max` (for plotting the cumulative
generation potentials for renewable technologies) can be narrowed to
individual countries using the ``{country}`` wildcard.
@ -121,9 +109,6 @@ in Germany (in the solution for Europe) use:
snakemake -j 1 results/summaries/elec_s_all_lall_Co2L-3H_DE
.. seealso::
:mod:`make_summary`, :mod:`plot_summary`, :mod:`plot_p_nom_max`
.. _cutout_wc:
The ``{cutout}`` wildcard
@ -133,9 +118,6 @@ The ``{cutout}`` wildcard facilitates running the rule :mod:`build_cutout`
for all cutout configurations specified under ``atlite: cutouts:``.
These cutouts will be stored in a folder specified by ``{cutout}``.
.. seealso::
:mod:`build_cutout`, :ref:`atlite_cf`
.. _technology:
The ``{technology}`` wildcard
@ -151,22 +133,16 @@ For instance ``{technology}`` can be used to plot regionally disaggregated poten
with the rule :mod:`plot_p_nom_max` or to summarize a particular technology's
full load hours in various countries with the rule :mod:`build_country_flh`.
.. seealso::
:mod:`build_renewable_profiles`, :mod:`plot_p_nom_max`, :mod:`build_country_flh`
.. _attr:
The ``{attr}`` wildcard
=======================
The ``{attr}`` wildcard specifies which attribute are used for size
The ``{attr}`` wildcard specifies which attribute is used for size
representations of network components on a map plot produced by the rule
``plot_network``. While it might be extended in the future, ``{attr}``
:mod:`plot_network`. While it might be extended in the future, ``{attr}``
currently only supports plotting of ``p_nom``.
.. seealso::
:mod:`plot_network`
.. _ext:
The ``{ext}`` wildcard
@ -181,6 +157,3 @@ formats depends on the used backend. To query the supported file types on your s
import matplotlib.pyplot as plt
plt.gcf().canvas.get_supported_filetypes()
.. seealso::
:mod:`plot_network`, :mod:`plot_summary`, :mod:`plot_p_nom_max`

View File

@ -6,6 +6,7 @@ name: pypsa-eur-docs
channels:
- conda-forge
dependencies:
- python<=3.7
- pip
- pypsa>=0.17.1
- atlite=0.0.3

View File

@ -4,7 +4,6 @@
name: pypsa-eur
channels:
- defaults
- conda-forge
- bioconda
- http://conda.anaconda.org/gurobi
@ -19,15 +18,16 @@ dependencies:
# Dependencies of the workflow itself
- xlrd
- openpyxl
- scikit-learn
- pycountry
- seaborn
- snakemake-minimal<=5.24.2 # until https://github.com/snakemake/snakemake/issues/635 closed
- snakemake-minimal
- memory_profiler
- yaml
- pytables
- lxml
- powerplantmatching>=0.4.3
- powerplantmatching>=0.4.8
- numpy<=1.19.0 # otherwise macos fails
# Second order dependencies which should really be deps of atlite
@ -51,12 +51,11 @@ dependencies:
- rasterio
- shapely
- libgdal<=3.0.4
# Solvers
- gurobi:gurobi # until https://github.com/conda-forge/pypsa-feedstock/issues/4 closed
- descartes
- pip:
- vresutils==0.3.1
- tsam>=1.1.0
- git+https://github.com/PyPSA/glaes.git#egg=glaes
- git+https://github.com/PyPSA/geokit.git#egg=geokit
- cdsapi

View File

@ -170,6 +170,7 @@ def aggregate_costs(n, flatten=False, opts=None, existing_only=False):
n.iterate_components(iterkeys(components), skip_empty=False),
itervalues(components)
):
if c.df.empty: continue
if not existing_only: p_nom += "_opt"
costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
if p_attr is not None:

View File

@ -24,13 +24,13 @@ Relevant Settings
conventional_carriers:
co2limit:
extendable_carriers:
Generator:
include_renewable_capacities_from_OPSD:
estimate_renewable_capacities_from_capacity_stats:
load:
scaling_factor:
renewable: (keys)
renewable:
hydro:
carriers:
hydro_max_hours:
@ -52,15 +52,8 @@ Inputs
.. image:: ../img/hydrocapacities.png
:scale: 34 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; NOT CURRENTLY USED!
- ``data/bundle/time_series_60min_singleindex_filtered.csv``: Hourly per-country load profiles since 2010 from the `ENTSO-E statistical database <https://www.entsoe.eu/data/power-stats/hourly_load/>`_
.. image:: ../img/load-box.png
:scale: 33 %
.. image:: ../img/load-ts.png
:scale: 33 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!
- ``resources/opsd_load.csv``: Hourly per-country load profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`
@ -98,7 +91,9 @@ import pandas as pd
import numpy as np
import xarray as xr
import geopandas as gpd
import powerplantmatching as ppm
import powerplantmatching as pm
from powerplantmatching.export import map_country_bus
from vresutils.costdata import annuity
from vresutils.load import timeseries_opsd
@ -169,13 +164,10 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
def costs_for_storage(store, link1, link2=None, max_hours=1.):
capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
efficiency = link1['efficiency']**0.5
if link2 is not None:
capital_cost += link2['capital_cost']
efficiency *= link2['efficiency']**0.5
return pd.Series(dict(capital_cost=capital_cost,
marginal_cost=0.,
efficiency=efficiency,
co2_emissions=0.))
if elec_config is None:
@ -213,6 +205,7 @@ def attach_load(n):
regions = (gpd.read_file(snakemake.input.regions).set_index('name')
.reindex(substation_lv_i))
# ------------- TO MERGE --------------
available_years = range(2011,2016)
requested_years = n.snapshots.year[[0,-1]]
use_fallback = any(year not in available_years for year in requested_years)
@ -237,9 +230,15 @@ def attach_load(n):
# Convert to naive UTC (has to be explicit since pandas 0.24)
opsd_load.index = opsd_load.index.tz_localize(None)
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True)
.filter(items=snakemake.config['countries']))
def normed(x): return x.divide(x.sum())
scaling = snakemake.config.get('load', {}).get('scaling_factor', 1.0)
logger.info(f"Load data scaled with scalling factor {scaling}.")
opsd_load *= scaling
# ------------- TO MERGE --------------
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
def upsample(cntry, group):
l = opsd_load[cntry]
@ -255,7 +254,8 @@ def attach_load(n):
index=group.index)
# relative factors 0.6 and 0.4 have been determined from a linear
# regression on the country to continent load data (refer to vresutils.load._upsampling_weights)
# regression on the country to continent load data
# (refer to vresutils.load._upsampling_weights)
factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
return pd.DataFrame(factors.values * l.values[:,np.newaxis],
index=l.index, columns=factors.index)
@ -273,6 +273,11 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal
if n.links.empty: return
dc_b = n.links.carrier == 'DC'
# If there are no dc links, then the 'underwater_fraction' column
# may be missing. Therefore we have to return here.
if n.links.loc[dc_b].empty: return
if simple_hvdc_costs:
costs = (n.links.loc[dc_b, 'length'] * length_factor *
costs.at['HVDC overhead', 'capital_cost'])
@ -331,7 +336,7 @@ def attach_conventional_generators(n, costs, ppl):
ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
.rename(index=lambda s: 'C' + str(s)))
logger.info('Adding {} generators with capacities\n{}'
logger.info('Adding {} generators with capacities [MW] \n{}'
.format(len(ppl), ppl.groupby('carrier').p_nom.sum()))
n.madd("Generator", ppl.index,
@ -360,7 +365,7 @@ def attach_hydro(n, costs, ppl):
country = ppl['bus'].map(n.buses.country).rename("country")
inflow_idx = ror.index | hydro.index
inflow_idx = ror.index.union(hydro.index)
if not inflow_idx.empty:
dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)
@ -494,6 +499,39 @@ def attach_extendable_generators(n, costs, ppl):
"Only OCGT, CCGT and nuclear are allowed at the moment.")
def attach_OPSD_renewables(n):
available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB']
tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'}
countries = set(available) & set(n.buses.country)
techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', [])
tech_map = {k: v for k, v in tech_map.items() if v in techs}
if not tech_map:
return
logger.info(f'Using OPSD renewable capacities in {", ".join(countries)} '
f'for technologies {", ".join(tech_map.values())}.')
df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries])
technology_b = ~df.Technology.isin(['Onshore', 'Offshore'])
df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology)
df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2()
for fueltype, carrier_like in tech_map.items():
gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)]
buses = n.buses.loc[gens.bus.unique()]
gens_per_bus = gens.groupby('bus').p_nom.count()
caps = map_country_bus(df.query('Fueltype == @fueltype'), buses)
caps = caps.groupby(['bus']).Capacity.sum()
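# split the per-bus OPSD capacity evenly among the existing generators at that bus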
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
n.generators.p_nom.update(gens.bus.map(caps).dropna())
def estimate_renewable_capacities(n, tech_map=None):
if tech_map is None:
tech_map = (snakemake.config['electricity']
@ -501,16 +539,25 @@ def estimate_renewable_capacities(n, tech_map=None):
if len(tech_map) == 0: return
capacities = (ppm.data.Capacity_stats().powerplant.convert_country_to_alpha2()
capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2()
[lambda df: df.Energy_Source_Level_2]
.set_index(['Fueltype', 'Country']).sort_index())
countries = n.buses.country.unique()
if len(countries) == 0: return
logger.info('heuristics applied to distribute renewable capacities [MW] \n{}'
.format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1')
.groupby('Country').agg({'Capacity': 'sum'})))
for ppm_fueltype, techs in tech_map.items():
tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\
.reindex(countries, fill_value=0.)
tech_i = n.generators.query('carrier in @techs').index
#tech_i = n.generators.query('carrier in @techs').index
tech_i = (n.generators.query('carrier in @techs')
[n.generators.query('carrier in @techs')
.bus.map(n.buses.country).isin(countries)].index)
n.generators.loc[tech_i, 'p_nom'] = (
(n.generators_t.p_max_pu[tech_i].mean() *
n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation
@ -560,6 +607,8 @@ if __name__ == "__main__":
attach_extendable_generators(n, costs, ppl)
estimate_renewable_capacities(n)
attach_OPSD_renewables(n)
add_nice_carrier_names(n)
n.export_to_netcdf(snakemake.output[0])

View File

@ -73,6 +73,9 @@ def attach_storageunits(n, costs):
buses_i = n.buses.index
lookup_store = {"H2": "electrolysis", "battery": "battery inverter"}
lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"}
for carrier in carriers:
n.madd("StorageUnit", buses_i, ' ' + carrier,
bus=buses_i,
@ -80,8 +83,8 @@ def attach_storageunits(n, costs):
p_nom_extendable=True,
capital_cost=costs.at[carrier, 'capital_cost'],
marginal_cost=costs.at[carrier, 'marginal_cost'],
efficiency_store=costs.at[carrier, 'efficiency'],
efficiency_dispatch=costs.at[carrier, 'efficiency'],
efficiency_store=costs.at[lookup_store[carrier], 'efficiency'],
efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency'],
max_hours=max_hours[carrier],
cyclic_state_of_charge=True)
@ -111,7 +114,8 @@ def attach_stores(n, costs):
carrier='H2 electrolysis',
p_nom_extendable=True,
efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"])
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"])
n.madd("Link", h2_buses_i + " Fuel Cell",
bus0=h2_buses_i,
@ -120,7 +124,8 @@ def attach_stores(n, costs):
p_nom_extendable=True,
efficiency=costs.at["fuel cell", "efficiency"],
#NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"])
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"])
if 'battery' in carriers:
b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict)
@ -130,23 +135,27 @@ def attach_stores(n, costs):
carrier='battery',
e_cyclic=True,
e_nom_extendable=True,
capital_cost=costs.at['battery storage', 'capital_cost'])
capital_cost=costs.at['battery storage', 'capital_cost'],
marginal_cost=costs.at["battery", "marginal_cost"])
n.madd("Link", b_buses_i + " charger",
bus0=buses_i,
bus1=b_buses_i,
carrier='battery charger',
efficiency=costs.at['battery inverter', 'efficiency']**0.5,
efficiency=costs.at['battery inverter', 'efficiency'],
capital_cost=costs.at['battery inverter', 'capital_cost'],
p_nom_extendable=True)
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"])
n.madd("Link", b_buses_i + " discharger",
bus0=b_buses_i,
bus1=buses_i,
carrier='battery discharger',
efficiency=costs.at['battery inverter','efficiency']**0.5,
efficiency=costs.at['battery inverter','efficiency'],
capital_cost=costs.at['battery inverter', 'capital_cost'],
p_nom_extendable=True)
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"])
def attach_hydrogen_pipelines(n, costs):

View File

@ -205,8 +205,8 @@ def _add_links_from_tyndp(buses, links):
buses = buses.loc[keep_b['Bus']]
links = links.loc[keep_b['Link']]
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.15)
# Corresponds approximately to 15km tolerances
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20)
# Corresponds approximately to 20km tolerances
if links_tyndp["j"].notnull().any():
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
@ -552,6 +552,9 @@ def base_network():
n = pypsa.Network()
n.name = 'PyPSA-Eur'
n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots']))
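# scale snapshot weightings so they sum to 8760 hours (one full year), keeping annualised costs consistent when only part of a year is modelled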
n.snapshot_weightings[:] *= 8760. / n.snapshot_weightings.sum()
n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line")
n.import_components_from_dataframe(transformers, "Transformer")

227
scripts/build_load_data.py Executable file
View File

@ -0,0 +1,227 @@
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per-country load time series with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time slices of a given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
.. code:: yaml
snapshots:
load:
url:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`load_cf`
Inputs
------
Outputs
-------
- ``resources/time_series_60min_singleindex_filtered.csv``:
"""
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
import dateutil
from pandas import Timedelta as Delta
def load_timeseries(fn, years, countries, powerstatistics=True):
"""
Read load data from OPSD time-series package version 2020-10-06.
Parameters
----------
years : None or slice()
Years for which to read load data, e.g. slice("2018", "2019")
fn : str
File name or url location (file format .csv)
countries : listlike
Countries for which to read load data.
powerstatistics: bool
Whether the electricity consumption data of the ENTSOE power
statistics (if true) or of the ENTSOE transparency map (if false)
should be parsed.
Returns
-------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
"""
logger.info(f"Retrieving load data from '{fn}'.")
pattern = 'power_statistics' if powerstatistics else '_transparency'
pattern = f'_load_actual_entsoe_{pattern}'
rename = lambda s: s[:-len(pattern)]
date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={'GB_UKM' : 'GB'})
.filter(items=countries)
.loc[years])
def consecutive_nans(ds):
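# length of the consecutive-NaN run each NaN belongs to (0 for valid entries)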
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
.transform('sum').fillna(0))
def fill_large_gaps(ds, shift):
"""
Fill up large gaps with load data from the previous week.
This function fills gaps ranging from 3 to 168 hours (one week).
"""
shift = Delta(shift)
nhours = shift / np.timedelta64(1, 'h')
if (consecutive_nans(ds) > nhours).any():
logger.warning('There exist gaps larger than the time shift used for '
'copying time slices.')
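# values from `shift` earlier are re-indexed onto the current timestamps to fill the remaining gaps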
time_shift = pd.Series(ds.values, ds.index + shift)
return ds.where(ds.notnull(), time_shift.reindex_like(ds))
def nan_statistics(df):
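# summarise missing data per column: total NaNs, longest consecutive run and worst monthly total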
def max_consecutive_nans(ds):
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum())
.sum().max())
consecutive = df.apply(max_consecutive_nans)
total = df.isnull().sum()
max_total_per_month = df.isnull().resample('m').sum().max()
return pd.concat([total, consecutive, max_total_per_month],
keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
def copy_timeslice(load, cntry, start, stop, delta):
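# overwrite the window [start, stop] for country `cntry` with the values recorded `delta` earlier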
start = pd.Timestamp(start)
stop = pd.Timestamp(stop)
if start-delta in load.index and stop in load.index and cntry in load:
load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values
def manual_adjustment(load, powerstatistics):
"""
Manually adjust gaps in the load data from the OPSD time-series package.
1. For the ENTSOE power statistics load data (if powerstatistics is True)
Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
same load curve as Serbia and Albania the same as Macedonia, both scaled
by the corresponding ratio of total energy consumptions reported by
IEA Data browser [0] for the year 2013.
2. For the ENTSOE transparency load data (if powerstatistics is False)
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
same load curve as Montenegro, scaled by the corresponding ratio of total energy
consumptions reported by IEA Data browser [0] for the year 2016.
[0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons
Parameters
----------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
powerstatistics: bool
Whether argument load comprises the electricity consumption data of
the ENTSOE power statistics or of the ENTSOE transparency map
Returns
-------
load : pd.DataFrame
Manually adjusted and interpolated load time-series with UTC
timestamps x ISO-2 countries
"""
if powerstatistics:
if 'MK' in load.columns:
if 'AL' not in load.columns or load.AL.isnull().values.all():
load['AL'] = load['MK'] * (4.1 / 7.4)
if 'RS' in load.columns:
if 'KV' not in load.columns or load.KV.isnull().values.all():
load['KV'] = load['RS'] * (4.8 / 27.)
copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
# this is a weekend, so take the weekend before
copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
# whole January missing
copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364))
else:
if 'ME' in load:
if 'AL' not in load and 'AL' in countries:
load['AL'] = load.ME * (5.7/2.9)
if 'MK' not in load and 'MK' in countries:
load['MK'] = load.ME * (6.7/2.9)
copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
return load
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_load_data')
configure_logging(snakemake)
config = snakemake.config
powerstatistics = config['load']['power_statistics']
url = config['load']['url']
interpolate_limit = config['load']['interpolate_limit']
countries = config['countries']
snapshots = pd.date_range(freq='h', **config['snapshots'])
years = slice(snapshots[0], snapshots[-1])
time_shift = config['load']['time_shift_for_large_gaps']
load = load_timeseries(url, years, countries, powerstatistics)
if config['load']['manual_adjustments']:
load = manual_adjustment(load, powerstatistics)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method='linear', limit=interpolate_limit)
logger.info("Filling larger gaps by copying time-slices of period "
f"'{time_shift}'.")
load = load.apply(fill_large_gaps, shift=time_shift)
assert not load.isna().any().any(), (
'Load data contains nans. Adjust the parameters '
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
'for implementing the needed load data modifications.')
load.to_csv(snakemake.output[0])

View File

@ -33,8 +33,9 @@ Inputs
- ``resources/regions_onshore_elec{year}_s{simpl}.geojson``: confer :ref:`simplify`
- ``resources/regions_offshore_elec{year}_s{simpl}.geojson``: confer :ref:`simplify`
- ``resources/clustermaps_elec{year}_s{simpl}.h5``: confer :ref:`simplify`
- ``networks/elec{year}_s{simpl}.nc``: confer :ref:`simplify`
- ``resources/busmap_elec{year}_s{simpl}.csv``: confer :ref:`simplify`
- ``data/custom_busmap_elec{year}_s{simpl}_{clusters}.csv``: optional input
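A custom busmap is expected as a two-column CSV mapping each original bus name to a cluster label; a sketch of how it is read (the file name is hypothetical, mirroring the parsing shown in the code below):

.. code:: python

    import pandas as pd

    # index: original bus names, values: target cluster labels
    busmap = pd.read_csv("data/custom_busmap_elec_s_37.csv", index_col=0, squeeze=True)
    busmap.index = busmap.index.astype(str)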
Outputs
-------
@ -49,7 +50,8 @@ Outputs
.. image:: ../img/regions_offshore_elec_s_X.png
:scale: 33 %
- ``resources/clustermaps_elec{year}_s{simpl}_{clusters}.h5``: Mapping of buses and lines from ``networks/elec{year}_s{simpl}.nc`` to ``networks/elec{year}_s{simpl}_{clusters}.nc``; has keys ['/busmap', '/busmap_s', '/linemap', '/linemap_negative', '/linemap_positive']
- ``resources/busmap_elec{year}_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``;
- ``resources/linemap{year}_elec_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``;
- ``networks/elec{year}_s{simpl}_{clusters}.nc``:
.. image:: ../img/elec_s_X.png
@ -255,10 +257,9 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
.apply(busmap_for_country).squeeze().rename('busmap'))
def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
line_length_factor=1.25, potential_mode='simple',
solver_name="cbc", algorithm="kmeans",
extended_link_costs=0, focus_weights=None):
def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None,
line_length_factor=1.25, potential_mode='simple', solver_name="cbc",
algorithm="kmeans", extended_link_costs=0, focus_weights=None):
if potential_mode == 'simple':
p_nom_max_strategy = np.sum
@ -267,8 +268,15 @@ def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
else:
raise AttributeError(f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'")
if custom_busmap:
busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True)
busmap.index = busmap.index.astype(str)
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
else:
busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm)
clustering = get_clustering_from_busmap(
n, busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm),
n, busmap,
bus_strategies=dict(country=_make_consense("Bus", "country")),
aggregate_generators_weighted=True,
aggregate_generators_carriers=aggregate_carriers,
@ -363,7 +371,8 @@ if __name__ == "__main__":
return v
potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential']
for tech in renewable_carriers]))
clustering = clustering_for_n_clusters(n, n_clusters, aggregate_carriers,
custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers,
line_length_factor=line_length_factor,
potential_mode=potential_mode,
solver_name=snakemake.config['solving']['solver']['name'],

View File

@ -71,7 +71,7 @@ opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"}
def _add_indexed_rows(df, raw_index):
new_index = df.index|pd.MultiIndex.from_product(raw_index)
new_index = df.index.union(pd.MultiIndex.from_product(raw_index))
if isinstance(new_index, pd.Index):
new_index = pd.MultiIndex.from_tuples(new_index)
@ -126,7 +126,7 @@ def calculate_costs(n, label, costs):
marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()
costs = costs.reindex(costs.index|pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index]))
costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])))
costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values
@ -222,7 +222,7 @@ def calculate_supply(n, label, supply):
#lots of sign compensation for direction and to do maximums
s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum()
supply = supply.reindex(supply.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index]))
supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values
return supply
@ -268,7 +268,7 @@ def calculate_supply_energy(n, label, supply_energy):
s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum()
supply_energy = supply_energy.reindex(supply_energy.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index]))
supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values
return supply_energy
@ -276,7 +276,7 @@ def calculate_supply_energy(n, label, supply_energy):
def calculate_metrics(n,label,metrics):
metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))
metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])))
metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum()
@ -298,7 +298,7 @@ def calculate_prices(n,label,prices):
bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity")
prices = prices.reindex(prices.index|bus_type.value_counts().index)
prices = prices.reindex(prices.index.union(bus_type.value_counts().index))
logger.warning("Prices are time-averaged, not load-weighted")
prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()

View File

@ -164,7 +164,7 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
handler_map=make_handler_map_to_scale_circles_as_in(ax))
ax.add_artist(l2)
techs = (bus_sizes.index.levels[1]) & pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])
techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']))
handles = []
labels = []
for t in techs:

View File

@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
def rename_techs(label):
elif "H2" in label:
if "H2" in label:
label = "hydrogen storage"
elif label == "solar":
label = "solar PV"

View File

@ -11,7 +11,8 @@ Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such a
- setting an **N-1 security margin** factor for transmission line capacities,
- specifying an expansion limit on the **cost** of transmission expansion,
- specifying an expansion limit on the **volume** of transmission expansion, and
- reducing the **temporal** resolution by averaging over multiple hours.
- reducing the **temporal** resolution by averaging over multiple hours
or segmenting time series into chunks of varying lengths using ``tsam``.
Relevant Settings
-----------------
@ -110,7 +111,7 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1):
col = 'capital_cost' if ll_type == 'c' else 'length'
ref = (lines_s_nom @ n.lines[col] +
n.links[links_dc_b].p_nom @ n.links[links_dc_b][col])
n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col])
costs = load_costs(Nyears, snakemake.input.tech_costs,
snakemake.config['costs'],
@ -135,7 +136,7 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1):
def average_every_nhours(n, offset):
logger.info('Resampling the network to {}'.format(offset))
logger.info(f"Resampling the network to {offset}")
m = n.copy(with_time=False)
snapshot_weightings = n.snapshot_weightings.resample(offset).sum()
@ -150,6 +151,47 @@ def average_every_nhours(n, offset):
return m
def apply_time_segmentation(n, segments):
logger.info(f"Aggregating time series to {segments} segments.")
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError("Optional dependency 'tsam' not found. "
"Install via 'pip install tsam'.")
p_max_pu_norm = n.generators_t.p_max_pu.max()
p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm
load_norm = n.loads_t.p_set.max()
load = n.loads_t.p_set / load_norm
inflow_norm = n.storage_units_t.inflow.max()
inflow = n.storage_units_t.inflow / inflow_norm
raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False)
solver_name = snakemake.config["solving"]["solver"]["name"]
agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
noTypicalPeriods=1, noSegments=int(segments),
segmentation=True, solver=solver_name)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
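# each segment starts where the previous one ends; offsets are hours since the first snapshot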
snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
n.set_snapshots(pd.DatetimeIndex(snapshots, name='name'))
n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64")
segmented.index = snapshots
n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm
n.loads_t.p_set = segmented[n.loads_t.p_set.columns] * load_norm
n.storage_units_t.inflow = segmented[n.storage_units_t.inflow.columns] * inflow_norm
return n
def enforce_autarky(n, only_crossborder=False):
if only_crossborder:
lines_rm = n.lines.loc[
@ -162,7 +204,7 @@ def enforce_autarky(n, only_crossborder=False):
].index
else:
lines_rm = n.lines.index
links_rm = n.links.index
links_rm = n.links.loc[n.links.carrier=="DC"].index
n.mremove("Line", lines_rm)
n.mremove("Link", links_rm)
@ -191,8 +233,12 @@ if __name__ == "__main__":
if m is not None:
n = average_every_nhours(n, m.group(0))
break
else:
logger.info("No resampling")
for o in opts:
m = re.match(r'^\d+seg$', o, re.IGNORECASE)
if m is not None:
n = apply_time_segmentation(n, m.group(0)[:-3])
break
for o in opts:
if "Co2L" in o:
@ -208,14 +254,17 @@ if __name__ == "__main__":
suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index)
if oo[0].startswith(tuple(suptechs)):
carrier = oo[0]
cost_factor = float(oo[1])
# handles only p_nom_max as stores and lines have no potentials
attr_lookup = {"p": "p_nom_max", "c": "capital_cost"}
attr = attr_lookup[oo[1][0]]
factor = float(oo[1][1:])
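# e.g. 'solar+c0.5' halves the capital cost of solar, 'onwind+p3' triples the onwind potential (p_nom_max)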
if carrier == "AC": # lines do not have carrier
n.lines.capital_cost *= cost_factor
n.lines[attr] *= factor
else:
comps = {"Generator", "Link", "StorageUnit"}
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel,"capital_cost"] *= cost_factor
c.df.loc[sel,attr] *= factor
if 'Ep' in opts:
add_emission_prices(n)

View File

@ -62,7 +62,7 @@ Outputs
.. image:: ../img/regions_offshore_elec_s .png
:scale: 33 %
- ``resources/clustermaps_elec{year}_s{simpl}.h5``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec{year}_s{simpl}.nc``; has keys ['/busmap_s']
- ``resources/busmap_elec{year}_s{simpl}.h5``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec{year}_s{simpl}.nc``; has keys ['/busmap_s']
- ``networks/elec{year}_s{simpl}.nc``:
.. image:: ../img/elec_s.png
@ -322,7 +322,7 @@ def remove_stubs(n):
def cluster(n, n_clusters):
logger.info("Clustering to {} buses".format(n_clusters))
logger.info(f"Clustering to {n_clusters} buses")
renewable_carriers = pd.Index([tech
for tech in n.generators.carrier.unique()
@ -336,7 +336,7 @@ def cluster(n, n_clusters):
potential_mode = (consense(pd.Series([snakemake.config['renewable'][tech]['potential']
for tech in renewable_carriers]))
if len(renewable_carriers) > 0 else 'conservative')
clustering = clustering_for_n_clusters(n, n_clusters, potential_mode=potential_mode,
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode,
solver_name=snakemake.config['solving']['solver']['name'])
return clustering.network, clustering.busmap

View File

@ -10,10 +10,6 @@ Relevant Settings
.. code:: yaml
(electricity:)
(BAU_mincapacities:)
(SAFE_reservemargin:)
solving:
tmpdir:
options:
@ -28,10 +24,6 @@ Relevant Settings
track_iterations:
solver:
name:
(solveroptions):
(plotting:)
(conv_techs:)
.. seealso::
Documentation of the configuration file ``config.yaml`` at

View File

@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: CC0-1.0
version: 0.2.0
version: 0.3.0
tutorial: true
logging:
level: INFO
@ -31,6 +31,7 @@ enable:
retrieve_cutout: true
build_natura_raster: false
retrieve_natura_raster: true
custom_busmap: false
electricity:
voltages: [220., 300., 380.]
@ -146,6 +147,11 @@ transformers:
type: ''
load:
url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are filled by copying the time slice from this period before
manual_adjustments: true # false
scaling_factor: 1.0
fallback_year: 2013