From 984e688615c8ee25903fa2a17d9d68dd9020e757 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Wed, 28 Oct 2020 15:30:36 +0100 Subject: [PATCH 01/34] skip cost aggregation of component if empty --- scripts/_helpers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/_helpers.py b/scripts/_helpers.py index fff8143d..85f5eb76 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -168,6 +168,7 @@ def aggregate_costs(n, flatten=False, opts=None, existing_only=False): n.iterate_components(iterkeys(components), skip_empty=False), itervalues(components) ): + if c.df.empty: continue if not existing_only: p_nom += "_opt" costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum() if p_attr is not None: From 9c526687fd1a2e3bb030d572cd1cc9002399a15a Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 12 Nov 2020 16:27:52 +0100 Subject: [PATCH 02/34] environment: remove snakemake-minimal version constraint --- environment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yaml b/environment.yaml index f363d6f0..06f842a9 100644 --- a/environment.yaml +++ b/environment.yaml @@ -21,7 +21,7 @@ dependencies: - scikit-learn - pycountry - seaborn - - snakemake-minimal<=5.24.2 # until https://github.com/snakemake/snakemake/issues/635 closed + - snakemake-minimal - memory_profiler - yaml - pytables From 2e70e8d15b722e818efb57cf72b35a9536340365 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 12 Nov 2020 17:37:43 +0100 Subject: [PATCH 03/34] prepare: fix transmission limit when no HVDC links in network (#209) --- scripts/prepare_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index b1f9313d..ba6bb9e2 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -108,7 +108,7 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1): col = 'capital_cost' if ll_type == 'c' else 'length' ref 
= (lines_s_nom @ n.lines[col] + - n.links[links_dc_b].p_nom @ n.links[links_dc_b][col]) + n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]) costs = load_costs(Nyears, snakemake.input.tech_costs, snakemake.config['costs'], From 3b858384d321ede6a14e679304db4ba2c039fe16 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Tue, 17 Nov 2020 12:46:49 +0100 Subject: [PATCH 04/34] data: ALEGrO in operation --- data/links_tyndp.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/links_tyndp.csv b/data/links_tyndp.csv index 5d0aec28..27cb0d6b 100644 --- a/data/links_tyndp.csv +++ b/data/links_tyndp.csv @@ -7,7 +7,7 @@ NordLink,Tonstad (NO),Wilster (DE),514,,1400,under construction,,https://tyndp.e COBRA cable,Endrup (DK),Eemshaven (NL),325,,700,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/71,8.718392,55.523115,6.835494,53.438589 Thames Estuary Cluster (NEMO-Link),Richborough (GB),Gezelle (BE),140,,1000,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/74,1.324854,51.295891,3.23043,51.24902 Anglo-Scottish -1,Hunterston (UK),Deeside (UK),422,,2400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/77,-4.898329,55.723331,-3.032972,53.199735 -ALEGrO,Lixhe (BE),Oberzier (DE),100,,1000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/92,5.67933,50.7567965,6.474704,50.867532 +ALEGrO,Lixhe (BE),Oberzier (DE),100,,1000,built,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/92,5.67933,50.7567965,6.474704,50.867532 North Sea Link,Kvilldal (NO),Blythe (GB),720,,1400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/110,6.637527,59.515096,-1.510277,55.126957 HVDC SuedOstLink,Wolmirstedt (DE),Isar (DE),,557,2000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/130,11.629014,52.252137,12.091596,48.080837 HVDC Line A-North,Emden East (DE),Osterath (DE),,284,2000,in 
permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/132,7.206009,53.359403,6.619451,51.272935 From de0478968c9e271fdb8f4c766b05240a80fc31dc Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Wed, 25 Nov 2020 13:18:30 +0100 Subject: [PATCH 05/34] config: make storage modelling with Store and Link default (#205) --- config.default.yaml | 4 ++-- config.tutorial.yaml | 4 ++-- doc/release_notes.rst | 2 ++ 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index 0913282a..ff7a503f 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -41,8 +41,8 @@ electricity: extendable_carriers: Generator: [] - StorageUnit: [battery, H2] - Store: [] # battery, H2 + StorageUnit: [] # battery, H2 + Store: [battery, H2] Link: [] max_hours: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 7d577bc9..a51c2202 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -38,8 +38,8 @@ electricity: extendable_carriers: Generator: [OCGT] - StorageUnit: [battery, H2] - Store: [] #battery, H2 + StorageUnit: [] #battery, H2 + Store: [battery, H2] Link: [] max_hours: diff --git a/doc/release_notes.rst b/doc/release_notes.rst index dafbd200..85a7337b 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -39,6 +39,8 @@ Upcoming Release * The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) +* Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio (`#205 `_). 
+ PyPSA-Eur 0.2.0 (8th June 2020) ================================== From bfbf595ad8e6b448a45ee4b836553e0eaf639874 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 26 Nov 2020 17:20:48 +0100 Subject: [PATCH 06/34] Parameter corrections for East-Western and Anglo-Scottish interconnection (#206) * data: parameter corrections for East-Western and Anglo-Scottish interconnector * add release notes --- data/links_tyndp.csv | 2 +- data/parameter_corrections.yaml | 7 ++++--- doc/release_notes.rst | 2 ++ scripts/base_network.py | 4 ++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/data/links_tyndp.csv b/data/links_tyndp.csv index 27cb0d6b..f37f34c2 100644 --- a/data/links_tyndp.csv +++ b/data/links_tyndp.csv @@ -6,7 +6,7 @@ Italy-Montenegro,Villanova (IT),Latsva (MT),445,,1200,under construction,Link.14 NordLink,Tonstad (NO),Wilster (DE),514,,1400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/37,6.716948,58.662631,9.373979,53.922479 COBRA cable,Endrup (DK),Eemshaven (NL),325,,700,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/71,8.718392,55.523115,6.835494,53.438589 Thames Estuary Cluster (NEMO-Link),Richborough (GB),Gezelle (BE),140,,1000,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/74,1.324854,51.295891,3.23043,51.24902 -Anglo-Scottish -1,Hunterston (UK),Deeside (UK),422,,2400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/77,-4.898329,55.723331,-3.032972,53.199735 +Anglo-Scottish -1,Hunterston (UK),Deeside (UK),422,,2400,built,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/77,-4.898329,55.723331,-3.032972,53.199735 ALEGrO,Lixhe (BE),Oberzier (DE),100,,1000,built,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/92,5.67933,50.7567965,6.474704,50.867532 North Sea Link,Kvilldal (NO),Blythe (GB),720,,1400,under construction,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/110,6.637527,59.515096,-1.510277,55.126957 HVDC 
SuedOstLink,Wolmirstedt (DE),Isar (DE),,557,2000,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/130,11.629014,52.252137,12.091596,48.080837 diff --git a/data/parameter_corrections.yaml b/data/parameter_corrections.yaml index 83a17d51..89e002c9 100644 --- a/data/parameter_corrections.yaml +++ b/data/parameter_corrections.yaml @@ -33,12 +33,13 @@ Link: "14559": "6240" # fix wrong bus allocation from 6241 "12998": "1333" # combine link 12998 + 12997 in 12998 "5627": '2309' # combine link 5627 + 5628 in 5627 + "8068": "5819" # fix GB location of Anglo-Scottish interconnector length: index: "12998": 409.0 "5627": 26.39 bus0: index: - # set bus0 == bus1 for removing the link in remove_unconnected_components - "5628": "7276" - "12997": "7276" \ No newline at end of file + "14552": "5819" # fix GB location of GB-IE interconnector + "5628": "7276" # bus0 == bus1 to remove link in remove_unconnected_components + "12997": "7276" # bus0 == bus1 to remove link in remove_unconnected_components \ No newline at end of file diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 85a7337b..90c04f08 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -39,6 +39,8 @@ Upcoming Release * The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) +* Parameter corrections for East-Western and Anglo-Scottish interconnectors (`#206 `_) + * Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio (`#205 `_). 
diff --git a/scripts/base_network.py b/scripts/base_network.py index 1d5fddd7..e64f533b 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -201,8 +201,8 @@ def _add_links_from_tyndp(buses, links): buses = buses.loc[keep_b['Bus']] links = links.loc[keep_b['Link']] - links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.15) - # Corresponds approximately to 15km tolerances + links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20) + # Corresponds approximately to 20km tolerances if links_tyndp["j"].notnull().any(): logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])) From 03170f4aaf50e78929c9db56e5a84cab1ce48619 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 26 Nov 2020 17:25:14 +0100 Subject: [PATCH 07/34] add option for p_nom_max factors in {opts} wildcard (#207) * prepare: add option for potential reduction sweeps * prepare: fix attr lookup for potential/cost reduction sweeps * prepare: fix attr naming cost -> capital_cost * add release notes --- doc/configtables/opts.csv | 2 +- doc/release_notes.rst | 5 ++++- scripts/prepare_network.py | 11 +++++++---- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index e43528fe..55a9c471 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -7,4 +7,4 @@ Trigger, Description, Definition, Status ``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use ``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. 
at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `_, Untested ``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() `_, Untested -``carrier+factor``, "Alter the capital cost of a carrier by a factor. Example: ``solar+0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use \ No newline at end of file +``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use \ No newline at end of file diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 90c04f08..e5a773c5 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -17,7 +17,10 @@ Upcoming Release cross-border transfer capacities. Moreover, line and link capacities can be capped in the ``config.yaml`` at ``lines: s_nom_max:`` and ``links: p_nom_max`` (`#166 `_). -* Added an option to alter the capital cost of carriers by a factor via ``carrier+factor`` in the ``{opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+0.5`` reduces the capital cost of solar to 50% of original values (`#167 `_). +* Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard. + This can be useful for exploring uncertain cost parameters. + Example: ``solar+c0.5`` reduces the capital cost of solar to 50% of original values + (`#167 `_ and `#207 `_). * Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network`. 
diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index ba6bb9e2..fe88f457 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -205,14 +205,17 @@ if __name__ == "__main__": suptechs = map(lambda c: c.split("-", 2)[0], n.carriers.index) if oo[0].startswith(tuple(suptechs)): carrier = oo[0] - cost_factor = float(oo[1]) + # handles only p_nom_max as stores and lines have no potentials + attr_lookup = {"p": "p_nom_max", "c": "capital_cost"} + attr = attr_lookup[oo[1][0]] + factor = float(oo[1][1:]) if carrier == "AC": # lines do not have carrier - n.lines.capital_cost *= cost_factor + n.lines[attr] *= factor else: - comps = {"Generator", "Link", "StorageUnit"} + comps = {"Generator", "Link", "StorageUnit", "Store"} for c in n.iterate_components(comps): sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel,"capital_cost"] *= cost_factor + c.df.loc[sel,attr] *= factor if 'Ep' in opts: add_emission_prices(n) From c1447875d55463f751143c98782ef885a8b4760d Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Wed, 2 Dec 2020 17:48:47 +0100 Subject: [PATCH 08/34] no defaults channel in environment.yaml (#144) * no defaults channel in environment.yaml * GIS from conda-forge * remove channel pinning and add descartes via conda * remove geopandas version restriction * travis: remove duplicated conda list --- environment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yaml b/environment.yaml index 06f842a9..cce0050b 100644 --- a/environment.yaml +++ b/environment.yaml @@ -4,7 +4,6 @@ name: pypsa-eur channels: - - defaults - conda-forge - bioconda - http://conda.anaconda.org/gurobi @@ -51,6 +50,7 @@ dependencies: - rasterio - shapely - libgdal<=3.0.4 + - descartes # Solvers - gurobi:gurobi # until https://github.com/conda-forge/pypsa-feedstock/issues/4 closed From f18b7b02cb72de79a82a2f403ba85d1acb1bb531 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 3 Dec 2020 10:02:23 +0100 Subject: [PATCH 
09/34] StorageUnit correct efficiency_dispatch/store (counter-proposal 2) (#202) * fix efficiencies of storage units counter proposal * extra_components: fix roundtrip efficiency * add_electricity: remove misleading roundtrip efficiency for storageunit * add release notes --- data/costs.csv | 2 +- doc/release_notes.rst | 3 +++ scripts/add_electricity.py | 3 --- scripts/add_extra_components.py | 11 +++++++---- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/data/costs.csv b/data/costs.csv index 330cc3bb..8953eb8a 100644 --- a/data/costs.csv +++ b/data/costs.csv @@ -114,7 +114,7 @@ DAC,2030,lifetime,30,years,Fasihi DAC,2030,FOM,4,%/year,Fasihi battery inverter,2030,investment,411,USD/kWel,budischak2013 battery inverter,2030,lifetime,20,years,budischak2013 -battery inverter,2030,efficiency,0.81,per unit,budischak2013; Lund and Kempton (2008) http://dx.doi.org/10.1016/j.enpol.2008.06.007 +battery inverter,2030,efficiency,0.9,per unit charge/discharge,budischak2013; Lund and Kempton (2008) http://dx.doi.org/10.1016/j.enpol.2008.06.007 battery inverter,2030,FOM,3,%/year,budischak2013 battery storage,2030,investment,192,USD/kWh,budischak2013 battery storage,2030,lifetime,15,years,budischak2013 diff --git a/doc/release_notes.rst b/doc/release_notes.rst index e5a773c5..7efea8be 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -42,6 +42,9 @@ Upcoming Release * The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) +* Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies. + In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip (`#202 _). 
+ * Parameter corrections for East-Western and Anglo-Scottish interconnectors (`#206 `_) * Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio (`#205 `_). diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 7a32e628..936ca1b5 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -166,13 +166,10 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None): def costs_for_storage(store, link1, link2=None, max_hours=1.): capital_cost = link1['capital_cost'] + max_hours * store['capital_cost'] - efficiency = link1['efficiency']**0.5 if link2 is not None: capital_cost += link2['capital_cost'] - efficiency *= link2['efficiency']**0.5 return pd.Series(dict(capital_cost=capital_cost, marginal_cost=0., - efficiency=efficiency, co2_emissions=0.)) if elec_config is None: diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 219c082d..0b48af2a 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -70,6 +70,9 @@ def attach_storageunits(n, costs): buses_i = n.buses.index + lookup_store = {"H2": "electrolysis", "battery": "battery inverter"} + lookup_dispatch = {"H2": "fuel cell", "battery": "battery inverter"} + for carrier in carriers: n.madd("StorageUnit", buses_i, ' ' + carrier, bus=buses_i, @@ -77,8 +80,8 @@ def attach_storageunits(n, costs): p_nom_extendable=True, capital_cost=costs.at[carrier, 'capital_cost'], marginal_cost=costs.at[carrier, 'marginal_cost'], - efficiency_store=costs.at[carrier, 'efficiency'], - efficiency_dispatch=costs.at[carrier, 'efficiency'], + efficiency_store=costs.at[lookup_store[carrier], 'efficiency'], + efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency'], max_hours=max_hours[carrier], cyclic_state_of_charge=True) @@ -132,7 +135,7 @@ def attach_stores(n, costs): bus0=buses_i, bus1=b_buses_i, 
carrier='battery charger', - efficiency=costs.at['battery inverter', 'efficiency']**0.5, + efficiency=costs.at['battery inverter', 'efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True) @@ -140,7 +143,7 @@ def attach_stores(n, costs): bus0=b_buses_i, bus1=buses_i, carrier='battery discharger', - efficiency=costs.at['battery inverter','efficiency']**0.5, + efficiency=costs.at['battery inverter','efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True) From 9792069ab3304f64ecc6566b54eb7bab07401a2e Mon Sep 17 00:00:00 2001 From: FabianHofmann Date: Thu, 3 Dec 2020 12:49:04 +0100 Subject: [PATCH 10/34] Update load processing (#211) * build_load_data * Add documentation * updating load data import * Update Config files * Update load.csv * Update add_electricity.py * change log file name * Update scripts/add_electricity.py Co-authored-by: FabianHofmann * Update scripts/build_load_data.py Co-authored-by: FabianHofmann * Update scripts/build_load_data.py Co-authored-by: FabianHofmann * Update scripts/build_load_data.py Co-authored-by: FabianHofmann * Update build_load_data.py * Update build_load_data.py * Update scripts/build_load_data.py Co-authored-by: FabianHofmann * update gap handling in build_load_data * Update build_load_data.py * Update config.test1.yaml * update test.config * Update config.tutorial.yaml * update load csv function for load data * Update build_load_data.py * Update config.test1.yaml * Update add_electricity.py * Update build_load_data.py * Added error messages if load data contains gaps after modifications * general adjustments: - reduce data source to only entsoe statistics - sanitize code - adjust logging messages - adjust daocstrings * update Snakefile config and docs * update release notes rename build_load -> build_load_data in config * small follow up * - reintroduce choice between powerstatistics and transparency - remove load_ timeseries from databundle - always 
build load_data - reinsert scaling factor in config - fix url to 2019 version * update doc: configtable, release notes update config.yaml * follow up Co-authored-by: Jan Frederick Co-authored-by: JanFrederickUnnewehr <50404069+JanFrederickUnnewehr@users.noreply.github.com> --- Snakefile | 10 +- config.default.yaml | 5 + config.tutorial.yaml | 5 + doc/configtables/load.csv | 7 +- doc/configuration.rst | 10 +- doc/preparation.rst | 1 + doc/preparation/build_load_data.rst | 12 ++ doc/release_notes.rst | 3 + scripts/add_electricity.py | 24 +-- scripts/build_load_data.py | 224 ++++++++++++++++++++++++++++ test/config.test1.yaml | 5 + 11 files changed, 281 insertions(+), 25 deletions(-) create mode 100644 doc/preparation/build_load_data.rst create mode 100755 scripts/build_load_data.py diff --git a/Snakefile b/Snakefile index c043e57a..133f7843 100644 --- a/Snakefile +++ b/Snakefile @@ -53,8 +53,7 @@ datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls', 'eez/World_EEZ_v8_2014.shp', 'EIA_hydro_generation_2000_2014.csv', 'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp', 'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz', - 'nama_10r_3gdp.tsv.gz', 'time_series_60min_singleindex_filtered.csv', - 'corine/g250_clc06_V18_5.tif'] + 'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif'] if not config.get('tutorial', False): datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"]) @@ -65,6 +64,11 @@ if config['enable'].get('retrieve_databundle', True): log: "logs/retrieve_databundle.log" script: 'scripts/retrieve_databundle.py' +rule build_load_data: + output: "resources/load.csv" + log: "logs/build_load_data.log" + script: 'scripts/build_load_data.py' + rule build_powerplants: input: base_network="networks/base.nc", @@ -204,7 +208,7 @@ rule add_electricity: powerplants='resources/powerplants.csv', hydro_capacities='data/bundle/hydro_capacities.csv', geth_hydro_capacities='data/geth2015_hydro_capacities.csv', - 
opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv', + load='resources/load.csv', nuts3_shapes='resources/nuts3_shapes.geojson', **{'profile_' + t: "resources/profile_" + t + ".nc" for t in config['renewable']} diff --git a/config.default.yaml b/config.default.yaml index ff7a503f..98f8ed67 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -168,6 +168,11 @@ transformers: type: '' load: + url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv + power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + interpolate_limit: 3 # data gaps up until this size are interpolated linearly + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + manual_adjustments: true # false scaling_factor: 1.0 costs: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index a51c2202..aed8693e 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -146,6 +146,11 @@ transformers: type: '' load: + url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv + power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + interpolate_limit: 3 # data gaps up until this size are interpolated linearly + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + manual_adjustments: true # false scaling_factor: 1.0 costs: diff --git a/doc/configtables/load.csv b/doc/configtables/load.csv index 035b27a1..66f3b994 100644 --- a/doc/configtables/load.csv +++ b/doc/configtables/load.csv @@ -1,2 +1,7 @@ ,Unit,Values,Description -scaling_factor,--,float,"Global correction factor for the load time series." \ No newline at end of file +url,--,string,"Link to open power system data time series data." 
+power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards). +interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which interpolated linearly." +time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings." +manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`." +scaling_factor,--,float,"Global correction factor for the load time series." diff --git a/doc/configuration.rst b/doc/configuration.rst index bf276c06..f0e1717b 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -218,7 +218,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 170-171 + :lines: 170-174 .. csv-table:: :header-rows: 1 @@ -232,7 +232,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 173-185 + :lines: 175-188 .. csv-table:: :header-rows: 1 @@ -254,7 +254,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 187-197 + :lines: 190-200 .. csv-table:: :header-rows: 1 @@ -266,7 +266,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 187,198-214 + :lines: 190,201-217 .. csv-table:: :header-rows: 1 @@ -280,7 +280,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 216-355 + :lines: 219-358 .. 
csv-table:: :header-rows: 1 diff --git a/doc/preparation.rst b/doc/preparation.rst index b2749a41..9e986580 100644 --- a/doc/preparation.rst +++ b/doc/preparation.rst @@ -39,6 +39,7 @@ together into a detailed PyPSA network stored in ``networks/elec.nc``. preparation/retrieve preparation/build_shapes + preparation/build_load_data preparation/build_cutout preparation/build_natura_raster preparation/prepare_links_p_nom diff --git a/doc/preparation/build_load_data.rst b/doc/preparation/build_load_data.rst new file mode 100644 index 00000000..03535981 --- /dev/null +++ b/doc/preparation/build_load_data.rst @@ -0,0 +1,12 @@ +.. + SPDX-FileCopyrightText: 2020-2021 The PyPSA-Eur Authors + + SPDX-License-Identifier: CC-BY-4.0 + +.. _load_data: + +Rule ``build_load_data`` +============================= + + +.. automodule:: build_load_data diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 7efea8be..065310c6 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -49,6 +49,9 @@ Upcoming Release * Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio (`#205 `_). +* Electricity consumption data is now directly retrieved from the `OPSD website `_ using the rule ``build_load_data``. The user can decide whether to take the ENTSOE power statistics data (defaul) or the ENTSOE transparency data. + + PyPSA-Eur 0.2.0 (8th June 2020) ================================== diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 936ca1b5..80904b7a 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -53,14 +53,9 @@ Inputs :scale: 34 % - ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; NOT CURRENTLY USED! -- ``data/bundle/time_series_60min_singleindex_filtered.csv``: Hourly per-country load profiles since 2010 from the `ENTSO-E statistical database `_ - .. 
image:: ../img/load-box.png - :scale: 33 % - - .. image:: ../img/load-ts.png - :scale: 33 % +- ``resources/opsd_load.csv`` Hourly per-country load profiles. - ``resources/regions_onshore.geojson``: confer :ref:`busregions` - ``resources/nuts3_shapes.geojson``: confer :ref:`shapes` - ``resources/powerplants.csv``: confer :ref:`powerplants` @@ -91,7 +86,6 @@ It further adds extendable ``generators`` with **zero** capacity for """ from vresutils.costdata import annuity -from vresutils.load import timeseries_opsd from vresutils import transfer as vtransfer import logging @@ -200,7 +194,6 @@ def load_powerplants(ppl_fn=None): .rename(columns=str.lower).drop(columns=['efficiency']) .replace({'carrier': carrier_dict})) - # ============================================================================= # Attach components # ============================================================================= @@ -211,17 +204,15 @@ def attach_load(n): substation_lv_i = n.buses.index[n.buses['substation_lv']] regions = (gpd.read_file(snakemake.input.regions).set_index('name') .reindex(substation_lv_i)) - opsd_load = (timeseries_opsd(slice(*n.snapshots[[0,-1]].year.astype(str)), - snakemake.input.opsd_load) * - snakemake.config.get('load', {}).get('scaling_factor', 1.0)) + opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True) + .filter(items=snakemake.config['countries'])) - # Convert to naive UTC (has to be explicit since pandas 0.24) - opsd_load.index = opsd_load.index.tz_localize(None) + scaling = snakemake.config.get('load', {}).get('scaling_factor', 1.0) + logger.info(f"Load data scaled with scalling factor {scaling}.") + opsd_load *= scaling nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') - def normed(x): return x.divide(x.sum()) - def upsample(cntry, group): l = opsd_load[cntry] if len(group) == 1: @@ -236,7 +227,8 @@ def attach_load(n): index=group.index) # relative factors 0.6 and 0.4 have been determined from a linear - # 
regression on the country to continent load data (refer to vresutils.load._upsampling_weights) + # regression on the country to continent load data + # (refer to vresutils.load._upsampling_weights) factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n)) return pd.DataFrame(factors.values * l.values[:,np.newaxis], index=l.index, columns=factors.index) diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py new file mode 100755 index 00000000..0b781dd3 --- /dev/null +++ b/scripts/build_load_data.py @@ -0,0 +1,224 @@ +# coding: utf-8 +""" + +This rule downloads the load data from `Open Power System Data Time series `_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file. + +Relevant Settings +----------------- + +.. code:: yaml + + snapshots: + + load: + url: + interpolate_limit: + time_shift_for_large_gaps: + manual_adjustments: true + + +.. seealso:: + Documentation of the configuration file ``config.yaml`` at + :ref:`load_cf` + +Inputs +------ + + +Outputs +------- + +- ``resource/time_series_60min_singleindex_filtered.csv``: + + +""" + +import logging +logger = logging.getLogger(__name__) +from _helpers import configure_logging + +import pandas as pd +import numpy as np +import dateutil +from pandas import Timedelta as Delta + + +def load_timeseries(fn, years, countries, powerstatistics=True): + """ + Read load data from OPSD time-series package version 2020-10-06. + + Parameters + ---------- + years : None or slice() + Years for which to read load data (defaults to + slice("2018","2019")) + fn : str + File name or url location (file format .csv) + countries : listlike + Countries for which to read load data. 
+ powerstatistics: bool + Whether the electricity consumption data of the ENTSOE power + statistics (if true) or of the ENTSOE transparency map (if false) + should be parsed. + + Returns + ------- + load : pd.DataFrame + Load time-series with UTC timestamps x ISO-2 countries + """ + logger.info(f"Retrieving load data from '{fn}'.") + + pattern = 'power_statistics' if powerstatistics else '_transparency' + pattern = f'_load_actual_entsoe_{pattern}' + rename = lambda s: s[:-len(pattern)] + date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True) + return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser) + .filter(like=pattern) + .rename(columns=rename) + .dropna(how="all", axis=0) + .rename(columns={'GB_UKM' : 'GB'}) + .filter(items=countries) + .loc[years]) + + +def consecutive_nans(ds): + return (ds.isnull().astype(int) + .groupby(ds.notnull().astype(int).cumsum()[ds.isnull()]) + .transform('sum').fillna(0)) + + +def fill_large_gaps(ds, shift): + """ + Fill up large gaps with load data from the previous week. + + This function fills gaps ragning from 3 to 168 hours (one week). 
+ """ + shift = Delta(shift) + nhours = shift / np.timedelta64(1, 'h') + if (consecutive_nans(ds) > nhours).any(): + logger.warning('There exist gaps larger then the time shift used for ' + 'copying time slices.') + time_shift = pd.Series(ds.values, ds.index + shift) + return ds.where(ds.notnull(), time_shift.reindex_like(ds)) + + +def nan_statistics(df): + def max_consecutive_nans(ds): + return (ds.isnull().astype(int) + .groupby(ds.notnull().astype(int).cumsum()) + .sum().max()) + consecutive = df.apply(max_consecutive_nans) + total = df.isnull().sum() + max_total_per_month = df.isnull().resample('m').sum().max() + return pd.concat([total, consecutive, max_total_per_month], + keys=['total', 'consecutive', 'max_total_per_month'], axis=1) + + +def copy_timeslice(load, cntry, start, stop, delta): + start = pd.Timestamp(start) + stop = pd.Timestamp(stop) + if start-delta in load.index and stop in load.index and cntry in load: + load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values + + +def manual_adjustment(load, powerstatistics): + """ + Adjust gaps manual for load data from OPSD time-series package. + + 1. For the ENTSOE power statistics load data (if powerstatistics is True) + + Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the + same load curve as Serbia and Albania the same as Macdedonia, both scaled + by the corresponding ratio of total energy consumptions reported by + IEA Data browser [0] for the year 2013. + + 2. For the ENTSOE transparency load data (if powerstatistics is False) + + Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the + same load curve as Montenegro, scaled by the corresponding ratio of total energy + consumptions reported by IEA Data browser [0] for the year 2016. 
+ + [0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons + + + Parameters + ---------- + load : pd.DataFrame + Load time-series with UTC timestamps x ISO-2 countries + powerstatistics: bool + Whether argument load comprises the electricity consumption data of + the ENTSOE power statistics or of the ENTSOE transparency map + + Returns + ------- + load : pd.DataFrame + Manual adjusted and interpolated load time-series with UTC + timestamps x ISO-2 countries + """ + + if powerstatistics: + if 'MK' in load.columns: + if 'AL' not in load.columns or load.AL.isnull().values.all(): + load['AL'] = load['MK'] * (4.1 / 7.4) + if 'RS' in load.columns: + if 'KV' not in load.columns or load.KV.isnull().values.all(): + load['KV'] = load['RS'] * (4.8 / 27.) + + copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1)) + copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2)) + copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1)) + copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1)) + # is a WE, so take WE before + copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1)) + copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1)) + copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1)) + # whole january missing + copy_timeslice(load, 'GB', '2009-12-31 23:00', '2010-01-31 23:00', Delta(days=-364)) + + else: + if 'ME' in load: + if 'AL' not in load and 'AL' in countries: + load['AL'] = load.ME * (5.7/2.9) + if 'MK' not in load and 'MK' in countries: + load['MK'] = load.ME * (6.7/2.9) + copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1)) + + return load + + +if __name__ == "__main__": + + if 'snakemake' not in globals(): + from _helpers import mock_snakemake + snakemake = mock_snakemake('build_load_data') + + 
configure_logging(snakemake) + + config = snakemake.config + powerstatistics = config['load']['power_statistics'] + url = config['load']['url'] + interpolate_limit = config['load']['interpolate_limit'] + countries = config['countries'] + snapshots = pd.date_range(freq='h', **config['snapshots']) + years = slice(snapshots[0], snapshots[-1]) + time_shift = config['load']['time_shift_for_large_gaps'] + + load = load_timeseries(url, years, countries, powerstatistics) + + if config['load']['manual_adjustments']: + load = manual_adjustment(load, powerstatistics) + + logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.") + load = load.interpolate(method='linear', limit=interpolate_limit) + + logger.info("Filling larger gaps by copying time-slices of period " + f"'{time_shift}'.") + load = load.apply(fill_large_gaps, shift=time_shift) + + assert not load.isna().any().any(), ( + 'Load data contains nans. Adjust the parameters ' + '`time_shift_for_large_gaps` or modify the `manual_adjustment` function ' + 'for implementing the needed load data modifications.') + + load.to_csv(snakemake.output[0]) + diff --git a/test/config.test1.yaml b/test/config.test1.yaml index 2efdaecb..e8f17758 100755 --- a/test/config.test1.yaml +++ b/test/config.test1.yaml @@ -146,6 +146,11 @@ transformers: type: '' load: + url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv + power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data + interpolate_limit: 3 # data gaps up until this size are interpolated linearly + time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from + manual_adjustments: true # false scaling_factor: 1.0 costs: From cfb979a0d0f037a24b9d916b726805108386049b Mon Sep 17 00:00:00 2001 From: Martha Maria <53824825+eb5194@users.noreply.github.com> Date: Thu, 3 Dec 2020 15:17:16 +0100 Subject: [PATCH 11/34] Custom busmap for clustering 
(#193) * add option to use custom clustermaps from data folder * adapt default config to custom busmap * input file from Snakefile * adapt input description * add option to use custom clustermaps from data folder * adapt default config to custom busmap * input file from Snakefile * adapt input description * Snakefile: custom_busmap in cluster_network input is now csv * cluster_network: custom_busmap is now read as csv file, adaptions of description * simplify_network: adapt descriptions * configfiles: add cutom_clustermaps switch * unify clustarmap and busmap names * unify clustermap and busmap names * test/config: unify clustermap and busmap names * cluster_network: make clustering_for_n_clusters compatible with simplify_network * simplify_network: make compatible with changes in cluster_network.py * Update scripts/cluster_network.py * Update scripts/simplify_network.py * Update scripts/simplify_network.py * Update scripts/cluster_network.py * Update scripts/cluster_network.py * cluster_network: move custom_busmap flag to enable; simplify names * cluster_network: move custom_busmap flag to enable; simplify names * custom_busmap: add documentation * cluster_network: add default for custom_busmap for compatibility with old configs * cluster_network: add default for custom_busmap for compatibility with old configs Co-authored-by: martha.frysztacki Co-authored-by: Fabian Neumann --- Snakefile | 2 ++ config.default.yaml | 1 + config.tutorial.yaml | 1 + doc/configtables/toplevel.csv | 3 ++- doc/release_notes.rst | 7 +++++-- scripts/cluster_network.py | 25 +++++++++++++++++-------- scripts/simplify_network.py | 6 +++--- test/config.test1.yaml | 1 + 8 files changed, 32 insertions(+), 14 deletions(-) diff --git a/Snakefile b/Snakefile index 133f7843..6b92999e 100644 --- a/Snakefile +++ b/Snakefile @@ -244,6 +244,8 @@ rule cluster_network: regions_onshore="resources/regions_onshore_{network}_s{simpl}.geojson", 
regions_offshore="resources/regions_offshore_{network}_s{simpl}.geojson", busmap=ancient('resources/busmap_{network}_s{simpl}.csv'), + custom_busmap=("data/custom_busmap_{network}_s{simpl}_{clusters}.csv" + if config["enable"].get("custom_busmap", False) else []), tech_costs=COSTS output: network='networks/{network}_s{simpl}_{clusters}.nc', diff --git a/config.default.yaml b/config.default.yaml index 98f8ed67..0d299fe5 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -32,6 +32,7 @@ enable: retrieve_cutout: true build_natura_raster: false retrieve_natura_raster: true + custom_busmap: false electricity: voltages: [220., 300., 380.] diff --git a/config.tutorial.yaml b/config.tutorial.yaml index aed8693e..80b8c7a0 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -31,6 +31,7 @@ enable: retrieve_cutout: true build_natura_raster: false retrieve_natura_raster: true + custom_busmap: false electricity: voltages: [220., 300., 380.] diff --git a/doc/configtables/toplevel.csv b/doc/configtables/toplevel.csv index 7eb7dd61..4592161b 100644 --- a/doc/configtables/toplevel.csv +++ b/doc/configtables/toplevel.csv @@ -13,4 +13,5 @@ enable,,, -- build_cutout,bool,"{true, false}","Switch to enable the building of cutouts via the rule :mod:`build_cutout`." -- retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`." -- build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`." --- retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`." \ No newline at end of file +-- retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`." +-- custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. 
If activated the rule looks for provided busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``." \ No newline at end of file diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 065310c6..bec532e6 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -42,6 +42,11 @@ Upcoming Release * The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) +* Added an option to use custom busmaps in rule :mod:`cluster_network`. To use this feature set ``enable: custom_busmap: true``. + Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``, + which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``. + i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc`` (`#193 `_). + * Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies. In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip (`#202 _). @@ -51,8 +56,6 @@ Upcoming Release * Electricity consumption data is now directly retrieved from the `OPSD website `_ using the rule ``build_load_data``. The user can decide whether to take the ENTSOE power statistics data (defaul) or the ENTSOE transparency data. 
- - PyPSA-Eur 0.2.0 (8th June 2020) ================================== diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 620d8868..3fbe2d68 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -33,8 +33,9 @@ Inputs - ``resources/regions_onshore_{network}_s{simpl}.geojson``: confer :ref:`simplify` - ``resources/regions_offshore_{network}_s{simpl}.geojson``: confer :ref:`simplify` -- ``resources/clustermaps_{network}_s{simpl}.h5``: confer :ref:`simplify` +- ``resources/busmap_{network}_s{simpl}.csv``: confer :ref:`simplify` - ``networks/{network}_s{simpl}.nc``: confer :ref:`simplify` +- ``data/custom_busmap_{network}_s{simpl}_{clusters}.csv``: optional input Outputs ------- @@ -49,7 +50,8 @@ Outputs .. image:: ../img/regions_offshore_elec_s_X.png :scale: 33 % -- ``resources/clustermaps_{network}_s{simpl}_{clusters}.h5``: Mapping of buses and lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; has keys ['/busmap', '/busmap_s', '/linemap', '/linemap_negative', '/linemap_positive'] +- ``resources/busmap_{network}_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; +- ``resources/linemap_{network}_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; - ``networks/{network}_s{simpl}_{clusters}.nc``: .. 
image:: ../img/elec_s_X.png @@ -267,10 +269,9 @@ def plot_busmap_for_n_clusters(n, n_clusters=50): n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) del cs, cr -def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None, - line_length_factor=1.25, potential_mode='simple', - solver_name="cbc", algorithm="kmeans", - extended_link_costs=0, focus_weights=None): +def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, + line_length_factor=1.25, potential_mode='simple', solver_name="cbc", + algorithm="kmeans", extended_link_costs=0, focus_weights=None): if potential_mode == 'simple': p_nom_max_strategy = np.sum @@ -280,8 +281,15 @@ def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None, raise AttributeError("potential_mode should be one of 'simple' or 'conservative', " "but is '{}'".format(potential_mode)) + if custom_busmap: + busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True) + busmap.index = busmap.index.astype(str) + logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}") + else: + busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm) + clustering = get_clustering_from_busmap( - n, busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm), + n, busmap, bus_strategies=dict(country=_make_consense("Bus", "country")), aggregate_generators_weighted=True, aggregate_generators_carriers=aggregate_carriers, @@ -363,7 +371,8 @@ if __name__ == "__main__": return v potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential'] for tech in renewable_carriers])) - clustering = clustering_for_n_clusters(n, n_clusters, aggregate_carriers, + custom_busmap = snakemake.config["enable"].get("custom_busmap", False) + clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers, line_length_factor=line_length_factor, potential_mode=potential_mode, 
solver_name=snakemake.config['solving']['solver']['name'], diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index d030a283..53631005 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -62,7 +62,7 @@ Outputs .. image:: ../img/regions_offshore_elec_s .png :scale: 33 % -- ``resources/clustermaps_{network}_s{simpl}.h5``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec_s{simpl}.nc``; has keys ['/busmap_s'] +- ``resources/busmap_{network}_s{simpl}.csv``: Mapping of buses from ``networks/elec.nc`` to ``networks/elec_s{simpl}.nc``; - ``networks/{network}_s{simpl}.nc``: .. image:: ../img/elec_s.png @@ -315,7 +315,7 @@ def remove_stubs(n): return n, busmap def cluster(n, n_clusters): - logger.info("Clustering to {} buses".format(n_clusters)) + logger.info(f"Clustering to {n_clusters} buses") renewable_carriers = pd.Index([tech for tech in n.generators.carrier.unique() @@ -329,7 +329,7 @@ def cluster(n, n_clusters): potential_mode = (consense(pd.Series([snakemake.config['renewable'][tech]['potential'] for tech in renewable_carriers])) if len(renewable_carriers) > 0 else 'conservative') - clustering = clustering_for_n_clusters(n, n_clusters, potential_mode=potential_mode, + clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode, solver_name=snakemake.config['solving']['solver']['name']) return clustering.network, clustering.busmap diff --git a/test/config.test1.yaml b/test/config.test1.yaml index e8f17758..0ea71ca4 100755 --- a/test/config.test1.yaml +++ b/test/config.test1.yaml @@ -31,6 +31,7 @@ enable: retrieve_cutout: true build_natura_raster: false retrieve_natura_raster: true + custom_busmap: false electricity: voltages: [220., 300., 380.] 
From 84edde8f63d7968926d83f36fc3e3194eaee0fc1 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 3 Dec 2020 16:02:21 +0100 Subject: [PATCH 12/34] add time series segmentation with tsam package (#186) * add time segmentation with tsam * cut off SEG * Snakefile: adjust memory function * untangle memory reservation calculation * prepare: document segmentation option * correct typo --- Snakefile | 7 ++++- doc/configtables/opts.csv | 1 + doc/release_notes.rst | 6 +++++ environment.docs.yaml | 1 + environment.yaml | 1 + scripts/prepare_network.py | 54 +++++++++++++++++++++++++++++++++++--- 6 files changed, 65 insertions(+), 5 deletions(-) diff --git a/Snakefile b/Snakefile index 6b92999e..a82fdae3 100644 --- a/Snakefile +++ b/Snakefile @@ -279,7 +279,7 @@ rule prepare_network: output: 'networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' log: "logs/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" threads: 1 - resources: mem=1000 + resources: mem=4000 # benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}" script: "scripts/prepare_network.py" @@ -290,6 +290,11 @@ def memory(w): if m is not None: factor /= int(m.group(1)) break + for o in w.opts.split('-'): + m = re.match(r'^(\d+)seg$', o, re.IGNORECASE) + if m is not None: + factor *= int(m.group(1)) / 8760 + break if w.clusters.endswith('m'): return int(factor * (18000 + 180 * int(w.clusters[:-1]))) else: diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index 55a9c471..43d299d4 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -1,5 +1,6 @@ Trigger, Description, Definition, Status ``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `_), In active use +``nSEG``; e.g. 
``4380SEG``, "Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: ``apply_time_segmentation(), In active use ``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `_, In active use ``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `_, In active use ``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use diff --git a/doc/release_notes.rst b/doc/release_notes.rst index bec532e6..6e581705 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -56,6 +56,12 @@ Upcoming Release * Electricity consumption data is now directly retrieved from the `OPSD website `_ using the rule ``build_load_data``. The user can decide whether to take the ENTSOE power statistics data (defaul) or the ENTSOE transparency data. +* Added an option to the ``{opts}`` wildcard that applies a time series segmentation algorithm based on renewables, hydro inflow and load time series + to produce a given total number of adjacent snapshots of varying lengths. + This feature is an alternative to downsampling the temporal resolution by simply averaging and + uses the `tsam `_ package + (#186 `_). 
+ PyPSA-Eur 0.2.0 (8th June 2020) ================================== diff --git a/environment.docs.yaml b/environment.docs.yaml index 3c50f2f2..762e89af 100755 --- a/environment.docs.yaml +++ b/environment.docs.yaml @@ -49,6 +49,7 @@ dependencies: # The FRESNA/KIT stuff is not packaged for conda yet - pip: - vresutils==0.3.1 + - tsam>=1.1.0 - git+https://github.com/PyPSA/glaes.git#egg=glaes - git+https://github.com/PyPSA/geokit.git#egg=geokit - cdsapi diff --git a/environment.yaml b/environment.yaml index cce0050b..cb322632 100644 --- a/environment.yaml +++ b/environment.yaml @@ -57,6 +57,7 @@ dependencies: - pip: - vresutils==0.3.1 + - tsam>=1.1.0 - git+https://github.com/PyPSA/glaes.git#egg=glaes - git+https://github.com/PyPSA/geokit.git#egg=geokit - cdsapi diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index fe88f457..c2092980 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -11,7 +11,8 @@ Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such a - setting an **N-1 security margin** factor for transmission line capacities, - specifying an expansion limit on the **cost** of transmission expansion, - specifying an expansion limit on the **volume** of transmission expansion, and -- reducing the **temporal** resolution by averaging over multiple hours. +- reducing the **temporal** resolution by averaging over multiple hours + or segmenting time series into chunks of varying lengths using ``tsam``. 
Relevant Settings ----------------- @@ -133,7 +134,7 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1): def average_every_nhours(n, offset): - logger.info('Resampling the network to {}'.format(offset)) + logger.info(f"Resampling the network to {offset}") m = n.copy(with_time=False) snapshot_weightings = n.snapshot_weightings.resample(offset).sum() @@ -148,6 +149,47 @@ def average_every_nhours(n, offset): return m +def apply_time_segmentation(n, segments): + logger.info(f"Aggregating time series to {segments} segments.") + try: + import tsam.timeseriesaggregation as tsam + except: + raise ModuleNotFoundError("Optional dependency 'tsam' not found." + "Install via 'pip install tsam'") + + p_max_pu_norm = n.generators_t.p_max_pu.max() + p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm + + load_norm = n.loads_t.p_set.max() + load = n.loads_t.p_set / load_norm + + inflow_norm = n.storage_units_t.inflow.max() + inflow = n.storage_units_t.inflow / inflow_norm + + raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False) + + solver_name = snakemake.config["solving"]["solver"]["name"] + + agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw), + noTypicalPeriods=1, noSegments=int(segments), + segmentation=True, solver=solver_name) + + segmented = agg.createTypicalPeriods() + + weightings = segmented.index.get_level_values("Segment Duration") + offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0) + snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets] + + n.set_snapshots(pd.DatetimeIndex(snapshots, name='name')) + n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64") + + segmented.index = snapshots + n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm + n.loads_t.p_set = segmented[n.loads_t.p_set.columns] * load_norm + n.storage_units_t.inflow = segmented[n.storage_units_t.inflow.columns] * inflow_norm + + return n + def enforce_autarky(n, 
only_crossborder=False): if only_crossborder: lines_rm = n.lines.loc[ @@ -189,8 +231,12 @@ if __name__ == "__main__": if m is not None: n = average_every_nhours(n, m.group(0)) break - else: - logger.info("No resampling") + + for o in opts: + m = re.match(r'^\d+seg$', o, re.IGNORECASE) + if m is not None: + n = apply_time_segmentation(n, m.group(0)[:-3]) + break for o in opts: if "Co2L" in o: From 2fc1ea0255bdffe45cc9158a7463c2508bb02b8f Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 3 Dec 2020 19:50:53 +0100 Subject: [PATCH 13/34] some refactoring and code cleaning (#190) * some refactoring and code cleaning * execute pre-commit * pre-commit: limit large files * add license to .pre-commit * add pre-commit to env * solve: tidy memory logger * travis: add conda list for easier debugging * undo config test/tutorial without plotting, rm matplotlibrc, .licenses * remove {networks} wildcard * unadd pre-commit config * add release notes * restore REUSE compliance * fix docs environment python version * env: remove gurobi from dependencies * fix unclean merge block * fix elif to if * lighter rtd style * lighter rtd style II --- .github/ISSUE_TEMPLATE/bug_report.md | 4 +- .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/pull_request_template.md | 4 +- .gitignore | 1 + .readthedocs.yml | 2 +- .reuse/dep5 | 2 +- .travis.yml | 2 +- README.md | 2 +- Snakefile | 236 +++++++++--------- cluster.yaml | 22 -- config.default.yaml | 67 +---- config.tutorial.yaml | 84 +------ data/links_tyndp.csv | 2 +- data/parameter_corrections.yaml | 4 +- doc/_static/theme_overrides.css | 74 +++++- doc/cloudcomputing.rst | 40 +-- doc/configtables/atlite.csv | 2 +- doc/configtables/costs.csv | 2 +- doc/configtables/hydro.csv | 2 +- doc/configtables/lines.csv | 2 +- doc/configtables/links.csv | 2 +- doc/configtables/offwind-ac.csv | 2 +- doc/configtables/offwind-dc.csv | 2 +- doc/configtables/onwind.csv | 2 +- doc/configtables/opts.csv | 2 +- doc/configtables/plotting.csv | 1 - 
doc/configtables/scenario.csv | 5 +- doc/configtables/snapshots.csv | 2 +- doc/configtables/solar.csv | 2 +- doc/configtables/solving-options.csv | 2 +- doc/configtables/solving-solver.csv | 2 +- doc/configtables/toplevel.csv | 4 +- doc/configtables/transformers.csv | 2 +- doc/configuration.rst | 4 +- doc/costs.rst | 2 +- doc/index.rst | 10 +- doc/installation.rst | 16 +- doc/introduction.rst | 4 +- doc/limitations.rst | 3 +- doc/plotting.rst | 2 +- doc/preparation/base_network.rst | 4 +- doc/preparation/build_bus_regions.rst | 2 +- doc/preparation/build_cutout.rst | 2 +- doc/release_notes.rst | 11 +- doc/simplification.rst | 2 +- doc/tutorial.rst | 20 +- doc/wildcards.rst | 14 +- .../environment.docs.yaml | 32 +-- .../environment.fixed.yaml | 0 environment.yaml => envs/environment.yaml | 6 +- scripts/_helpers.py | 8 +- scripts/add_electricity.py | 40 +-- scripts/add_extra_components.py | 16 +- scripts/base_network.py | 46 +++- scripts/build_bus_regions.py | 23 +- scripts/build_country_flh.py | 5 +- scripts/build_cutout.py | 7 +- scripts/build_hydro_profile.py | 13 +- scripts/build_load_data.py | 5 +- scripts/build_natura_raster.py | 3 +- scripts/build_powerplants.py | 9 +- scripts/build_renewable_profiles.py | 13 +- scripts/build_shapes.py | 18 +- scripts/cluster_network.py | 90 +++---- scripts/make_summary.py | 96 ++----- scripts/plot_network.py | 36 +-- scripts/plot_p_nom_max.py | 8 +- scripts/plot_summary.py | 35 +-- scripts/prepare_links_p_nom.py | 33 ++- scripts/prepare_network.py | 21 +- scripts/retrieve_databundle.py | 7 +- scripts/retrieve_natura_raster.py | 3 +- scripts/simplify_network.py | 22 +- scripts/solve_network.py | 22 +- scripts/solve_operations_network.py | 12 +- test/config.test1.yaml | 66 +---- 76 files changed, 590 insertions(+), 787 deletions(-) delete mode 100644 cluster.yaml rename environment.docs.yaml => envs/environment.docs.yaml (59%) rename environment.fixedversions.yaml => envs/environment.fixed.yaml (100%) rename environment.yaml => 
envs/environment.yaml (82%) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index a1671b69..2654595c 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,7 +11,7 @@ assignees: '' ## Checklist - [ ] I am using the current [`master`](https://github.com/PyPSA/pypsa-eur/tree/master) branch or the latest [release](https://github.com/PyPSA/pypsa-eur/releases). Please indicate. -- [ ] I am running on an up-to-date [`pypsa-eur` environment](https://github.com/PyPSA/pypsa-eur/blob/master/environment.yaml). Update via `conda env update -f environment.yaml`. +- [ ] I am running on an up-to-date [`pypsa-eur` environment](https://github.com/PyPSA/pypsa-eur/blob/master/envs/environment.yaml). Update via `conda env update -f envs/environment.yaml`. ## Describe the Bug @@ -24,4 +24,4 @@ assignees: '' ``` -``` \ No newline at end of file +``` diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index bb1951c0..5b64d242 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,4 +2,4 @@ blank_issues_enabled: false contact_links: - name: PyPSA Mailing List url: https://groups.google.com/forum/#!forum/pypsa - about: Please ask and answer general usage questions here. \ No newline at end of file + about: Please ask and answer general usage questions here. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b431c08a..05601c58 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -7,7 +7,7 @@ Closes # (if applicable). - [ ] I tested my contribution locally and it seems to work fine. - [ ] Code and workflow changes are sufficiently documented. -- [ ] Newly introduced dependencies are added to `environment.yaml` and `environment.docs.yaml`. +- [ ] Newly introduced dependencies are added to `envs/environment.yaml` and `envs/environment.docs.yaml`. 
- [ ] Changes in configuration options are added in all of `config.default.yaml`, `config.tutorial.yaml`, and `test/config.test1.yaml`. - [ ] Changes in configuration options are also documented in `doc/configtables/*.csv` and line references are adjusted in `doc/configuration.rst` and `doc/tutorial.rst`. -- [ ] A note for the release notes `doc/release_notes.rst` is amended in the format of previous release notes. \ No newline at end of file +- [ ] A note for the release notes `doc/release_notes.rst` is amended in the format of previous release notes. diff --git a/.gitignore b/.gitignore index 5ed82d0d..b4734ab2 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ __pycache__ *dconf gurobi.log +.vscode /bak /resources diff --git a/.readthedocs.yml b/.readthedocs.yml index 0ccb7def..173d21d7 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,4 +5,4 @@ version: 2 conda: - environment: environment.docs.yaml \ No newline at end of file + environment: envs/environment.docs.yaml diff --git a/.reuse/dep5 b/.reuse/dep5 index ef2e4f85..eb64a172 100644 --- a/.reuse/dep5 +++ b/.reuse/dep5 @@ -25,4 +25,4 @@ License: CC0-1.0 Files: borg-it Copyright: : 2017-2020 The PyPSA-Eur Authors -License: CC0-1.0 \ No newline at end of file +License: CC0-1.0 diff --git a/.travis.yml b/.travis.yml index b04adcd4..a6a29a39 100644 --- a/.travis.yml +++ b/.travis.yml @@ -20,7 +20,7 @@ before_install: # install conda environment - conda install -c conda-forge mamba - - mamba env create -f ./environment.yaml + - mamba env create -f ./envs/environment.yaml - conda activate pypsa-eur # install open-source solver diff --git a/README.md b/README.md index dcc7c6fc..c281e589 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ and local grid bottlenecks may cause unrealistic load-shedding or generator curtailment. We recommend to cluster the network to a couple of hundred nodes to remove these local inconsistencies. See the -discussion in Section 3.4 "Model validation" of the paper. 
+discussion in Section 3.4 "Model validation" of the paper. ![PyPSA-Eur Grid Model](doc/img/base.png) diff --git a/Snakefile b/Snakefile index a82fdae3..7616974e 100644 --- a/Snakefile +++ b/Snakefile @@ -11,33 +11,31 @@ if not exists("config.yaml"): configfile: "config.yaml" COSTS="data/costs.csv" +ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4) + wildcard_constraints: - ll="(v|c)([0-9\.]+|opt|all)|all", # line limit, can be volume or cost simpl="[a-zA-Z0-9]*|all", clusters="[0-9]+m?|all", - sectors="[+a-zA-Z0-9]+", + ll="(v|c)([0-9\.]+|opt|all)|all", opts="[-+a-zA-Z0-9\.]*" + rule cluster_all_elec_networks: - input: - expand("networks/elec_s{simpl}_{clusters}.nc", - **config['scenario']) + input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario']) + rule extra_components_all_elec_networks: - input: - expand("networks/elec_s{simpl}_{clusters}_ec.nc", - **config['scenario']) + input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario']) + rule prepare_all_elec_networks: - input: - expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config['scenario']) + input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + rule solve_all_elec_networks: - input: - expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", - **config['scenario']) + input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) + if config['enable'].get('prepare_links_p_nom', False): rule prepare_links_p_nom: @@ -45,7 +43,6 @@ if config['enable'].get('prepare_links_p_nom', False): log: 'logs/prepare_links_p_nom.log' threads: 1 resources: mem=500 - # group: 'nonfeedin_preparation' script: 'scripts/prepare_links_p_nom.py' @@ -55,20 +52,24 @@ datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls', 'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz', 'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif'] + if not config.get('tutorial', False): 
datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"]) + if config['enable'].get('retrieve_databundle', True): rule retrieve_databundle: - output: expand('data/bundle/{file}', file=datafiles) + output: expand('data/bundle/{file}', file=datafiles) log: "logs/retrieve_databundle.log" script: 'scripts/retrieve_databundle.py' + rule build_load_data: output: "resources/load.csv" log: "logs/build_load_data.log" script: 'scripts/build_load_data.py' + rule build_powerplants: input: base_network="networks/base.nc", @@ -77,9 +78,9 @@ rule build_powerplants: log: "logs/build_powerplants.log" threads: 1 resources: mem=500 - # group: 'nonfeedin_preparation' script: "scripts/build_powerplants.py" + rule base_network: input: eg_buses='data/entsoegridkit/buses.csv', @@ -98,9 +99,9 @@ rule base_network: benchmark: "benchmarks/base_network" threads: 1 resources: mem=500 - # group: 'nonfeedin_preparation' script: "scripts/base_network.py" + rule build_shapes: input: naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp', @@ -118,9 +119,9 @@ rule build_shapes: log: "logs/build_shapes.log" threads: 1 resources: mem=500 - # group: 'nonfeedin_preparation' script: "scripts/build_shapes.py" + rule build_bus_regions: input: country_shapes='resources/country_shapes.geojson', @@ -130,20 +131,21 @@ rule build_bus_regions: regions_onshore="resources/regions_onshore.geojson", regions_offshore="resources/regions_offshore.geojson" log: "logs/build_bus_regions.log" + threads: 1 resources: mem=1000 - # group: 'nonfeedin_preparation' script: "scripts/build_bus_regions.py" -if config['enable'].get('build_cutout', False): + +if config['enable'].get('build_cutout', False): rule build_cutout: output: directory("cutouts/{cutout}") log: "logs/build_cutout/{cutout}.log" - resources: mem=config['atlite'].get('nprocesses', 4) * 1000 - threads: config['atlite'].get('nprocesses', 4) benchmark: "benchmarks/build_cutout_{cutout}" - # group: 'feedin_preparation' + threads: 
ATLITE_NPROCESSES + resources: mem=ATLITE_NPROCESSES * 1000 script: "scripts/build_cutout.py" + if config['enable'].get('retrieve_cutout', True): rule retrieve_cutout: output: directory(expand("cutouts/{cutouts}", **config['atlite'])), @@ -151,43 +153,46 @@ if config['enable'].get('retrieve_cutout', True): script: 'scripts/retrieve_cutout.py' -if config['enable'].get('build_natura_raster', False): +if config['enable'].get('build_natura_raster', False): rule build_natura_raster: - input: + input: natura="data/bundle/natura/Natura2000_end2015.shp", cutouts=expand("cutouts/{cutouts}", **config['atlite']) output: "resources/natura.tiff" log: "logs/build_natura_raster.log" script: "scripts/build_natura_raster.py" + if config['enable'].get('retrieve_natura_raster', True): rule retrieve_natura_raster: output: "resources/natura.tiff" log: "logs/retrieve_natura_raster.log" script: 'scripts/retrieve_natura_raster.py' + rule build_renewable_profiles: input: base_network="networks/base.nc", corine="data/bundle/corine/g250_clc06_V18_5.tif", natura="resources/natura.tiff", - gebco=lambda wildcards: ("data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][wildcards.technology].keys() - else []), + gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" + if "max_depth" in config["renewable"][w.technology].keys() + else []), country_shapes='resources/country_shapes.geojson', offshore_shapes='resources/offshore_shapes.geojson', - regions=lambda wildcards: ("resources/regions_onshore.geojson" - if wildcards.technology in ('onwind', 'solar') - else "resources/regions_offshore.geojson"), - cutout=lambda wildcards: "cutouts/" + config["renewable"][wildcards.technology]['cutout'] - output: profile="resources/profile_{technology}.nc", + regions=lambda w: ("resources/regions_onshore.geojson" + if w.technology in ('onwind', 'solar') + else "resources/regions_offshore.geojson"), + cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] + output: + 
profile="resources/profile_{technology}.nc", log: "logs/build_renewable_profile_{technology}.log" - resources: mem=config['atlite'].get('nprocesses', 2) * 5000 - threads: config['atlite'].get('nprocesses', 2) benchmark: "benchmarks/build_renewable_profiles_{technology}" - # group: 'feedin_preparation' + threads: ATLITE_NPROCESSES + resources: mem=ATLITE_NPROCESSES * 5000 script: "scripts/build_renewable_profiles.py" + if 'hydro' in config['renewable'].keys(): rule build_hydro_profile: input: @@ -197,9 +202,9 @@ if 'hydro' in config['renewable'].keys(): output: 'resources/profile_hydro.nc' log: "logs/build_hydro_profile.log" resources: mem=5000 - # group: 'feedin_preparation' script: 'scripts/build_hydro_profile.py' + rule add_electricity: input: base_network='networks/base.nc', @@ -210,79 +215,78 @@ rule add_electricity: geth_hydro_capacities='data/geth2015_hydro_capacities.csv', load='resources/load.csv', nuts3_shapes='resources/nuts3_shapes.geojson', - **{'profile_' + t: "resources/profile_" + t + ".nc" - for t in config['renewable']} + **{f"profile_{tech}": f"resources/profile_{tech}.nc" + for tech in config['renewable']} output: "networks/elec.nc" log: "logs/add_electricity.log" benchmark: "benchmarks/add_electricity" threads: 1 resources: mem=3000 - # group: 'build_pypsa_networks' script: "scripts/add_electricity.py" + rule simplify_network: input: - network='networks/{network}.nc', + network='networks/elec.nc', tech_costs=COSTS, regions_onshore="resources/regions_onshore.geojson", regions_offshore="resources/regions_offshore.geojson" output: - network='networks/{network}_s{simpl}.nc', - regions_onshore="resources/regions_onshore_{network}_s{simpl}.geojson", - regions_offshore="resources/regions_offshore_{network}_s{simpl}.geojson", - busmap='resources/busmap_{network}_s{simpl}.csv' - log: "logs/simplify_network/{network}_s{simpl}.log" - benchmark: "benchmarks/simplify_network/{network}_s{simpl}" + network='networks/elec_s{simpl}.nc', + 
regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", + regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", + busmap='resources/busmap_elec_s{simpl}.csv' + log: "logs/simplify_network/elec_s{simpl}.log" + benchmark: "benchmarks/simplify_network/elec_s{simpl}" threads: 1 resources: mem=4000 - # group: 'build_pypsa_networks' script: "scripts/simplify_network.py" + rule cluster_network: input: - network='networks/{network}_s{simpl}.nc', - regions_onshore="resources/regions_onshore_{network}_s{simpl}.geojson", - regions_offshore="resources/regions_offshore_{network}_s{simpl}.geojson", - busmap=ancient('resources/busmap_{network}_s{simpl}.csv'), - custom_busmap=("data/custom_busmap_{network}_s{simpl}_{clusters}.csv" + network='networks/elec_s{simpl}.nc', + regions_onshore="resources/regions_onshore_elec_s{simpl}.geojson", + regions_offshore="resources/regions_offshore_elec_s{simpl}.geojson", + busmap=ancient('resources/busmap_elec_s{simpl}.csv'), + custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv" if config["enable"].get("custom_busmap", False) else []), tech_costs=COSTS output: - network='networks/{network}_s{simpl}_{clusters}.nc', - regions_onshore="resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson", - regions_offshore="resources/regions_offshore_{network}_s{simpl}_{clusters}.geojson", - busmap="resources/busmap_{network}_s{simpl}_{clusters}.csv", - linemap="resources/linemap_{network}_s{simpl}_{clusters}.csv" - log: "logs/cluster_network/{network}_s{simpl}_{clusters}.log" - benchmark: "benchmarks/cluster_network/{network}_s{simpl}_{clusters}" + network='networks/elec_s{simpl}_{clusters}.nc', + regions_onshore="resources/regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions_offshore="resources/regions_offshore_elec_s{simpl}_{clusters}.geojson", + busmap="resources/busmap_elec_s{simpl}_{clusters}.csv", + linemap="resources/linemap_elec_s{simpl}_{clusters}.csv" + log: 
"logs/cluster_network/elec_s{simpl}_{clusters}.log" + benchmark: "benchmarks/cluster_network/elec_s{simpl}_{clusters}" threads: 1 resources: mem=3000 - # group: 'build_pypsa_networks' script: "scripts/cluster_network.py" rule add_extra_components: input: - network='networks/{network}_s{simpl}_{clusters}.nc', + network='networks/elec_s{simpl}_{clusters}.nc', tech_costs=COSTS, - output: 'networks/{network}_s{simpl}_{clusters}_ec.nc' - log: "logs/add_extra_components/{network}_s{simpl}_{clusters}.log" - benchmark: "benchmarks/add_extra_components/{network}_s{simpl}_{clusters}_ec" + output: 'networks/elec_s{simpl}_{clusters}_ec.nc' + log: "logs/add_extra_components/elec_s{simpl}_{clusters}.log" + benchmark: "benchmarks/add_extra_components/elec_s{simpl}_{clusters}_ec" threads: 1 resources: mem=3000 - # group: 'build_pypsa_networks' script: "scripts/add_extra_components.py" rule prepare_network: - input: 'networks/{network}_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS - output: 'networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' - log: "logs/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" + input: 'networks/elec_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS + output: 'networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc' + log: "logs/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log" + benchmark: "benchmarks/prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 1 resources: mem=4000 - # benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}" script: "scripts/prepare_network.py" + def memory(w): factor = 3. 
for o in w.opts.split('-'): @@ -299,48 +303,49 @@ def memory(w): return int(factor * (18000 + 180 * int(w.clusters[:-1]))) else: return int(factor * (10000 + 195 * int(w.clusters))) - # return 4890+310 * int(w.clusters) + rule solve_network: - input: "networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - shadow: "shallow" + input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" log: - solver=normpath("logs/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"), - python="logs/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", - memory="logs/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log" - benchmark: "benchmarks/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}" + solver=normpath("logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"), + python="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", + memory="logs/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log" + benchmark: "benchmarks/solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 4 resources: mem=memory - # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain + shadow: "shallow" script: "scripts/solve_network.py" + rule solve_operations_network: input: - unprepared="networks/{network}_s{simpl}_{clusters}_ec.nc", - optimized="results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc" - shadow: "shallow" + unprepared="networks/elec_s{simpl}_{clusters}_ec.nc", + optimized="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc" log: - 
solver=normpath("logs/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"), - python="logs/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", - memory="logs/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log" - benchmark: "benchmarks/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}" + solver=normpath("logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"), + python="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log", + memory="logs/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log" + benchmark: "benchmarks/solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" threads: 4 resources: mem=(lambda w: 5000 + 372 * int(w.clusters)) - # group: "solve_operations" + shadow: "shallow" script: "scripts/solve_operations_network.py" + rule plot_network: input: - network="results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", + network="results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", tech_costs=COSTS output: - only_map="results/plots/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", - ext="results/plots/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}" - log: "logs/plot_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log" + only_map="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}", + ext="results/plots/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}" + log: "logs/plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log" script: "scripts/plot_network.py" + def input_make_summary(w): # It's mildly hacky to include the separate costs input as first entry if w.ll.endswith("all"): @@ -350,48 +355,54 @@ def input_make_summary(w): else: ll = w.ll return ([COSTS] + - expand("results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", 
+ expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", network=w.network, ll=ll, **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k) for k in ["simpl", "clusters", "opts"]})) + rule make_summary: input: input_make_summary - output: directory("results/summaries/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}") - log: "logs/make_summary/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", + output: directory("results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}") + log: "logs/make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log", script: "scripts/make_summary.py" + rule plot_summary: - input: "results/summaries/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" - output: "results/plots/summary_{summary}_{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}" - log: "logs/plot_summary/{summary}_{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log" + input: "results/summaries/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}" + output: "results/plots/summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}" + log: "logs/plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log" script: "scripts/plot_summary.py" -def input_plot_p_nom_max(wildcards): - return [('networks/{network}_s{simpl}{maybe_cluster}.nc' - .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **wildcards)) - for c in wildcards.clusts.split(",")] + +def input_plot_p_nom_max(w): + return [("networks/elec_s{simpl}{maybe_cluster}.nc" + .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w)) + for c in w.clusts.split(",")] + + rule plot_p_nom_max: input: input_plot_p_nom_max - output: "results/plots/{network}_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}" - log: "logs/plot_p_nom_max/{network}_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" + output: "results/plots/elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}" + 
log: "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" script: "scripts/plot_p_nom_max.py" + rule build_country_flh: input: base_network="networks/base.nc", corine="data/bundle/corine/g250_clc06_V18_5.tif", natura="resources/natura.tiff", - gebco=lambda wildcards: ("data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][wildcards.technology].keys() - else []), + gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" + if "max_depth" in config["renewable"][w.technology].keys() + else []), country_shapes='resources/country_shapes.geojson', offshore_shapes='resources/offshore_shapes.geojson', pietzker="data/pietzker2014.xlsx", regions=lambda w: ("resources/country_shapes.geojson" - if w.technology in ('onwind', 'solar') - else "resources/offshore_shapes.geojson"), + if w.technology in ('onwind', 'solar') + else "resources/offshore_shapes.geojson"), cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] output: area="resources/country_flh_area_{technology}.csv", @@ -402,9 +413,4 @@ rule build_country_flh: log: "logs/build_country_flh_{technology}.log" resources: mem=10000 benchmark: "benchmarks/build_country_flh_{technology}" - # group: 'feedin_preparation' script: "scripts/build_country_flh.py" - -# Local Variables: -# mode: python -# End: diff --git a/cluster.yaml b/cluster.yaml deleted file mode 100644 index b36e6ed2..00000000 --- a/cluster.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: GPL-3.0-or-later - -__default__: - log: "logs/cluster/{{name}}.log" - -feedin_preparation: - walltime: "12:00:00" - -solve_network: - walltime: "05:00:00:00" - -trace_solve_network: - walltime: "05:00:00:00" - -solve: - walltime: "05:00:00:00" - threads: 4 # Group threads are not aggregated - -solve_operations: - walltime: "01:00:00:00" diff --git a/config.default.yaml b/config.default.yaml index 0d299fe5..67a4f15a 100755 --- a/config.default.yaml +++ 
b/config.default.yaml @@ -12,7 +12,6 @@ logging: summary_dir: results scenario: - sectors: [E] simpl: [''] ll: ['copt'] clusters: [37, 128, 256, 512, 1024] @@ -265,67 +264,18 @@ plotting: 'waste' : '#68896b' 'geothermal' : '#ba91b1' "OCGT" : "#d35050" - "OCGT marginal" : "#d35050" - "OCGT-heat" : "#d35050" - "gas boiler" : "#d35050" - "gas boilers" : "#d35050" - "gas boiler marginal" : "#d35050" - "gas-to-power/heat" : "#d35050" "gas" : "#d35050" "natural gas" : "#d35050" "CCGT" : "#b20101" - "CCGT marginal" : "#b20101" - "Nuclear" : "#ff9000" - "Nuclear marginal" : "#ff9000" "nuclear" : "#ff9000" "coal" : "#707070" - "Coal" : "#707070" - "Coal marginal" : "#707070" "lignite" : "#9e5a01" - "Lignite" : "#9e5a01" - "Lignite marginal" : "#9e5a01" - "Oil" : "#262626" "oil" : "#262626" "H2" : "#ea048a" "hydrogen storage" : "#ea048a" - "Sabatier" : "#a31597" - "methanation" : "#a31597" - "helmeth" : "#a31597" - "DAC" : "#d284ff" - "co2 stored" : "#e5e5e5" - "CO2 sequestration" : "#e5e5e5" "battery" : "#b8ea04" - "battery storage" : "#b8ea04" - "Li ion" : "#b8ea04" - "BEV charger" : "#e2ff7c" - "V2G" : "#7a9618" - "transport fuel cell" : "#e884be" - "retrofitting" : "#e0d6a8" - "building retrofitting" : "#e0d6a8" - "heat pumps" : "#ff9768" - "heat pump" : "#ff9768" - "air heat pump" : "#ffbea0" - "ground heat pump" : "#ff7a3d" - "power-to-heat" : "#a59e7c" - "power-to-gas" : "#db8585" - "power-to-liquid" : "#a9acd1" - "Fischer-Tropsch" : "#a9acd1" - "resistive heater" : "#aa4925" - "water tanks" : "#401f75" - "hot water storage" : "#401f75" - "hot water charging" : "#351c5e" - "hot water discharging" : "#683ab2" - "CHP" : "#d80a56" - "CHP heat" : "#d80a56" - "CHP electric" : "#d80a56" - "district heating" : "#93864b" - "Ambient" : "#262626" "Electric load" : "#f9d002" "electricity" : "#f9d002" - "Heat load" : "#d35050" - "heat" : "#d35050" - "Transport load" : "#235ebc" - "transport" : "#235ebc" "lines" : "#70af1d" "transmission lines" : "#70af1d" "AC-AC" : "#70af1d" @@ 
-345,18 +295,5 @@ plotting: hydro: "Reservoir & Dam" battery: "Battery Storage" H2: "Hydrogen Storage" - lines: "Transmission lines" - ror: "Run of river" - nice_names_n: - OCGT: "Open-Cycle\nGas" - CCGT: "Combined-Cycle\nGas" - offwind-ac: "Offshore\nWind (AC)" - offwind-dc: "Offshore\nWind (DC)" - onwind: "Onshore\nWind" - battery: "Battery\nStorage" - H2: "Hydrogen\nStorage" - lines: "Transmission\nlines" - ror: "Run of\nriver" - PHS: "Pumped Hydro\nStorage" - hydro: "Reservoir\n& Dam" - + lines: "Transmission Lines" + ror: "Run of River" diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 80b8c7a0..17c7509e 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -11,7 +11,6 @@ logging: summary_dir: results scenario: - sectors: [E] simpl: [''] ll: ['copt'] clusters: [5] @@ -177,26 +176,8 @@ solving: clip_p_max_pu: 0.01 skip_iterations: false track_iterations: false - #nhours: 10 solver: name: cbc - # solver: - # name: gurobi - # threads: 4 - # method: 2 # barrier - # crossover: 0 - # BarConvTol: 1.e-5 - # FeasibilityTol: 1.e-6 - # AggFill: 0 - # PreDual: 0 - # GURO_PAR_BARDENSETHRESH: 200 - # solver: - # name: cplex - # threads: 4 - # lpmethod: 4 # barrier - # solutiontype: 2 # non basic solution, ie no crossover - # barrier_convergetol: 1.e-5 - # feasopt_tolerance: 1.e-6 plotting: map: @@ -244,67 +225,18 @@ plotting: 'waste' : '#68896b' 'geothermal' : '#ba91b1' "OCGT" : "#d35050" - "OCGT marginal" : "#d35050" - "OCGT-heat" : "#d35050" - "gas boiler" : "#d35050" - "gas boilers" : "#d35050" - "gas boiler marginal" : "#d35050" - "gas-to-power/heat" : "#d35050" "gas" : "#d35050" "natural gas" : "#d35050" "CCGT" : "#b20101" - "CCGT marginal" : "#b20101" - "Nuclear" : "#ff9000" - "Nuclear marginal" : "#ff9000" "nuclear" : "#ff9000" "coal" : "#707070" - "Coal" : "#707070" - "Coal marginal" : "#707070" "lignite" : "#9e5a01" - "Lignite" : "#9e5a01" - "Lignite marginal" : "#9e5a01" - "Oil" : "#262626" "oil" : "#262626" "H2" : "#ea048a" "hydrogen 
storage" : "#ea048a" - "Sabatier" : "#a31597" - "methanation" : "#a31597" - "helmeth" : "#a31597" - "DAC" : "#d284ff" - "co2 stored" : "#e5e5e5" - "CO2 sequestration" : "#e5e5e5" "battery" : "#b8ea04" - "battery storage" : "#b8ea04" - "Li ion" : "#b8ea04" - "BEV charger" : "#e2ff7c" - "V2G" : "#7a9618" - "transport fuel cell" : "#e884be" - "retrofitting" : "#e0d6a8" - "building retrofitting" : "#e0d6a8" - "heat pumps" : "#ff9768" - "heat pump" : "#ff9768" - "air heat pump" : "#ffbea0" - "ground heat pump" : "#ff7a3d" - "power-to-heat" : "#a59e7c" - "power-to-gas" : "#db8585" - "power-to-liquid" : "#a9acd1" - "Fischer-Tropsch" : "#a9acd1" - "resistive heater" : "#aa4925" - "water tanks" : "#401f75" - "hot water storage" : "#401f75" - "hot water charging" : "#351c5e" - "hot water discharging" : "#683ab2" - "CHP" : "#d80a56" - "CHP heat" : "#d80a56" - "CHP electric" : "#d80a56" - "district heating" : "#93864b" - "Ambient" : "#262626" "Electric load" : "#f9d002" "electricity" : "#f9d002" - "Heat load" : "#d35050" - "heat" : "#d35050" - "Transport load" : "#235ebc" - "transport" : "#235ebc" "lines" : "#70af1d" "transmission lines" : "#70af1d" "AC-AC" : "#70af1d" @@ -324,17 +256,5 @@ plotting: hydro: "Reservoir & Dam" battery: "Battery Storage" H2: "Hydrogen Storage" - lines: "Transmission lines" - ror: "Run of river" - nice_names_n: - OCGT: "Open-Cycle\nGas" - CCGT: "Combined-Cycle\nGas" - offwind-ac: "Offshore\nWind (AC)" - offwind-dc: "Offshore\nWind (DC)" - onwind: "Onshore\nWind" - battery: "Battery\nStorage" - H2: "Hydrogen\nStorage" - lines: "Transmission\nlines" - ror: "Run of\nriver" - PHS: "Pumped Hydro\nStorage" - hydro: "Reservoir\n& Dam" + lines: "Transmission Lines" + ror: "Run of River" diff --git a/data/links_tyndp.csv b/data/links_tyndp.csv index f37f34c2..8079be72 100644 --- a/data/links_tyndp.csv +++ b/data/links_tyndp.csv @@ -23,4 +23,4 @@ HVDC Ultranet,Osterath (DE),Philippsburg (DE),,314,600,in permitting,,https://ty Gridlink,Kingsnorth (UK),Warande 
(FR),160,,1400,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/285,0.596111111111111,51.41972,2.376776,51.034368 NeuConnect,Grain (UK),Fedderwarden (DE),680,,1400,in permitting,,https://tyndp.entsoe.eu/tyndp2018/projects/projects/309,0.716666666666667,51.44,8.046524,53.562763 NordBalt,Klaipeda (LT),Nybro (SE),450,,700,built,,https://en.wikipedia.org/wiki/NordBalt,21.256667,55.681667,15.854167,56.767778 -Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889 \ No newline at end of file +Estlink 1,Harku (EE),Espoo (FI),105,,350,built,,https://en.wikipedia.org/wiki/Estlink,24.560278,59.384722,24.551667,60.203889 diff --git a/data/parameter_corrections.yaml b/data/parameter_corrections.yaml index 89e002c9..67b73ec1 100644 --- a/data/parameter_corrections.yaml +++ b/data/parameter_corrections.yaml @@ -36,10 +36,10 @@ Link: "8068": "5819" # fix GB location of Anglo-Scottish interconnector length: index: - "12998": 409.0 + "12998": 409.0 "5627": 26.39 bus0: index: "14552": "5819" # fix GB location of GB-IE interconnector "5628": "7276" # bus0 == bus1 to remove link in remove_unconnected_components - "12997": "7276" # bus0 == bus1 to remove link in remove_unconnected_components \ No newline at end of file + "12997": "7276" # bus0 == bus1 to remove link in remove_unconnected_components diff --git a/doc/_static/theme_overrides.css b/doc/_static/theme_overrides.css index efbe909c..4c6f277f 100644 --- a/doc/_static/theme_overrides.css +++ b/doc/_static/theme_overrides.css @@ -2,22 +2,78 @@ SPDX-License-Identifier: GPL-3.0-or-later */ +.wy-side-nav-search { + background-color: #eeeeee; +} + +.wy-side-nav-search .wy-dropdown>a, +.wy-side-nav-search>a { + color: rgb(34, 97, 156) +} + +.wy-side-nav-search>div.version { + color: rgb(34, 97, 156) +} + +.wy-menu-vertical header, +.wy-menu-vertical p.caption, +.rst-versions a { + color: #999999; +} + +.wy-menu-vertical a.reference:hover, 
+.wy-menu-vertical a.reference.internal:hover { + background: #dddddd; + color: #fff; +} + +.wy-nav-side { + background: #efefef; +} + +.wy-menu-vertical a.reference { + color: #000; +} + +.rst-versions .rst-current-version, +.wy-nav-top, +.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a:hover { + background: #002221; +} + +.wy-nav-content .highlight { + background: #ffffff; +} + +.wy-nav-content .highlight .nn, +.wy-nav-content .admonition.warning a { + color: #dddddd; +} + +.rst-content code.literal, +.rst-content tt.literal { + color: rgb(34, 97, 156) +} + +.wy-nav-content a.reference { + color: rgb(34, 97, 156); +} + + /* override table width restrictions */ + @media screen and (min-width: 767px) { - .wy-table-responsive table td { - /* !important prevents the common CSS stylesheets from overriding + /* !important prevents the common CSS stylesheets from overriding this as on RTD they are loaded after this stylesheet */ - white-space: normal !important; - /* background: #eeeeee !important; */ + white-space: normal !important; + background: rgb(250, 250, 250) !important; } - .wy-table-responsive { - max-width: 100%; - overflow: visible !important; + max-width: 100%; + overflow: visible !important; } - .wy-nav-content { max-width: 910px !important; } - } \ No newline at end of file +} \ No newline at end of file diff --git a/doc/cloudcomputing.rst b/doc/cloudcomputing.rst index 3da6b9c2..f91441a5 100644 --- a/doc/cloudcomputing.rst +++ b/doc/cloudcomputing.rst @@ -1,6 +1,6 @@ .. SPDX-FileCopyrightText: 2020 Maximilian Parzen and Emmanuel Paez - + SPDX-License-Identifier: CC-BY-4.0 @@ -18,7 +18,7 @@ Google Cloud Platform (GCP) We are happy to take pull requests explaining where the procedures deviate from the descriptions below for other operating systems. Likewise, tutorials for other cloud computing solutions are also highly welcome. 
-The Google Cloud Platform (GCP) is a cloud computing service you can use to run PyPSA-Eur calculations, especially if +The Google Cloud Platform (GCP) is a cloud computing service you can use to run PyPSA-Eur calculations, especially if - you do not have immediate access to high-performance computating facilities, - you have problems with the Windows operating system and want a quick run on a linux-based system, @@ -26,7 +26,7 @@ The Google Cloud Platform (GCP) is a cloud computing service you can use to run - you need quick results (trial version includes 32 vCPU cores and up to 800 GB of memory). With the Google Cloud Platform you set up a virtual machine in the cloud which can store and operate data. -Like on your local computer, you have to install all software and solvers, and create paths on the virtual machine to set up the required environment. +Like on your local computer, you have to install all software and solvers, and create paths on the virtual machine to set up the required environment. The 300$ free trial budget is offered which equals roughly 10-20 simulations with 180 nodes at hourly basis. The following steps are required: @@ -38,7 +38,7 @@ The following steps are required: Step 1 - Google Cloud Platform registration ------------------------------------------- -First, register at the `Google Cloud Platform `_ (GCP). +First, register at the `Google Cloud Platform `_ (GCP). Ann active bank account is required, which will not be charged unless you exceed the trial budget. Step 2 - Create your Virtual Machine instance @@ -73,26 +73,26 @@ Step 3 - Installation of Cloud SDK - Download Google Cloud SDK `SDK `_. Check that you are logged in in your Google account. The link should lead you to the Windows installation of Google Cloud SDK. - Follow the "Quickstart for Windows - Before you begin" steps. - After the successfull installation and initialization, close the Google Cloud SDK reopen it again. 
Type the following command into the "Google Cloud SDK Shell": - + .. code:: bash - + gcloud compute ssh -- -L 8888:localhost:8888 - + - This command above will open a PuTTy command window that is connected to your Virtual Machine. Time to celebrate if it works! -- Now install all necessary tools. As little help, the first steps: +- Now install all necessary tools. As little help, the first steps: .. code:: bash - + sudo apt-get update sudo apt-get install bzip2 libxml2-dev sudo apt-get install wget wget https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh (Check the link. To be up to date with anaconda, check the Anaconda website https://www.anaconda.com/products/individual ) ls (to see what anaconda file to bash) - bash Anaconda3-2020.07-Linux-x86_64.sh - source ~/.bashrc - + bash Anaconda3-2020.07-Linux-x86_64.sh + source ~/.bashrc + - Close and reopen the PuTTy file (-> open Google Cloud SDK -> initialize again with the command above to open the PuTTY command window). Now ``conda`` can be listed with ``conda list``. Noq you can follow the standard installation instructions to finalize your machine (don't forget the solvers - for bigger simulations use commercial solvers such as Gurobi). - + Step 4 - Installation of WinSCP ------------------------------- @@ -102,22 +102,22 @@ Make sure that your instance is operating for the next steps. - Download `WinSCP `_ and follow the default installation steps. - Open WinSCP after the installation. A login window will open. - Keep SFTP as file protocol. -- As host name insert the External IP of your VM (click in your internet browser on your GCP VM instance to see the external IP) +- As host name insert the External IP of your VM (click in your internet browser on your GCP VM instance to see the external IP) - Set the User name in WinSCP to the name you see in your PuTTy window (check step 3 - for instance [username]@[VM-name]:~$) -- Click on the advanced setting. SSH -> Authentication. -- Option 1. 
Click on the Tools button and "Install Public Key into Server..". Somewhere in your folder structure must be a public key. I found it with the following folder syntax on my local windows computer -> :\Users\...\.ssh (there should be a PKK file). -- Option 2. Click on the Tools button and "Generate new key pair...". Save the private key at a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadeta of your instance. +- Click on the advanced setting. SSH -> Authentication. +- Option 1. Click on the Tools button and "Install Public Key into Server..". Somewhere in your folder structure must be a public key. I found it with the following folder syntax on my local windows computer -> :\Users\...\.ssh (there should be a PKK file). +- Option 2. Click on the Tools button and "Generate new key pair...". Save the private key at a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadeta of your instance. - Click ok and save. Then click Login. If successfull WinSCP will open on the left side your local computer folder structure and on the right side the folder strucutre of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successfull) If you had struggle with the above steps, you could also try `this video `_. .. note:: Double check the External IP of your VM before you try to login with WinSCP. It's often a cause for an error. - + Step 5 - Extra. Copying your instance with all its data and paths included -------------------------------------------------------------------------- Especially if you think about operating several instance for quicker simulations, you can create a so called `"image" `_ of the virtual machine. -The "image" includes all the data and software set-ups from your VM. 
Afterwards you can create a VM from an image and avoid all the installation steps above. +The "image" includes all the data and software set-ups from your VM. Afterwards you can create a VM from an image and avoid all the installation steps above. Important points when to solve networks in PyPSA ------------------------------------------------ @@ -126,4 +126,4 @@ If you use the GCP with the default PyPSA-Eur settings, your budget will be used - Always test using low resolution networks; i.e. a single country at 5 nodes and 24h resolution for 2 month of weather data. - Adjust your solver in the ``config.yaml`` file. Set ``solving: skip_iterations: true``. - This will lead to a single solver iteration which is often precise enough. + This will lead to a single solver iteration which is often precise enough. diff --git a/doc/configtables/atlite.csv b/doc/configtables/atlite.csv index b60b15e5..5f21bb05 100644 --- a/doc/configtables/atlite.csv +++ b/doc/configtables/atlite.csv @@ -5,4 +5,4 @@ cutouts,,, -- -- module,--,"One of {'era5','sarah'}","Source of the reanalysis weather dataset (e.g. `ERA5 `_ or `SARAH-2 `_)" -- -- xs,°,"Float interval within [-180, 180]","Range of longitudes to download weather data for." -- -- ys,°,"Float interval within [-90, 90]","Range of latitudes to download weather data for." --- -- years,--,"Integer interval within [1979,2018]","Range of years to download weather data for." \ No newline at end of file +-- -- years,--,"Integer interval within [1979,2018]","Range of years to download weather data for." diff --git a/doc/configtables/costs.csv b/doc/configtables/costs.csv index 383a6423..ed2d56e4 100644 --- a/doc/configtables/costs.csv +++ b/doc/configtables/costs.csv @@ -5,4 +5,4 @@ USD2013_to_EUR2013,--,float,"Exchange rate from USD :math:`_{2013}` to EUR :math capital_cost,EUR/MW,"Keys should be in the 'technology' column of ``data/costs.csv``. 
Values can be any float.","For the given technologies, assumptions about their capital investment costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." marginal_cost,EUR/MWh,"Keys should be in the 'technology' column of ``data/costs.csv``. Values can be any float.","For the given technologies, assumptions about their marginal operating costs are set to the corresponding value. Optional; overwrites cost assumptions from ``data/costs.csv``." emission_prices,,,"Specify exogenous prices for emission types listed in ``network.carriers`` to marginal costs." --- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." \ No newline at end of file +-- co2,EUR/t,float,"Exogenous price of carbon-dioxide added to the marginal costs of fossil-fuelled generators according to their carbon intensity. Added through the keyword ``Ep`` in the ``{opts}`` wildcard only in the rule :mod:`prepare_network``." diff --git a/doc/configtables/hydro.csv b/doc/configtables/hydro.csv index 801b27ff..fc53334e 100644 --- a/doc/configtables/hydro.csv +++ b/doc/configtables/hydro.csv @@ -3,4 +3,4 @@ cutout,--,"Must be 'europe-2013-era5'","Specifies the directory where the releva carriers,--,"Any subset of {'ror', 'PHS', 'hydro'}","Specifies the types of hydro power plants to build per-unit availability time series for. 'ror' stands for run-of-river plants, 'PHS' represents pumped-hydro storage, and 'hydro' stands for hydroelectric dams." PHS_max_hours,h,float,"Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_." 
hydro_max_hours,h,"Any of {float, 'energy_capacity_totals_by_country', 'estimate_by_large_installations'}","Maximum state of charge capacity of the pumped-hydro storage (PHS) in terms of hours at full output capacity ``p_nom`` or heuristically determined. Cf. `PyPSA documentation `_." -clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero." \ No newline at end of file +clip_min_inflow,MW,float,"To avoid too small values in the inflow time series, values below this threshold are set to zero." diff --git a/doc/configtables/lines.csv b/doc/configtables/lines.csv index 14f91d22..ddf02e54 100644 --- a/doc/configtables/lines.csv +++ b/doc/configtables/lines.csv @@ -3,4 +3,4 @@ types,--,"Values should specify a `line type in PyPSA `_ which are at least in permitting." -under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction." \ No newline at end of file +under_construction,--,"One of {'zero': set capacity to zero, 'remove': remove completely, 'keep': keep with full capacity}","Specifies how to handle lines which are currently under construction." diff --git a/doc/configtables/offwind-ac.csv b/doc/configtables/offwind-ac.csv index 02544506..e5bbc847 100644 --- a/doc/configtables/offwind-ac.csv +++ b/doc/configtables/offwind-ac.csv @@ -10,4 +10,4 @@ max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." 
potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." \ No newline at end of file +keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configtables/offwind-dc.csv b/doc/configtables/offwind-dc.csv index 02544506..e5bbc847 100644 --- a/doc/configtables/offwind-dc.csv +++ b/doc/configtables/offwind-dc.csv @@ -10,4 +10,4 @@ max_depth,m,float,"Maximum sea water depth at which wind turbines can be build. min_shore_distance,m,float,"Minimum distance to the shore below which wind turbines cannot be build. Such areas close to the shore are excluded in the process of calculating the AC-connected offshore wind potential." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." \ No newline at end of file +keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." 
diff --git a/doc/configtables/onwind.csv b/doc/configtables/onwind.csv index 11f05cfa..149dc0c4 100644 --- a/doc/configtables/onwind.csv +++ b/doc/configtables/onwind.csv @@ -11,4 +11,4 @@ corine,,, natura,bool,"{true, false}","Switch to exclude `Natura 2000 `_ natural protection areas. Area is excluded if ``true``." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." \ No newline at end of file +keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index 43d299d4..4cb473e2 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -8,4 +8,4 @@ Trigger, Description, Definition, Status ``ATK``, "Require each node to be autarkic. Example: ``ATK`` removes all lines and links. ``ATKc`` removes all cross-border lines and links.", ``prepare_network``, In active use ``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() `_, Untested ``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. 
Ignores network., ``solve_network`` `add_opts_constraints() `_, Untested -``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use \ No newline at end of file +``carrier+{c|p}factor``, "Alter the capital cost (``c``) or installable potential (``p``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use diff --git a/doc/configtables/plotting.csv b/doc/configtables/plotting.csv index 0f21c9a8..f5910e16 100644 --- a/doc/configtables/plotting.csv +++ b/doc/configtables/plotting.csv @@ -12,4 +12,3 @@ energy_min,TWh,float,"Lower y-axis limit in energy bar plots." energy_threshold,TWh,float,"Threshold below which technologies will not be shown in energy bar plots." tech_colors,--,"carrier -> HEX colour code","Mapping from network ``carrier`` to a colour (`HEX colour code `_)." nice_names,--,"str -> str","Mapping from network ``carrier`` to a more readable name." -nice_names_n,--,"str -> str","Same as nice_names, but with linebreaks." \ No newline at end of file diff --git a/doc/configtables/scenario.csv b/doc/configtables/scenario.csv index 52dafa56..a82462e4 100644 --- a/doc/configtables/scenario.csv +++ b/doc/configtables/scenario.csv @@ -1,6 +1,5 @@ ,Unit,Values,Description -sectors,--,"Must be 'elec'","Placeholder for integration of other energy sectors." simpl,--,cf. :ref:`simpl`,"List of ``{simpl}`` wildcards to run." -ll,--,cf. :ref:`ll`,"List of ``{ll}`` wildcards to run." clusters,--,cf. :ref:`clusters`,"List of ``{clusters}`` wildcards to run." -opts,--,cf. :ref:`opts`,"List of ``{opts}`` wildcards to run." \ No newline at end of file +ll,--,cf. :ref:`ll`,"List of ``{ll}`` wildcards to run." +opts,--,cf. :ref:`opts`,"List of ``{opts}`` wildcards to run." 
diff --git a/doc/configtables/snapshots.csv b/doc/configtables/snapshots.csv index 14fd8001..4d917f4d 100644 --- a/doc/configtables/snapshots.csv +++ b/doc/configtables/snapshots.csv @@ -1,4 +1,4 @@ ,Unit,Values,Description start,--,"str or datetime-like; e.g. YYYY-MM-DD","Left bound of date range" end,--,"str or datetime-like; e.g. YYYY-MM-DD","Right bound of date range" -closed,--,"One of {None, ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``None``." \ No newline at end of file +closed,--,"One of {None, ‘left’, ‘right’}","Make the time interval closed to the ``left``, ``right``, or both sides ``None``." diff --git a/doc/configtables/solar.csv b/doc/configtables/solar.csv index 93bfd04a..8e57e066 100644 --- a/doc/configtables/solar.csv +++ b/doc/configtables/solar.csv @@ -12,4 +12,4 @@ corine,--,"Any subset of the `CORINE Land Cover code list `_ natural protection areas. Area is excluded if ``true``." potential,--,"One of {'simple', 'conservative'}","Method to compute the maximal installable potential for a node; confer :ref:`renewableprofiles`" clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." -keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." \ No newline at end of file +keep_all_available_areas,bool,"{'true', 'false'}","Use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed." 
diff --git a/doc/configtables/solving-options.csv b/doc/configtables/solving-options.csv index 0c640684..f77eec24 100644 --- a/doc/configtables/solving-options.csv +++ b/doc/configtables/solving-options.csv @@ -7,4 +7,4 @@ max_iterations,--,int,"Maximum number of solving iterations in between which res nhours,--,int,"Specifies the :math:`n` first snapshots to take into account. Must be less than the total number of snapshots. Rather recommended only for debugging." clip_p_max_pu,p.u.,float,"To avoid too small values in the renewables` per-unit availability time series values below this threshold are set to zero." skip_iterations,bool,"{'true','false'}","Skip iterating, do not update impedances of branches." -track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)" \ No newline at end of file +track_iterations,bool,"{'true','false'}","Flag whether to store the intermediate branch capacities and objective function values are recorded for each iteration in ``network.lines['s_nom_opt_X']`` (where ``X`` labels the iteration)" diff --git a/doc/configtables/solving-solver.csv b/doc/configtables/solving-solver.csv index db16d867..3eae6310 100644 --- a/doc/configtables/solving-solver.csv +++ b/doc/configtables/solving-solver.csv @@ -1,3 +1,3 @@ ,Unit,Values,Description name,--,"One of {'gurobi', 'cplex', 'cbc', 'glpk', 'ipopt'}; potentially more possible","Solver to use for optimisation problems in the workflow; e.g. clustering and linear optimal power flow." -opts,--,"Parameter list for `Gurobi `_ and `CPLEX `_","Solver specific parameter settings." \ No newline at end of file +opts,--,"Parameter list for `Gurobi `_ and `CPLEX `_","Solver specific parameter settings." 
diff --git a/doc/configtables/toplevel.csv b/doc/configtables/toplevel.csv index 4592161b..da8a86a5 100644 --- a/doc/configtables/toplevel.csv +++ b/doc/configtables/toplevel.csv @@ -3,7 +3,7 @@ version,--,0.x.x,"Version of PyPSA-Eur" tutorial,bool,"{true, false}","Switch to retrieve the tutorial data set instead of the full data set." logging,,, -- level,--,"Any of {'INFO', 'WARNING', 'ERROR'}","Restrict console outputs to all infos, warning or errors only" --- format,--,"e.g. ``%(levelname)s:%(name)s:%(message)s``","Custom format for log messages. See `LogRecord `_ attributes." +-- format,--,"","Custom format for log messages. See `LogRecord `_ attributes." summary_dir,--,"e.g. 'results'","Directory into which results are written." countries,--,"Subset of {'AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK'}","European countries defined by their `Two-letter country codes (ISO 3166-1) `_ which should be included in the energy system model." focus_weights,--,"Keys should be two-digit country codes (e.g. DE) and values should range between 0 and 1","Ratio of total clusters for particular countries. the remaining weight is distributed according to mean load. An example: ``focus_weights: DE: 0.6 FR: 0.2``." @@ -14,4 +14,4 @@ enable,,, -- retrieve_cutout,bool,"{true, false}","Switch to enable the retrieval of cutouts from zenodo with :mod:`retrieve_cutout`." -- build_natura_raster,bool,"{true, false}","Switch to enable the creation of the raster ``natura.tiff`` via the rule :mod:`build_natura_raster`." -- retrieve_natura_raster,bool,"{true, false}","Switch to enable the retrieval of ``natura.tiff`` from zenodo with :mod:`retrieve_natura_raster`." --- custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. 
If activated the rule looks for provided busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``." \ No newline at end of file +-- custom_busmap,bool,"{true, false}","Switch to enable the use of custom busmaps in rule :mod:`cluster_network`. If activated the rule looks for provided busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv`` which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``, i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc``." diff --git a/doc/configtables/transformers.csv b/doc/configtables/transformers.csv index b58ae8f3..0ccd8e87 100644 --- a/doc/configtables/transformers.csv +++ b/doc/configtables/transformers.csv @@ -1,4 +1,4 @@ ,Unit,Values,Description x,p.u.,float,"Series reactance (per unit, using ``s_nom`` as base power of the transformer. Overwritten if ``type`` is specified." s_nom,MVA,float,"Limit of apparent power which can pass through branch. Overwritten if ``type`` is specified." -type,--,"A `transformer type in PyPSA `_.","Specifies transformer types to assume for the transformers of the ENTSO-E grid extraction." \ No newline at end of file +type,--,"A `transformer type in PyPSA `_.","Specifies transformer types to assume for the transformers of the ENTSO-E grid extraction." diff --git a/doc/configuration.rst b/doc/configuration.rst index f0e1717b..ba42a742 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -36,10 +36,10 @@ investment changes as more ambitious greenhouse-gas emission reduction targets a The ``scenario`` section is an extraordinary section of the config file that is strongly connected to the :ref:`wildcards` and is designed to -facilitate running multiple scenarios through a single command +facilitate running multiple scenarios through a single command .. 
code:: bash - + snakemake -j 1 solve_all_elec_networks For each wildcard, a **list of values** is provided. The rule ``solve_all_elec_networks`` will trigger the rules for creating ``results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc`` for **all combinations** of the provided wildcard values as defined by Python's `itertools.product(...) `_ function that snakemake's `expand(...) function `_ uses. diff --git a/doc/costs.rst b/doc/costs.rst index a51acb85..70bd12b8 100644 --- a/doc/costs.rst +++ b/doc/costs.rst @@ -50,4 +50,4 @@ Default Cost Assumptions .. csv-table:: :header-rows: 1 :widths: 10,3,5,4,6,8 - :file: ../data/costs.csv \ No newline at end of file + :file: ../data/costs.csv diff --git a/doc/index.rst b/doc/index.rst index bd2ec5f2..d8d9c852 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -8,7 +8,7 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System .. image:: https://img.shields.io/github/v/release/pypsa/pypsa-eur?include_prereleases :alt: GitHub release (latest by date including pre-releases) - + .. image:: https://travis-ci.org/PyPSA/pypsa-eur.svg?branch=master :target: https://travis-ci.org/PyPSA/pypsa-eur @@ -101,9 +101,9 @@ Documentation simplification solving plotting - + **References** - + * :doc:`release_notes` * :doc:`limitations` * :doc:`contributing` @@ -183,7 +183,7 @@ There are pre-built networks available as a dataset on Zenodo as well for every The included ``.nc`` files are PyPSA network files which can be imported with PyPSA via: .. code:: python - + import pypsa filename = "elec_s_1024_ec.nc" # example @@ -202,7 +202,7 @@ PyPSA-Eur work is released under multiple licenses: See the individual files and the `dep5 <.reuse/dep5>`_ file for license details. Additionally, different licenses and terms of use also apply to the various input data, which are summarised below. -More details are included in +More details are included in `the description of the data bundles on zenodo `_. .. 
csv-table:: diff --git a/doc/installation.rst b/doc/installation.rst index f50083d3..e1de3f4b 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -1,6 +1,6 @@ .. SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors - + SPDX-License-Identifier: CC-BY-4.0 .. _installation: @@ -37,12 +37,12 @@ We recommend using the package manager and environment management system ``conda Install `miniconda `_, which is a mini version of `Anaconda `_ that includes only ``conda`` and its dependencies or make sure ``conda`` is already installed on your system. For instructions for your operating system follow the ``conda`` `installation guide `_. -The python package requirements are curated in the `environment.yaml `_ file. +The python package requirements are curated in the `envs/environment.yaml `_ file. The environment can be installed and activated using .. code:: bash - .../pypsa-eur % conda env create -f environment.yaml + .../pypsa-eur % conda env create -f envs/environment.yaml .../pypsa-eur % conda activate pypsa-eur @@ -55,14 +55,14 @@ The environment can be installed and activated using `mamba `_ as a fast drop-in replacement via .. code:: bash - + conda install -c conda-forge mamba and then install the environment with .. code:: bash - mamba env create -f environment.yaml + mamba env create -f envs/environment.yaml Install a Solver ================ @@ -102,10 +102,10 @@ For installation instructions of these solvers for your operating system, follow and on Windows .. code:: bash - + conda activate pypsa-eur conda install -c conda-forge ipopt glpk - + .. _defaultconfig: @@ -113,7 +113,7 @@ Set Up the Default Configuration ================================ PyPSA-Eur has several configuration options that must be specified in a ``config.yaml`` file located in the root directory. -An example configuration ``config.default.yaml`` is maintained in the repository. +An example configuration ``config.default.yaml`` is maintained in the repository. 
More details on the configuration options are in :ref:`config`. Before first use, create a ``config.yaml`` by copying the example. diff --git a/doc/introduction.rst b/doc/introduction.rst index 96094a9f..9ca049e7 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -1,6 +1,6 @@ .. SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors - + SPDX-License-Identifier: CC-BY-4.0 .. _intro: @@ -64,4 +64,4 @@ Folder Structure System Requirements =================== -Building the model with the scripts in this repository uses up to 20 GB of memory. Computing optimal investment and operation scenarios requires a strong interior-point solver compatible with the modelling library `Pyomo `_ like `Gurobi `_ or `CPLEX `_ with up to 100 GB of memory. \ No newline at end of file +Building the model with the scripts in this repository uses up to 20 GB of memory. Computing optimal investment and operation scenarios requires a strong interior-point solver compatible with the modelling library `Pyomo `_ like `Gurobi `_ or `CPLEX `_ with up to 100 GB of memory. diff --git a/doc/limitations.rst b/doc/limitations.rst index 16c57ac9..3b6c03d7 100644 --- a/doc/limitations.rst +++ b/doc/limitations.rst @@ -37,7 +37,7 @@ improving the approximations. not spatially disaggregated; assuming, as we have done, that the load time series shape is the same at each node within each country ignores local differences. -- **Currently installed renewable capacities:** +- **Currently installed renewable capacities:** Information on existing wind, solar and small hydro, geothermal, marine and biomass power plants are excluded from the dataset because of a lack of data availability in many countries. Approximate distributions of wind and solar @@ -56,4 +56,3 @@ improving the approximations. Belarus, Ukraine, Turkey and Morocco have not been taken into account; islands which are not connected to the main European system, such as Malta, Crete and Cyprus, are also excluded from the model. 
- \ No newline at end of file diff --git a/doc/plotting.rst b/doc/plotting.rst index 641d3962..cd404226 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -173,4 +173,4 @@ Rule ``plot_network`` .. automodule:: plot_network .. image:: img/tech-colors.png - :align: center \ No newline at end of file + :align: center diff --git a/doc/preparation/base_network.rst b/doc/preparation/base_network.rst index 464b003d..1afc4e05 100644 --- a/doc/preparation/base_network.rst +++ b/doc/preparation/base_network.rst @@ -2,7 +2,7 @@ SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 - + .. _base: Rule ``base_network`` @@ -51,4 +51,4 @@ Rule ``base_network`` | -.. automodule:: base_network \ No newline at end of file +.. automodule:: base_network diff --git a/doc/preparation/build_bus_regions.rst b/doc/preparation/build_bus_regions.rst index 6935832e..16aab725 100644 --- a/doc/preparation/build_bus_regions.rst +++ b/doc/preparation/build_bus_regions.rst @@ -48,4 +48,4 @@ Rule ``build_bus_regions`` | -.. automodule:: build_bus_regions \ No newline at end of file +.. automodule:: build_bus_regions diff --git a/doc/preparation/build_cutout.rst b/doc/preparation/build_cutout.rst index 5ad29a97..da2c04d1 100644 --- a/doc/preparation/build_cutout.rst +++ b/doc/preparation/build_cutout.rst @@ -39,4 +39,4 @@ Rule ``build_cutout`` | -.. automodule:: build_cutout \ No newline at end of file +.. automodule:: build_cutout diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 6e581705..213dde66 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -32,7 +32,7 @@ Upcoming Release * Corrected setting of exogenous emission price (in config -> cost -> emission price). This was not weighted by the efficiency and effective emission of the generators. Fixed in `#171 `_. -* Don't remove capital costs from lines and links, when imposing a line volume limit (wildcard ``lv``) or a line cost limit (``lc``). 
Previously, these were removed to move the expansion in direction of the limit. +* Don't remove capital costs from lines and links, when imposing a line volume limit (wildcard ``lv``) or a line cost limit (``lc``). Previously, these were removed to move the expansion in direction of the limit. * Fix bug of clustering offwind-{ac,dc} sites in the option of high-resolution sites for renewables. Now, there are more sites for offwind-{ac,dc} available than network nodes. Before, they were clustered to the resolution of the network. (e.g. elec_s1024_37m.nc: 37 network nodes, 1024 sites) @@ -42,6 +42,9 @@ Upcoming Release * The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) +* Multiple smaller changes: Removed unused ``{network}`` wildcard, moved environment files to dedicated ``envs`` folder, + removed sector-coupling components from configuration files, minor refactoring and code cleaning (`#190 `_). + * Added an option to use custom busmaps in rule :mod:`cluster_network`. To use this feature set ``enable: custom_busmap: true``. Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``, which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``. @@ -122,7 +125,7 @@ This is the first release of PyPSA-Eur, a model of the European power system at * Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage (`#108 `_). -* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` (`#102 `_). 
+* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` (`#102 `_). * The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within a (I)python terminal (or just by calling ``python ``) and thereby facilitates developing and debugging scripts significantly (`#107 `_). @@ -133,8 +136,8 @@ Release Process * Finalise release notes at ``doc/release_notes.rst``. -* Update ``environment.fixedversions.yaml`` via - ``conda env export -n pypsa-eur -f environment.fixedversions.yaml --no-builds`` +* Update ``envs/environment.fixed.yaml`` via + ``conda env export -n pypsa-eur -f envs/environment.fixed.yaml --no-builds`` from an up-to-date `pypsa-eur` environment. * Update version number in ``doc/conf.py`` and ``*config.*.yaml``. diff --git a/doc/simplification.rst b/doc/simplification.rst index 11d4867f..280b1da0 100644 --- a/doc/simplification.rst +++ b/doc/simplification.rst @@ -22,7 +22,7 @@ After simplification and clustering of the network, additional components may be .. toctree:: :caption: Overview - + simplification/simplify_network simplification/cluster_network simplification/add_extra_components diff --git a/doc/tutorial.rst b/doc/tutorial.rst index ad525481..3772bc43 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -1,6 +1,6 @@ .. SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors - + SPDX-License-Identifier: CC-BY-4.0 .. _tutorial: @@ -48,7 +48,7 @@ The model can be adapted to only include selected countries (e.g. Germany) inste .. literalinclude:: ../config.tutorial.yaml :language: yaml :lines: 20 - + Likewise, the example's temporal scope can be restricted (e.g. to a single month). .. 
literalinclude:: ../config.tutorial.yaml @@ -119,8 +119,8 @@ orders ``snakemake`` to run the script ``solve_network`` that produces the solve .. code:: rule solve_network: - input: "networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" - output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" + output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc" [...] script: "scripts/solve_network.py" @@ -132,7 +132,7 @@ orders ``snakemake`` to run the script ``solve_network`` that produces the solve Windows users should add the option ``--keep-target-files`` to the command or instead run ``snakemake -j 1 solve_all_elec_networks``. This triggers a workflow of multiple preceding jobs that depend on each rule's inputs and outputs: - + .. graphviz:: :align: center @@ -184,7 +184,7 @@ This triggers a workflow of multiple preceding jobs that depend on each rule's i 7 -> 11 5 -> 11 12 -> 11 - } + } | @@ -229,8 +229,8 @@ A job (here ``simplify_network``) will display its attributes and normally some INFO:__main__:Mapping all network lines onto a single 380kV layer INFO:__main__:Simplifying connected link components INFO:__main__:Removing stubs - INFO:__main__:Displacing offwind-ac generator(s) and adding connection costs to capital_costs: 20128 Eur/MW/a for `5718 offwind-ac` - INFO:__main__:Displacing offwind-dc generator(s) and adding connection costs to capital_costs: 14994 Eur/MW/a for `5718 offwind-dc`, 26939 Eur/MW/a for `5724 offwind-dc`, 29621 Eur/MW/a for `5725 offwind-dc` + INFO:__main__:Displacing offwind-ac generator(s) and adding connection costs to capital_costs: 20128 Eur/MW/a for `5718 offwind-ac` + INFO:__main__:Displacing offwind-dc generator(s) and adding connection costs to capital_costs: 14994 Eur/MW/a for `5718 offwind-dc`, 26939 Eur/MW/a for `5724 offwind-dc`, 29621 Eur/MW/a for `5725 offwind-dc` INFO:pypsa.io:Exported network elec_s.nc has lines, carriers, 
links, storage_units, loads, buses, generators [] Finished job 3. @@ -293,5 +293,5 @@ For inspiration, read the `examples section in the PyPSA documentation `_. -.. _network: - -The ``{network}`` wildcard -========================== - -The ``{network}`` wildcard specifies the considered energy sector(s) -and, as currently only ``elec`` (for electricity) is included, -it currently represents rather a placeholder wildcard to facilitate -future extensions including multiple energy sectors at once. - .. _simpl: The ``{simpl}`` wildcard @@ -75,7 +65,7 @@ The wildcard, in general, consists of two parts: 2. The second part can be ``opt`` or a float bigger than one (e.g. 1.25). - + (a) If ``opt`` is chosen line expansion is optimised according to its capital cost (where the choice ``v`` only considers overhead costs for HVDC transmission lines, while @@ -84,7 +74,7 @@ The wildcard, in general, consists of two parts: (b) ``v1.25`` will limit the total volume of line expansion to 25 % of currently installed capacities weighted by - individual line lengths; investment costs are neglected. + individual line lengths; investment costs are neglected. (c) ``c1.25`` will allow to build a transmission network that costs no more than 25 % more than the current system. 
diff --git a/environment.docs.yaml b/envs/environment.docs.yaml similarity index 59% rename from environment.docs.yaml rename to envs/environment.docs.yaml index 762e89af..0c937e43 100755 --- a/environment.docs.yaml +++ b/envs/environment.docs.yaml @@ -5,19 +5,17 @@ name: pypsa-eur-docs channels: - conda-forge - #- bioconda dependencies: - #- python + - python<=3.7 - pip - pypsa>=0.17.1 - atlite=0.0.3 + - pre-commit # Dependencies of the workflow itself - #- xlrd - scikit-learn - pycountry - seaborn - #- snakemake-minimal - memory_profiler - yaml - pytables @@ -25,31 +23,21 @@ dependencies: # Second order dependencies which should really be deps of atlite - xarray - #- netcdf4 - #- bottleneck - #- toolz - #- dask - progressbar2 - pyyaml>=5.1.0 - # Include ipython so that one does not inadvertently drop out of the conda - # environment by calling ipython - # - ipython - # GIS dependencies have to come all from conda-forge - - conda-forge::cartopy - - conda-forge::fiona - - conda-forge::proj - - conda-forge::pyshp - - conda-forge::geopandas - - conda-forge::rasterio - - conda-forge::shapely - - conda-forge::libgdal + - cartopy + - fiona + - proj + - pyshp + - geopandas + - rasterio + - shapely + - libgdal - # The FRESNA/KIT stuff is not packaged for conda yet - pip: - vresutils==0.3.1 - - tsam>=1.1.0 - git+https://github.com/PyPSA/glaes.git#egg=glaes - git+https://github.com/PyPSA/geokit.git#egg=geokit - cdsapi diff --git a/environment.fixedversions.yaml b/envs/environment.fixed.yaml similarity index 100% rename from environment.fixedversions.yaml rename to envs/environment.fixed.yaml diff --git a/environment.yaml b/envs/environment.yaml similarity index 82% rename from environment.yaml rename to envs/environment.yaml index cb322632..6514dae1 100644 --- a/environment.yaml +++ b/envs/environment.yaml @@ -37,8 +37,7 @@ dependencies: - progressbar2 - pyyaml>=5.1.0 - # Include ipython so that one does not inadvertently drop out of the conda - # environment by calling 
ipython + # Keep in conda environment when calling ipython - ipython # GIS dependencies: @@ -52,9 +51,6 @@ dependencies: - libgdal<=3.0.4 - descartes - # Solvers - - gurobi:gurobi # until https://github.com/conda-forge/pypsa-feedstock/issues/4 closed - - pip: - vresutils==0.3.1 - tsam>=1.1.0 diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 85f5eb76..807c439f 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -44,6 +44,7 @@ def configure_logging(snakemake, skip_handlers=False): }) logging.basicConfig(**kwargs) + def load_network(import_name=None, custom_components=None): """ Helper for importing a pypsa.Network with additional custom components. @@ -70,7 +71,6 @@ def load_network(import_name=None, custom_components=None): ------- pypsa.Network """ - import pypsa from pypsa.descriptors import Dict @@ -90,10 +90,12 @@ def load_network(import_name=None, custom_components=None): override_components=override_components, override_component_attrs=override_component_attrs) + def pdbcast(v, h): return pd.DataFrame(v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index) + def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): import pypsa from add_electricity import update_transmission_costs, load_costs @@ -113,11 +115,11 @@ def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True): if combine_hydro_ps: n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS' - # #if the carrier was not set on the heat storage units + # if the carrier was not set on the heat storage units # bus_carrier = n.storage_units.bus.map(n.buses.carrier) # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks" - Nyears = n.snapshot_weightings.sum()/8760. + Nyears = n.snapshot_weightings.sum() / 8760. 
costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity']) update_transmission_costs(n, costs) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 80904b7a..4338a440 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -85,25 +85,28 @@ It further adds extendable ``generators`` with **zero** capacity for - additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``) """ -from vresutils.costdata import annuity -from vresutils import transfer as vtransfer - import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging +import pypsa import pandas as pd import numpy as np import xarray as xr import geopandas as gpd -import pypsa import powerplantmatching as ppm +from vresutils.costdata import annuity +from vresutils.load import timeseries_opsd +from vresutils import transfer as vtransfer + idx = pd.IndexSlice +logger = logging.getLogger(__name__) + def normed(s): return s/s.sum() + def _add_missing_carriers_from_costs(n, costs, carriers): missing_carriers = pd.Index(carriers).difference(n.carriers.index) if missing_carriers.empty: return @@ -115,6 +118,7 @@ def _add_missing_carriers_from_costs(n, costs, carriers): emissions.index = missing_carriers n.import_components_from_dataframe(emissions, 'Carrier') + def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None): if tech_costs is None: tech_costs = snakemake.input.tech_costs @@ -184,21 +188,17 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None): return costs + def load_powerplants(ppl_fn=None): if ppl_fn is None: ppl_fn = snakemake.input.powerplants - carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy':'biomass', + carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass', 'ccgt, thermal': 'CCGT', 'hard coal': 'coal'} return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 
'str'}) .powerplant.to_pypsa_names() .rename(columns=str.lower).drop(columns=['efficiency']) .replace({'carrier': carrier_dict})) -# ============================================================================= -# Attach components -# ============================================================================= - -# ### Load def attach_load(n): substation_lv_i = n.buses.index[n.buses['substation_lv']] @@ -238,7 +238,6 @@ def attach_load(n): n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) -### Set line costs def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False): n.lines['capital_cost'] = (n.lines['length'] * length_factor * @@ -259,7 +258,6 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal costs.at['HVDC inverter pair', 'capital_cost']) n.links.loc[dc_b, 'capital_cost'] = costs -### Generators def attach_wind_and_solar(n, costs): for tech in snakemake.config['renewable']: @@ -298,15 +296,17 @@ def attach_wind_and_solar(n, costs): p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas()) - def attach_conventional_generators(n, costs, ppl): carriers = snakemake.config['electricity']['conventional_carriers'] + _add_missing_carriers_from_costs(n, costs, carriers) + ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier') .rename(index=lambda s: 'C' + str(s))) logger.info('Adding {} generators with capacities\n{}' .format(len(ppl), ppl.groupby('carrier').p_nom.sum())) + n.madd("Generator", ppl.index, carrier=ppl.carrier, bus=ppl.bus, @@ -314,6 +314,7 @@ def attach_conventional_generators(n, costs, ppl): efficiency=ppl.efficiency, marginal_cost=ppl.marginal_cost, capital_cost=0) + logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.') @@ -363,8 +364,8 @@ def attach_hydro(n, costs, ppl): .where(lambda df: df<=1., other=1.))) if 'PHS' in carriers and not phs.empty: - # fill missing max hours to config value and assume no natural inflow - # due to lack of data 
+ # fill missing max hours to config value and + # assume no natural inflow due to lack of data phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}}) n.madd('StorageUnit', phs.index, carrier='PHS', @@ -402,7 +403,6 @@ def attach_hydro(n, costs, ppl): hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0, hydro.country.map(max_hours_country)).fillna(6) - n.madd('StorageUnit', hydro.index, carrier='hydro', bus=hydro['bus'], p_nom=hydro['p_nom'], @@ -421,6 +421,7 @@ def attach_hydro(n, costs, ppl): def attach_extendable_generators(n, costs, ppl): elec_opts = snakemake.config['electricity'] carriers = pd.Index(elec_opts['extendable_carriers']['Generator']) + _add_missing_carriers_from_costs(n, costs, carriers) for tech in carriers: @@ -486,10 +487,11 @@ def estimate_renewable_capacities(n, tech_map=None): n.generators.loc[tech_i, 'p_nom'] = ( (n.generators_t.p_max_pu[tech_i].mean() * n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation - .groupby(n.generators.bus.map(n.buses.country)) # for each country + .groupby(n.generators.bus.map(n.buses.country)) .transform(lambda s: normed(s) * tech_capacities.at[s.name]) .where(lambda s: s>0.1, 0.)) # only capacities above 100kW + def add_nice_carrier_names(n, config=None): if config is None: config = snakemake.config carrier_i = n.carriers.index @@ -511,7 +513,7 @@ if __name__ == "__main__": configure_logging(snakemake) n = pypsa.Network(snakemake.input.base_network) - Nyears = n.snapshot_weightings.sum()/8760. + Nyears = n.snapshot_weightings.sum() / 8760. 
costs = load_costs(Nyears) ppl = load_powerplants() diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 0b48af2a..e2cd8ea6 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -37,30 +37,33 @@ Inputs Outputs ------- -- ``networks/{network}_s{simpl}_{clusters}_ec.nc``: +- ``networks/elec_s{simpl}_{clusters}_ec.nc``: Description ----------- -The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config.yaml`` at ``electricity: extendable_carriers: ``. It processes ``networks/{network}_s{simpl}_{clusters}.nc`` to build ``networks/{network}_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity +The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config.yaml`` at ``electricity: extendable_carriers: ``. It processes ``networks/elec_s{simpl}_{clusters}.nc`` to build ``networks/elec_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity - ``StorageUnits`` of carrier 'H2' and/or 'battery'. If this option is chosen, every bus is given an extendable ``StorageUnit`` of the corresponding carrier. The energy and power capacities are linked through a parameter that specifies the energy capacity as maximum hours at full dispatch power and is configured in ``electricity: max_hours:``. This linkage leads to one investment variable per storage unit. The default ``max_hours`` lead to long-term hydrogen and short-term battery storage units. - ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. 
If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit. """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging +import pypsa import pandas as pd import numpy as np -import pypsa + from add_electricity import (load_costs, add_nice_carrier_names, _add_missing_carriers_from_costs) idx = pd.IndexSlice +logger = logging.getLogger(__name__) + + def attach_storageunits(n, costs): elec_opts = snakemake.config['electricity'] carriers = elec_opts['extendable_carriers']['StorageUnit'] @@ -85,6 +88,7 @@ def attach_storageunits(n, costs): max_hours=max_hours[carrier], cyclic_state_of_charge=True) + def attach_stores(n, costs): elec_opts = snakemake.config['electricity'] carriers = elec_opts['extendable_carriers']['Store'] @@ -147,6 +151,7 @@ def attach_stores(n, costs): capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True) + def attach_hydrogen_pipelines(n, costs): elec_opts = snakemake.config['electricity'] ext_carriers = elec_opts['extendable_carriers'] @@ -179,6 +184,7 @@ def attach_hydrogen_pipelines(n, costs): efficiency=costs.at['H2 pipeline','efficiency'], carrier="H2 pipeline") + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake @@ -187,7 +193,7 @@ if __name__ == "__main__": configure_logging(snakemake) n = pypsa.Network(snakemake.input.network) - Nyears = n.snapshot_weightings.sum()/8760. + Nyears = n.snapshot_weightings.sum() / 8760. 
costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs, config=snakemake.config['costs'], elec_config=snakemake.config['electricity']) diff --git a/scripts/base_network.py b/scripts/base_network.py index e64f533b..e43c4baf 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -63,14 +63,16 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging +import pypsa import yaml import pandas as pd import geopandas as gpd import numpy as np import scipy as sp +import networkx as nx + from scipy.sparse import csgraph from six import iteritems from itertools import product @@ -78,9 +80,8 @@ from itertools import product from shapely.geometry import Point, LineString import shapely, shapely.prepared, shapely.wkt -import networkx as nx +logger = logging.getLogger(__name__) -import pypsa def _get_oid(df): if "tags" in df.columns: @@ -88,12 +89,14 @@ def _get_oid(df): else: return pd.Series(np.nan, df.index) + def _get_country(df): if "tags" in df.columns: return df.tags.str.extract('"country"=>"([A-Z]{2})"', expand=False) else: return pd.Series(np.nan, df.index) + def _find_closest_links(links, new_links, distance_upper_bound=1.5): treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten() for s in links.geometry]) @@ -109,6 +112,7 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5): [lambda ds: ~ds.index.duplicated(keep='first')]\ .sort_index()['i'] + def _load_buses_from_eg(): buses = (pd.read_csv(snakemake.input.eg_buses, quotechar="'", true_values='t', false_values='f', @@ -130,6 +134,7 @@ def _load_buses_from_eg(): return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b]) + def _load_transformers_from_eg(buses): transformers = (pd.read_csv(snakemake.input.eg_transformers, quotechar="'", true_values='t', false_values='f', @@ -140,6 +145,7 @@ def _load_transformers_from_eg(buses): return transformers + def _load_converters_from_eg(buses): 
converters = (pd.read_csv(snakemake.input.eg_converters, quotechar="'", true_values='t', false_values='f', @@ -241,6 +247,7 @@ def _add_links_from_tyndp(buses, links): return buses, links.append(links_tyndp, sort=True) + def _load_lines_from_eg(buses): lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values='t', false_values='f', dtype=dict(line_id='str', bus0='str', bus1='str', @@ -254,11 +261,13 @@ def _load_lines_from_eg(buses): return lines + def _apply_parameter_corrections(n): with open(snakemake.input.parameter_corrections) as f: corrections = yaml.safe_load(f) if corrections is None: return + for component, attrs in iteritems(corrections): df = n.df(component) oid = _get_oid(df) @@ -275,6 +284,7 @@ def _apply_parameter_corrections(n): inds = r.index.intersection(df.index) df.loc[inds, attr] = r[inds].astype(df[attr].dtype) + def _set_electrical_parameters_lines(lines): v_noms = snakemake.config['electricity']['voltages'] linetypes = snakemake.config['lines']['types'] @@ -286,12 +296,14 @@ def _set_electrical_parameters_lines(lines): return lines + def _set_lines_s_nom_from_linetypes(n): n.lines['s_nom'] = ( np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * n.lines['v_nom'] * n.lines.num_parallel ) + def _set_electrical_parameters_links(links): if links.empty: return links @@ -300,24 +312,25 @@ def _set_electrical_parameters_links(links): links['p_min_pu'] = -p_max_pu links_p_nom = pd.read_csv(snakemake.input.links_p_nom) - - #Filter links that are not in operation anymore + + # filter links that are not in operation anymore removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False) links_p_nom = links_p_nom[~removed_b] - - #find closest link for all links in links_p_nom + + # find closest link for all links in links_p_nom links_p_nom['j'] = _find_closest_links(links, links_p_nom) - - links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) - + + links_p_nom = 
links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) + p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"] - + # Don't update p_nom if it's already set p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset return links + def _set_electrical_parameters_converters(converters): p_max_pu = snakemake.config['links'].get('p_max_pu', 1.) converters['p_max_pu'] = p_max_pu @@ -331,6 +344,7 @@ def _set_electrical_parameters_converters(converters): return converters + def _set_electrical_parameters_transformers(transformers): config = snakemake.config['transformers'] @@ -341,9 +355,11 @@ def _set_electrical_parameters_transformers(transformers): return transformers + def _remove_dangling_branches(branches, buses): return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]) + def _remove_unconnected_components(network): _, labels = csgraph.connected_components(network.adjacency_matrix(), directed=False) component = pd.Series(labels, index=network.buses.index) @@ -356,6 +372,7 @@ def _remove_unconnected_components(network): return network[component == component_sizes.index[0]] + def _set_countries_and_substations(n): buses = n.buses @@ -442,6 +459,7 @@ def _set_countries_and_substations(n): return buses + def _replace_b2b_converter_at_country_border_by_link(n): # Affects only the B2B converter in Lithuania at the Polish border at the moment buscntry = n.buses.country @@ -479,6 +497,7 @@ def _replace_b2b_converter_at_country_border_by_link(n): logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}" .format(i, b0, line, linkcntry.at[i], buscntry.at[b1])) + def _set_links_underwater_fraction(n): if n.links.empty: return @@ -489,6 +508,7 @@ def _set_links_underwater_fraction(n): links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads)) 
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length + def _adjust_capacities_of_under_construction_branches(n): lines_mode = snakemake.config['lines'].get('under_construction', 'undef') if lines_mode == 'zero': @@ -513,6 +533,7 @@ def _adjust_capacities_of_under_construction_branches(n): return n + def base_network(): buses = _load_buses_from_eg() @@ -534,7 +555,7 @@ def base_network(): n.name = 'PyPSA-Eur' n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots'])) - n.snapshot_weightings[:] *= 8760./n.snapshot_weightings.sum() + n.snapshot_weightings[:] *= 8760. / n.snapshot_weightings.sum() n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(lines, "Line") @@ -565,4 +586,5 @@ if __name__ == "__main__": configure_logging(snakemake) n = base_network() + n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py index ce012c39..87890d92 100644 --- a/scripts/build_bus_regions.py +++ b/scripts/build_bus_regions.py @@ -42,17 +42,24 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging -from vresutils.graph import voronoi_partition_pts - +import pypsa import os - import pandas as pd import geopandas as gpd -import pypsa +from vresutils.graph import voronoi_partition_pts + +logger = logging.getLogger(__name__) + + +def save_to_geojson(s, fn): + if os.path.exists(fn): + os.unlink(fn) + schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'} + s.to_file(fn, driver='GeoJSON', schema=schema) + if __name__ == "__main__": if 'snakemake' not in globals(): @@ -96,12 +103,6 @@ if __name__ == "__main__": offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] offshore_regions.append(offshore_regions_c) - def save_to_geojson(s, fn): - if os.path.exists(fn): - os.unlink(fn) - schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'} - s.to_file(fn, 
driver='GeoJSON', schema=schema) - save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore) save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore) diff --git a/scripts/build_country_flh.py b/scripts/build_country_flh.py index 2fb8a173..459b8f38 100644 --- a/scripts/build_country_flh.py +++ b/scripts/build_country_flh.py @@ -63,7 +63,6 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import os @@ -84,6 +83,9 @@ import progressbar as pgb from build_renewable_profiles import init_globals, calculate_potential +logger = logging.getLogger(__name__) + + def build_area(flh, countries, areamatrix, breaks, fn): area_unbinned = xr.DataArray(areamatrix.todense(), [countries, capacity_factor.coords['spatial']]) bins = xr.DataArray(pd.cut(flh.to_series(), bins=breaks), flh.coords, name="bins") @@ -92,6 +94,7 @@ def build_area(flh, countries, areamatrix, breaks, fn): area.columns = area.columns.map(lambda s: s.left) return area + def plot_area_not_solar(area, countries): # onshore wind/offshore wind a = area.T diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index b6fc6761..1e55faf5 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -92,12 +92,13 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import os import atlite +logger = logging.getLogger(__name__) + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake @@ -113,4 +114,6 @@ if __name__ == "__main__": cutout_dir=os.path.dirname(snakemake.output[0]), **cutout_params) - cutout.prepare(nprocesses=snakemake.config['atlite'].get('nprocesses', 4)) + nprocesses = snakemake.config['atlite'].get('nprocesses', 4) + + cutout.prepare(nprocesses=nprocesses) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 0736511a..339fccaf 
100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -60,7 +60,6 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import os @@ -68,6 +67,8 @@ import atlite import geopandas as gpd from vresutils import hydro as vhydro +logger = logging.getLogger(__name__) + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake @@ -75,8 +76,8 @@ if __name__ == "__main__": configure_logging(snakemake) config = snakemake.config['renewable']['hydro'] - cutout = atlite.Cutout(config['cutout'], - cutout_dir=os.path.dirname(snakemake.input.cutout)) + cutout_dir = os.path.dirname(snakemake.input.cutout) + cutout = atlite.Cutout(config['cutout'], cutout_dir=cutout_dir) countries = snakemake.config['countries'] country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'].reindex(countries) @@ -84,9 +85,9 @@ if __name__ == "__main__": eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries) inflow = cutout.runoff(shapes=country_shapes, - smooth=True, - lower_threshold_quantile=True, - normalize_using_yearly=eia_stats) + smooth=True, + lower_threshold_quantile=True, + normalize_using_yearly=eia_stats) if 'clip_min_inflow' in config: inflow.values[inflow.values < config['clip_min_inflow']] = 0. diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py index 0b781dd3..a4745630 100755 --- a/scripts/build_load_data.py +++ b/scripts/build_load_data.py @@ -1,4 +1,7 @@ -# coding: utf-8 +# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors +# +# SPDX-License-Identifier: GPL-3.0-or-later + """ This rule downloads the load data from `Open Power System Data Time series `_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. 
After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file. diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index f2ee491b..39667ca0 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -41,6 +41,7 @@ Description import logging from _helpers import configure_logging + import atlite import geokit as gk from pathlib import Path @@ -58,7 +59,7 @@ def determine_cutout_xXyY(cutout_name): if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake - snakemake = mock_snakemake('build_natura_raster') #has to be enabled + snakemake = mock_snakemake('build_natura_raster') configure_logging(snakemake) cutout_dir = Path(snakemake.input.cutouts[0]).parent.resolve() diff --git a/scripts/build_powerplants.py b/scripts/build_powerplants.py index 67bdaeb9..8b329469 100755 --- a/scripts/build_powerplants.py +++ b/scripts/build_powerplants.py @@ -72,16 +72,18 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity: """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging -from scipy.spatial import cKDTree as KDTree - import pypsa import powerplantmatching as pm import pandas as pd import numpy as np +from scipy.spatial import cKDTree as KDTree + +logger = logging.getLogger(__name__) + + def add_custom_powerplants(ppl): custom_ppl_query = snakemake.config['electricity']['custom_powerplants'] if not custom_ppl_query: @@ -94,7 +96,6 @@ def add_custom_powerplants(ppl): if __name__ == "__main__": - if 'snakemake' not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake('build_powerplants') diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index 6578fd75..71adb66e 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -181,27 +181,28 @@ node (`p_nom_max`): 
``simple`` and ``conservative``: """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging -import matplotlib.pyplot as plt - import os import atlite + import numpy as np import xarray as xr import pandas as pd import multiprocessing as mp +import matplotlib.pyplot as plt +import progressbar as pgb from scipy.sparse import csr_matrix, vstack - from pypsa.geo import haversine from vresutils import landuse as vlanduse from vresutils.array import spdiag -import progressbar as pgb +logger = logging.getLogger(__name__) bounds = dx = dy = config = paths = gebco = clc = natura = None + + def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths): # Late import so that the GDAL Context is only created in the new processes global gl, gk, gdal @@ -227,6 +228,7 @@ def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths): natura = gk.raster.loadRaster(paths["natura"]) + def downsample_to_coarse_grid(bounds, dx, dy, mask, data): # The GDAL warp function with the 'average' resample algorithm needs a band of zero values of at least # the size of one coarse cell around the original raster or it produces erroneous results @@ -238,6 +240,7 @@ def downsample_to_coarse_grid(bounds, dx, dy, mask, data): assert gdal.Warp(average, padded, resampleAlg='average') == 1, "gdal warp failed: %s" % gdal.GetLastErrorMsg() return average + def calculate_potential(gid, save_map=None): feature = gk.vector.extractFeature(paths["regions"], where=gid) ec = gl.ExclusionCalculator(feature.geom) diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index 1d6fc5e1..2651837b 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -92,6 +92,7 @@ def _get_country(target, **keys): except (KeyError, AttributeError): return np.nan + def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): if isinstance(polys, MultiPolygon): polys = sorted(polys, key=attrgetter('area'), reverse=True) @@ -105,6 +106,7 @@ def 
_simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): polys = mainpoly return polys.simplify(tolerance=tolerance) + def countries(): cntries = snakemake.config['countries'] if 'RS' in cntries: cntries.append('KV') @@ -121,6 +123,7 @@ def countries(): return s + def eez(country_shapes): df = gpd.read_file(snakemake.input.eez) df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])] @@ -130,6 +133,7 @@ def eez(country_shapes): s.index.name = "name" return s + def country_cover(country_shapes, eez_shapes=None): shapes = list(country_shapes) if eez_shapes is not None: @@ -140,6 +144,7 @@ def country_cover(country_shapes, eez_shapes=None): europe_shape = max(europe_shape, key=attrgetter('area')) return Polygon(shell=europe_shape.exterior) + def nuts3(country_shapes): df = gpd.read_file(snakemake.input.nuts3) df = df.loc[df['STAT_LEVL_'] == 3] @@ -158,7 +163,6 @@ def nuts3(country_shapes): .applymap(lambda x: pd.to_numeric(x, errors='coerce')) .fillna(method='bfill', axis=1))['2014'] - # Swiss data cantons = pd.read_csv(snakemake.input.ch_cantons) cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS'] cantons = cantons.str.pad(5, side='right', fillchar='0') @@ -197,6 +201,7 @@ def nuts3(country_shapes): return df + def save_to_geojson(df, fn): if os.path.exists(fn): os.unlink(fn) @@ -206,20 +211,23 @@ def save_to_geojson(df, fn): schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} df.to_file(fn, driver='GeoJSON', schema=schema) + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake('build_shapes') configure_logging(snakemake) + out = snakemake.output + country_shapes = countries() - save_to_geojson(country_shapes, snakemake.output.country_shapes) + save_to_geojson(country_shapes, out.country_shapes) offshore_shapes = eez(country_shapes) - save_to_geojson(offshore_shapes, snakemake.output.offshore_shapes) + 
save_to_geojson(offshore_shapes, out.offshore_shapes) europe_shape = country_cover(country_shapes, offshore_shapes) - save_to_geojson(gpd.GeoSeries(europe_shape), snakemake.output.europe_shape) + save_to_geojson(gpd.GeoSeries(europe_shape), out.europe_shape) nuts3_shapes = nuts3(country_shapes) - save_to_geojson(nuts3_shapes, snakemake.output.nuts3_shapes) + save_to_geojson(nuts3_shapes, out.nuts3_shapes) diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py index 3fbe2d68..a01f682f 100644 --- a/scripts/cluster_network.py +++ b/scripts/cluster_network.py @@ -31,28 +31,28 @@ Relevant Settings Inputs ------ -- ``resources/regions_onshore_{network}_s{simpl}.geojson``: confer :ref:`simplify` -- ``resources/regions_offshore_{network}_s{simpl}.geojson``: confer :ref:`simplify` -- ``resources/busmap_{network}_s{simpl}.csv``: confer :ref:`simplify` -- ``networks/{network}_s{simpl}.nc``: confer :ref:`simplify` -- ``data/custom_busmap_{network}_s{simpl}_{clusters}.csv``: optional input +- ``resources/regions_onshore_elec_s{simpl}.geojson``: confer :ref:`simplify` +- ``resources/regions_offshore_elec_s{simpl}.geojson``: confer :ref:`simplify` +- ``resources/busmap_elec_s{simpl}.csv``: confer :ref:`simplify` +- ``networks/elec_s{simpl}.nc``: confer :ref:`simplify` +- ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``: optional input Outputs ------- -- ``resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson``: +- ``resources/regions_onshore_elec_s{simpl}_{clusters}.geojson``: .. image:: ../img/regions_onshore_elec_s_X.png :scale: 33 % -- ``resources/regions_offshore_{network}_s{simpl}_{clusters}.geojson``: +- ``resources/regions_offshore_elec_s{simpl}_{clusters}.geojson``: .. 
image:: ../img/regions_offshore_elec_s_X.png :scale: 33 % -- ``resources/busmap_{network}_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; -- ``resources/linemap_{network}_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; -- ``networks/{network}_s{simpl}_{clusters}.nc``: +- ``resources/busmap_elec_s{simpl}_{clusters}.csv``: Mapping of buses from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; +- ``resources/linemap_elec_s{simpl}_{clusters}.csv``: Mapping of lines from ``networks/elec_s{simpl}.nc`` to ``networks/elec_s{simpl}_{clusters}.nc``; +- ``networks/elec_s{simpl}_{clusters}.nc``: .. image:: ../img/elec_s_X.png :scale: 40 % @@ -117,36 +117,38 @@ Exemplary unsolved network clustered to 37 nodes: .. image:: ../img/elec_s_37.png :scale: 40 % - :align: center + :align: center """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging -import pandas as pd -idx = pd.IndexSlice - +import pypsa import os +import shapely + +import pandas as pd import numpy as np import geopandas as gpd -import shapely +import pyomo.environ as po import matplotlib.pyplot as plt import seaborn as sns from six.moves import reduce -import pyomo.environ as po - -import pypsa from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering, _make_consense, get_clustering_from_busmap) from add_electricity import load_costs -def normed(x): - return (x/x.sum()).fillna(0.) +idx = pd.IndexSlice + +logger = logging.getLogger(__name__) + + +def normed(x): return (x/x.sum()).fillna(0.) + def weighting_for_country(n, x): conv_carriers = {'OCGT','CCGT','PHS', 'hydro'} @@ -164,22 +166,13 @@ def weighting_for_country(n, x): g = normed(gen.reindex(b_i, fill_value=0)) l = normed(load.reindex(b_i, fill_value=0)) - w= g + l + w = g + l return (w * (100. 
/ w.max())).clip(lower=1.).astype(int) -## Plot weighting for Germany - -def plot_weighting(n, country, country_shape=None): - n.plot(bus_sizes=(2*weighting_for_country(n.buses.loc[n.buses.country == country])).reindex(n.buses.index, fill_value=1)) - if country_shape is not None: - plt.xlim(country_shape.bounds[0], country_shape.bounds[2]) - plt.ylim(country_shape.bounds[1], country_shape.bounds[3]) - - -# # Determining the number of clusters per country - def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): + """Determine the number of clusters per country""" + if solver_name is None: solver_name = snakemake.config['solving']['solver']['name'] @@ -191,7 +184,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): N = n.buses.groupby(['country', 'sub_network']).size() assert n_clusters >= len(N) and n_clusters <= N.sum(), \ - "Number of clusters must be {} <= n_clusters <= {} for this selection of countries.".format(len(N), N.sum()) + f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries." if focus_weights is not None: @@ -207,7 +200,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): logger.warning('Using custom focus weights for determining number of clusters.') - assert np.isclose(L.sum(), 1.0, rtol=1e-3), "Country weights L must sum up to 1.0 when distributing clusters. Is {}.".format(L.sum()) + assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}." 
m = po.ConcreteModel() def n_bounds(model, *n_id): @@ -223,10 +216,11 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): opt = po.SolverFactory('ipopt') results = opt.solve(m) - assert results['Solver'][0]['Status'] == 'ok', "Solver returned non-optimally: {}".format(results) + assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}" return pd.Series(m.n.get_values(), index=L.index).astype(int) + def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds): if algorithm == "kmeans": algorithm_kwds.setdefault('n_init', 1000) @@ -245,7 +239,7 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori def busmap_for_country(x): prefix = x.name[0] + x.name[1] + ' ' - logger.debug("Determining busmap for country {}".format(prefix[:-1])) + logger.debug(f"Determining busmap for country {prefix[:-1]}") if len(x) == 1: return pd.Series(prefix + '0', index=x.index) weight = weighting_for_country(n, x) @@ -262,12 +256,6 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori return (n.buses.groupby(['country', 'sub_network'], group_keys=False) .apply(busmap_for_country).squeeze().rename('busmap')) -def plot_busmap_for_n_clusters(n, n_clusters=50): - busmap = busmap_for_n_clusters(n, n_clusters) - cs = busmap.unique() - cr = sns.color_palette("hls", len(cs)) - n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) - del cs, cr def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None, line_length_factor=1.25, potential_mode='simple', solver_name="cbc", @@ -278,8 +266,7 @@ def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carr elif potential_mode == 'conservative': p_nom_max_strategy = np.min else: - raise AttributeError("potential_mode should be one of 'simple' or 'conservative', " - "but is '{}'".format(potential_mode)) + raise AttributeError(f"potential_mode 
should be one of 'simple' or 'conservative' but is '{potential_mode}'") if custom_busmap: busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True) @@ -309,6 +296,7 @@ def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carr return clustering + def save_to_geojson(s, fn): if os.path.exists(fn): os.unlink(fn) @@ -316,6 +304,7 @@ def save_to_geojson(s, fn): schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} df.to_file(fn, driver='GeoJSON', schema=schema) + def cluster_regions(busmaps, input=None, output=None): if input is None: input = snakemake.input if output is None: output = snakemake.output @@ -329,6 +318,17 @@ def cluster_regions(busmaps, input=None, output=None): regions_c.index.name = 'name' save_to_geojson(regions_c, getattr(output, which)) + +def plot_busmap_for_n_clusters(n, n_clusters, fn=None): + busmap = busmap_for_n_clusters(n, n_clusters) + cs = busmap.unique() + cr = sns.color_palette("hls", len(cs)) + n.plot(bus_colors=busmap.map(dict(zip(cs, cr)))) + if fn is not None: + plt.savefig(fn, bbox_inches='tight') + del cs, cr + + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake diff --git a/scripts/make_summary.py b/scripts/make_summary.py index db9eff46..ada3fa8a 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -54,22 +54,22 @@ Replacing '/summaries/' with '/plots/' creates nice colored maps of the results. 
""" import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import os - -from six import iteritems +import pypsa import pandas as pd -import pypsa - +from six import iteritems from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice +logger = logging.getLogger(__name__) + opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} + def _add_indexed_rows(df, raw_index): new_index = df.index|pd.MultiIndex.from_product(raw_index) if isinstance(new_index, pd.Index): @@ -77,6 +77,7 @@ def _add_indexed_rows(df, raw_index): return df.reindex(new_index) + def assign_carriers(n): if "carrier" not in n.loads: @@ -97,7 +98,8 @@ def assign_carriers(n): if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "": n.stores.loc["EU gas Store","carrier"] = "gas Store" -def calculate_costs(n,label,costs): + +def calculate_costs(n, label, costs): for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] @@ -130,7 +132,7 @@ def calculate_costs(n,label,costs): return costs -def calculate_curtailment(n,label,curtailment): +def calculate_curtailment(n, label, curtailment): avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() @@ -139,7 +141,7 @@ def calculate_curtailment(n,label,curtailment): return curtailment -def calculate_energy(n,label,energy): +def calculate_energy(n, label, energy): for c in n.iterate_components(n.one_port_components|n.branch_components): @@ -159,6 +161,7 @@ def include_in_summary(summary, multiindexprefix, label, item): summary = _add_indexed_rows(summary, raw_index) summary.loc[idx[raw_index], label] = item.values + return summary def calculate_capacity(n,label,capacity): @@ -178,7 +181,7 @@ def calculate_capacity(n,label,capacity): return 
capacity -def calculate_supply(n,label,supply): +def calculate_supply(n, label, supply): """calculate the max dispatch of each component at the buses where the loads are attached""" load_types = n.loads.carrier.value_counts().index @@ -224,7 +227,8 @@ def calculate_supply(n,label,supply): return supply -def calculate_supply_energy(n,label,supply_energy): + +def calculate_supply_energy(n, label, supply_energy): """calculate the total dispatch of each component at the buses where the loads are attached""" load_types = n.loads.carrier.value_counts().index @@ -269,6 +273,7 @@ def calculate_supply_energy(n,label,supply_energy): return supply_energy + def calculate_metrics(n,label,metrics): metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])) @@ -295,16 +300,15 @@ def calculate_prices(n,label,prices): prices = prices.reindex(prices.index|bus_type.value_counts().index) - #WARNING: this is time-averaged, should really be load-weighted average + logger.warning("Prices are time-averaged, not load-weighted") prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean() return prices - def calculate_weighted_prices(n,label,weighted_prices): - # Warning: doesn't include storage units as loads + logger.warning("Weighted prices don't include storage units as loads") weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"])) @@ -347,7 +351,7 @@ def calculate_weighted_prices(n,label,weighted_prices): load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1) - #Add H2 Store when charging + # Add H2 Store when charging if carrier == "H2": stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1) stores[stores > 0.] = 0. 
@@ -361,62 +365,6 @@ def calculate_weighted_prices(n,label,weighted_prices): return weighted_prices - -# BROKEN don't use -# -# def calculate_market_values(n, label, market_values): -# # Warning: doesn't include storage units - -# n.buses["suffix"] = n.buses.index.str[2:] -# suffix = "" -# buses = n.buses.index[n.buses.suffix == suffix] - -# ## First do market value of generators ## -# generators = n.generators.index[n.buses.loc[n.generators.bus,"suffix"] == suffix] -# techs = n.generators.loc[generators,"carrier"].value_counts().index -# market_values = market_values.reindex(market_values.index | techs) - -# for tech in techs: -# gens = generators[n.generators.loc[generators,"carrier"] == tech] -# dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens,"bus"],axis=1).sum().reindex(columns=buses,fill_value=0.) -# revenue = dispatch*n.buses_t.marginal_price[buses] -# market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum() - -# ## Now do market value of links ## - -# for i in ["0","1"]: -# all_links = n.links.index[n.buses.loc[n.links["bus"+i],"suffix"] == suffix] -# techs = n.links.loc[all_links,"carrier"].value_counts().index -# market_values = market_values.reindex(market_values.index | techs) - -# for tech in techs: -# links = all_links[n.links.loc[all_links,"carrier"] == tech] -# dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links,"bus"+i],axis=1).sum().reindex(columns=buses,fill_value=0.) 
-# revenue = dispatch*n.buses_t.marginal_price[buses] -# market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum() - -# return market_values - - -# OLD CODE must be adapted - -# def calculate_price_statistics(n, label, price_statistics): - - -# price_statistics = price_statistics.reindex(price_statistics.index|pd.Index(["zero_hours","mean","standard_deviation"])) -# n.buses["suffix"] = n.buses.index.str[2:] -# suffix = "" -# buses = n.buses.index[n.buses.suffix == suffix] - -# threshold = 0.1 #higher than phoney marginal_cost of wind/solar -# df = pd.DataFrame(data=0.,columns=buses,index=n.snapshots) -# df[n.buses_t.marginal_price[buses] < threshold] = 1. -# price_statistics.at["zero_hours", label] = df.sum().sum()/(df.shape[0]*df.shape[1]) -# price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean() -# price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std() -# return price_statistics - - outputs = ["costs", "curtailment", "energy", @@ -425,11 +373,10 @@ outputs = ["costs", "supply_energy", "prices", "weighted_prices", - # "price_statistics", - # "market_values", "metrics", ] + def make_summaries(networks_dict, country='all'): columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"]) @@ -454,7 +401,7 @@ def make_summaries(networks_dict, country='all'): if country != 'all': n = n[n.buses.country == country] - Nyears = n.snapshot_weightings.sum()/8760. + Nyears = n.snapshot_weightings.sum() / 8760. 
costs = load_costs(Nyears, snakemake.input[0], snakemake.config['costs'], snakemake.config['electricity']) update_transmission_costs(n, costs, simple_hvdc_costs=False) @@ -484,7 +431,6 @@ if __name__ == "__main__": network_dir = os.path.join('results', 'networks') configure_logging(snakemake) - def expand_from_wildcard(key): w = getattr(snakemake.wildcards, key) return snakemake.config["scenario"][key] if w == "all" else [w] @@ -504,8 +450,6 @@ if __name__ == "__main__": for l in ll for opts in expand_from_wildcard("opts")} - print(networks_dict) - dfs = make_summaries(networks_dict, country=snakemake.wildcards.country) to_csv(dfs) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 4e162e2f..84423916 100755 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -20,7 +20,6 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs, configure_logging) @@ -35,6 +34,9 @@ from matplotlib.patches import Circle, Ellipse from matplotlib.legend_handler import HandlerPatch to_rgba = mpl.colors.colorConverter.to_rgba +logger = logging.getLogger(__name__) + + def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): fig = ax.get_figure() def axes2pt(): @@ -57,9 +59,11 @@ def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): return e return {Circle: HandlerPatch(patch_func=legend_circle_handler)} + def make_legend_circles_for(sizes, scale=1.0, **kw): return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] + def set_plot_style(): plt.style.use(['classic', 'seaborn-white', {'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', @@ -69,9 +73,9 @@ def set_plot_style(): 'legend.fontsize': 'medium', 'lines.linewidth': 1.5, 'pdf.fonttype': 42, - # 'font.family': 'Times New Roman' }]) + def plot_map(n, ax=None, attribute='p_nom', opts={}): if ax is None: ax = plt.gca() @@ -114,16 +118,11 @@ def plot_map(n, 
ax=None, attribute='p_nom', opts={}): bus_sizes=0, bus_colors=tech_colors, boundaries=map_boundaries, - geomap=True, # TODO : Turn to False, after the release of PyPSA 0.14.2 (refer to https://github.com/PyPSA/PyPSA/issues/75) + geomap=False, ax=ax) ax.set_aspect('equal') ax.axis('off') - # x1, y1, x2, y2 = map_boundaries - # ax.set_xlim(x1, x2) - # ax.set_ylim(y1, y2) - - # Rasterize basemap # TODO : Check if this also works with cartopy for c in ax.collections[:2]: c.set_rasterized(True) @@ -176,13 +175,9 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}): return fig -#n = load_network_for_plots(snakemake.input.network, opts, combine_hydro_ps=False) - def plot_total_energy_pie(n, ax=None): - """Add total energy pie plot""" - if ax is None: - ax = plt.gca() + if ax is None: ax = plt.gca() ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) @@ -190,7 +185,7 @@ def plot_total_energy_pie(n, ax=None): patches, texts, autotexts = ax.pie(e_primary, startangle=90, - labels = e_primary.rename(opts['nice_names_n']).index, + labels = e_primary.rename(opts['nice_names']).index, autopct='%.0f%%', shadow=False, colors = [opts['tech_colors'][tech] for tech in e_primary.index]) @@ -200,9 +195,7 @@ def plot_total_energy_pie(n, ax=None): t2.remove() def plot_total_cost_bar(n, ax=None): - """Add average system cost bar plot""" - if ax is None: - ax = plt.gca() + if ax is None: ax = plt.gca() total_load = (n.snapshot_weightings * n.loads_t.p.sum(axis=1)).sum() tech_colors = opts['tech_colors'] @@ -240,14 +233,13 @@ def plot_total_cost_bar(n, ax=None): if abs(data[-1]) < 5: continue - text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names_n'].get(ind,ind)) + text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind)) texts.append(text) ax.set_ylabel("Average system cost [Eur/MWh]") - ax.set_ylim([0, 80]) # opts['costs_max']]) + ax.set_ylim([0, opts.get('costs_max', 80)]) ax.set_xlim([0, 1]) - #ax.set_xticks([0.5]) - ax.set_xticklabels([]) 
#["w/o\nEp", "w/\nEp"]) + ax.set_xticklabels([]) ax.grid(True, axis="y", color='k', linestyle='dotted') @@ -280,8 +272,6 @@ if __name__ == "__main__": ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45]) plot_total_cost_bar(n, ax2) - #fig.tight_layout() - ll = snakemake.wildcards.ll ll_type = ll[0] ll_factor = ll[1:] diff --git a/scripts/plot_p_nom_max.py b/scripts/plot_p_nom_max.py index 0c2e06f2..bc346785 100644 --- a/scripts/plot_p_nom_max.py +++ b/scripts/plot_p_nom_max.py @@ -19,19 +19,19 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import pypsa - import pandas as pd import matplotlib.pyplot as plt +logger = logging.getLogger(__name__) + + def cum_p_nom_max(net, tech, country=None): carrier_b = net.generators.carrier == tech - generators = \ - pd.DataFrame(dict( + generators = pd.DataFrame(dict( p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'], p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(), country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 8eceea91..c8cc169c 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -21,41 +21,19 @@ Description import os import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import pandas as pd import matplotlib.pyplot as plt -#consolidate and rename -def rename_techs(label): - if label.startswith("central "): - label = label[len("central "):] - elif label.startswith("urban "): - label = label[len("urban "):] +logger = logging.getLogger(__name__) - if "retrofitting" in label: - label = "building retrofitting" - elif "H2" in label: + +def rename_techs(label): + if "H2" in label: label = "hydrogen storage" - elif "CHP" in label: - label = "CHP" - elif "water tank" in label: - label = "water tanks" - elif label == "water tanks": - label = "hot water storage" - elif "gas" in label and label != "gas boiler": - label = 
"natural gas" - elif "solar thermal" in label: - label = "solar thermal" elif label == "solar": label = "solar PV" - elif label == "heat pump": - label = "air heat pump" - elif label == "Sabatier": - label = "methanation" - elif label == "offwind": - label = "offshore wind" elif label == "offwind-ac": label = "offshore wind ac" elif label == "offwind-dc": @@ -68,15 +46,14 @@ def rename_techs(label): label = "hydroelectricity" elif label == "PHS": label = "hydroelectricity" - elif label == "co2 Store": - label = "DAC" elif "battery" in label: label = "battery storage" return label -preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","building retrofitting","ground heat pump","air heat pump","resistive heater","CHP","OCGT","gas boiler","gas","natural gas","methanation","hydrogen storage","battery storage","hot water storage"]) +preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"]) + def plot_costs(infn, fn=None): diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py index 757e8345..7623d1bf 100644 --- a/scripts/prepare_links_p_nom.py +++ b/scripts/prepare_links_p_nom.py @@ -37,11 +37,26 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging import pandas as pd +logger = logging.getLogger(__name__) + + +def multiply(s): + return s.str[0].astype(float) * s.str[1].astype(float) + + +def extract_coordinates(s): + regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " + r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)") + e = s.str.extract(regex, expand=True) + lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': 
-1.}) + lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.}) + return lon, lat + + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake #rule must be enabled in config @@ -50,19 +65,11 @@ if __name__ == "__main__": links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0] - def extract_coordinates(s): - regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " - r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)") - e = s.str.extract(regex, expand=True) - lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.}) - lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.}) - return lon, lat + mw = "Power (MW)" + m_b = links_p_nom[mw].str.contains('x').fillna(False) - m_b = links_p_nom["Power (MW)"].str.contains('x').fillna(False) - def multiply(s): return s.str[0].astype(float) * s.str[1].astype(float) - - links_p_nom.loc[m_b, "Power (MW)"] = links_p_nom.loc[m_b, "Power (MW)"].str.split('x').pipe(multiply) - links_p_nom["Power (MW)"] = links_p_nom["Power (MW)"].str.extract("[-/]?([\d.]+)", expand=False).astype(float) + links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply) + links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float) links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1']) links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2']) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index c2092980..3b1e09ce 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -38,12 +38,12 @@ Inputs ------ - ``data/costs.csv``: The database of cost assumptions for all included technologies for specific years from various sources; e.g. 
discount rate, lifetime, investment (CAPEX), fixed operation and maintenance (FOM), variable operation and maintenance (VOM), fuel costs, efficiency, carbon-dioxide intensity. -- ``networks/{network}_s{simpl}_{clusters}.nc``: confer :ref:`cluster` +- ``networks/elec_s{simpl}_{clusters}.nc``: confer :ref:`cluster` Outputs ------- -- ``networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc``: Complete PyPSA network that will be handed to the ``solve_network`` rule. +- ``networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc``: Complete PyPSA network that will be handed to the ``solve_network`` rule. Description ----------- @@ -56,19 +56,21 @@ Description """ import logging -logger = logging.getLogger(__name__) from _helpers import configure_logging -from add_electricity import load_costs, update_transmission_costs -from six import iteritems - -import numpy as np import re import pypsa +import numpy as np import pandas as pd +from six import iteritems + +from add_electricity import load_costs, update_transmission_costs idx = pd.IndexSlice +logger = logging.getLogger(__name__) + + def add_co2limit(n, Nyears=1., factor=None): if factor is not None: @@ -129,8 +131,8 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1): n.add('GlobalConstraint', f'l{ll_type}_limit', type=f'transmission_{con_type}_limit', sense='<=', constant=rhs, carrier_attribute='AC, DC') - return n + return n def average_every_nhours(n, offset): @@ -222,7 +224,7 @@ if __name__ == "__main__": opts = snakemake.wildcards.opts.split('-') n = pypsa.Network(snakemake.input[0]) - Nyears = n.snapshot_weightings.sum()/8760. + Nyears = n.snapshot_weightings.sum() / 8760. 
set_line_s_max_pu(n) @@ -245,6 +247,7 @@ if __name__ == "__main__": add_co2limit(n, Nyears, float(m[0])) else: add_co2limit(n, Nyears) + break for o in opts: oo = o.split("+") diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index 9bb85833..7ee6c2b1 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -33,14 +33,15 @@ The :ref:`tutorial` uses a smaller `data bundle Date: Thu, 3 Dec 2020 23:13:41 +0100 Subject: [PATCH 14/34] Opsd renewable ppls (#212) * fix clustering of offwind-ac and offwind-dc in sites option * add release nodes * attach renewable assets by location (lat and lon) from OPSD register to network * adapt default config to changes * undo changes from a differen PR in cluster_network.py * undo changes from a different PR, add release notes for this PR * correct release notes * add comments for relevant settings in add_electricity.py * adjust configtable for electricity to OPSD renewable option and add estimate_renewable_capacities_from_capacitiy_stats * reset cluster_network to HEAD * add_electricity: Capacity is float * config: add GB to OPSD_VRE_countries * review and modify implementation * update release notes * Update envs/environment.yaml Co-authored-by: Fabian Neumann Co-authored-by: martha.frysztacki Co-authored-by: eb5194 Co-authored-by: Fabian Neumann --- config.default.yaml | 2 ++ doc/configtables/electricity.csv | 36 +++++++++++---------- doc/release_notes.rst | 2 ++ envs/environment.yaml | 2 +- scripts/add_electricity.py | 55 +++++++++++++++++++++++++++++--- 5 files changed, 76 insertions(+), 21 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index 67a4f15a..1294d0c5 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -53,6 +53,8 @@ electricity: custom_powerplants: false # use pandas query strings here, e.g. 
Country in ['Germany'] conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] + renewable_capacities_from_OPSD: [] # onwind, offwind, solar + # estimate_renewable_capacities_from_capacity_stats: # # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur # Wind: [onwind, offwind-ac, offwind-dc] diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index be447136..70a2dd41 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -1,16 +1,20 @@ -,Unit,Values,Description -voltages,kV,"Any subset of {220., 300., 380.}","Voltage levels to consider when" -co2limit,:math:`t_{CO_2-eq}/a`,float,"Cap on total annual system carbon dioxide emissions" -co2base,:math:`t_{CO_2-eq}/a`,float,"Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard." -agg_p_nom_limits,--,file path,"Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``." -extendable_carriers,,, --- Generator,--,"Any subset of {'OCGT','CCGT'}","Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits." --- StorageUnit,--,"Any subset of {'battery','H2'}","Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity." --- Store,--,"Any subset of {'battery','H2'}","Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity." --- Link,--,"Any subset of {'H2 pipeline'}","Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. 
Hydrogen pipelines require hydrogen storage to be modelled as ``Store``." -max_hours,,, --- battery,h,float,"Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_." --- H2,h,float,"Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_." -powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']","Filter query for the default powerplant database." -custom_powerplants,--,"use `pandas.query `_ strings here, e.g. Country in ['Germany']","Filter query for the custom powerplant database." -conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}","List of conventional power plants to include in the model from ``resources/powerplants.csv``." +,Unit,Values,Description, +voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider when, +co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions, +co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard., +agg_p_nom_limits,--,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. +extendable_carriers,,,, +--,Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. +--,StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. 
+--,Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +--,Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. +max_hours,,,, +--,battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +--,H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database., +custom_powerplants,--,"use `pandas.query `_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database., +conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``., +renewable_capacities_from_OPSD,,[solar, onwind, offwind],List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_, +,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, +estimate_renewable_capacities_from_capacitiy_stats,,,, +,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. 
“[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 213dde66..ae29d399 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -34,6 +34,8 @@ Upcoming Release * Don't remove capital costs from lines and links, when imposing a line volume limit (wildcard ``lv``) or a line cost limit (``lc``). Previously, these were removed to move the expansion in direction of the limit. +* Add renewable power plants from `OPSD `_ to the network for specified technologies. This will overwrite the capacities calculated from the heuristic approach in :func:`estimate_renewable_capacities()`. + * Fix bug of clustering offwind-{ac,dc} sites in the option of high-resolution sites for renewables. Now, there are more sites for offwind-{ac,dc} available than network nodes. Before, they were clustered to the resolution of the network. (e.g. elec_s1024_37m.nc: 37 network nodes, 1024 sites) * Use `mamba` (https://github.com/mamba-org/mamba) for faster Travis CI builds (`#196 `_) diff --git a/envs/environment.yaml b/envs/environment.yaml index 6514dae1..19c96d43 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -25,7 +25,7 @@ dependencies: - yaml - pytables - lxml - - powerplantmatching>=0.4.3 + - powerplantmatching>=0.4.8 - numpy<=1.19.0 # otherwise macos fails # Second order dependencies which should really be deps of atlite diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 4338a440..85bea7fd 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -25,6 +25,8 @@ Relevant Settings co2limit: extendable_carriers: Generator: + OPSD_VRES_countries: + include_renewable_capacities_from_OPSD: estimate_renewable_capacities_from_capacity_stats: load: @@ -93,7 +95,8 @@ import pandas as pd import numpy as np import xarray as xr import geopandas as gpd -import powerplantmatching as ppm +import powerplantmatching as pm +from 
powerplantmatching.export import map_country_bus from vresutils.costdata import annuity from vresutils.load import timeseries_opsd @@ -304,7 +307,7 @@ def attach_conventional_generators(n, costs, ppl): ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier') .rename(index=lambda s: 'C' + str(s))) - logger.info('Adding {} generators with capacities\n{}' + logger.info('Adding {} generators with capacities [MW] \n{}' .format(len(ppl), ppl.groupby('carrier').p_nom.sum())) n.madd("Generator", ppl.index, @@ -467,6 +470,39 @@ def attach_extendable_generators(n, costs, ppl): "Only OCGT, CCGT and nuclear are allowed at the moment.") + +def attach_OPSD_renewables(n): + + available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB'] + tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'} + countries = set(available) & set(n.buses.country) + techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', []) + tech_map = {k: v for k, v in tech_map.items() if v in techs} + + if not tech_map: + return + + logger.info(f'Using OPSD renewable capacities in {", ".join(countries)} ' + f'for technologies {", ".join(tech_map.values())}.') + + df = pd.concat([pm.data.OPSD_VRE_country(c) for c in countries]) + technology_b = ~df.Technology.isin(['Onshore', 'Offshore']) + df['Fueltype'] = df.Fueltype.where(technology_b, df.Technology) + df = df.query('Fueltype in @tech_map').powerplant.convert_country_to_alpha2() + + for fueltype, carrier_like in tech_map.items(): + gens = n.generators[lambda df: df.carrier.str.contains(carrier_like)] + buses = n.buses.loc[gens.bus.unique()] + gens_per_bus = gens.groupby('bus').p_nom.count() + + caps = map_country_bus(df.query('Fueltype == @fueltype'), buses) + caps = caps.groupby(['bus']).Capacity.sum() + caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) + + n.generators.p_nom.update(gens.bus.map(caps).dropna()) + + + def estimate_renewable_capacities(n, tech_map=None): if tech_map is None: tech_map = 
(snakemake.config['electricity'] @@ -474,16 +510,25 @@ def estimate_renewable_capacities(n, tech_map=None): if len(tech_map) == 0: return - capacities = (ppm.data.Capacity_stats().powerplant.convert_country_to_alpha2() + capacities = (pm.data.Capacity_stats().powerplant.convert_country_to_alpha2() [lambda df: df.Energy_Source_Level_2] .set_index(['Fueltype', 'Country']).sort_index()) countries = n.buses.country.unique() + if len(countries) == 0: return + + logger.info('heuristics applied to distribute renewable capacities [MW] \n{}' + .format(capacities.query('Fueltype in @tech_map.keys() and Capacity >= 0.1') + .groupby('Country').agg({'Capacity': 'sum'}))) + for ppm_fueltype, techs in tech_map.items(): tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\ .reindex(countries, fill_value=0.) - tech_i = n.generators.query('carrier in @techs').index + #tech_i = n.generators.query('carrier in @techs').index + tech_i = (n.generators.query('carrier in @techs') + [n.generators.query('carrier in @techs') + .bus.map(n.buses.country).isin(countries)].index) n.generators.loc[tech_i, 'p_nom'] = ( (n.generators_t.p_max_pu[tech_i].mean() * n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation @@ -528,6 +573,8 @@ if __name__ == "__main__": attach_extendable_generators(n, costs, ppl) estimate_renewable_capacities(n) + attach_OPSD_renewables(n) + add_nice_carrier_names(n) n.export_to_netcdf(snakemake.output[0]) From f563b612e48eb825dad433e4081928f08b2d93d1 Mon Sep 17 00:00:00 2001 From: Fabian Date: Thu, 3 Dec 2020 23:47:38 +0100 Subject: [PATCH 15/34] doc/configuration.rst: update lines --- config.default.yaml | 4 +--- doc/configuration.rst | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index 1294d0c5..51776d45 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -52,7 +52,6 @@ electricity: powerplants_filter: false # use pandas query strings here, e.g. 
Country not in ['Germany'] custom_powerplants: false # use pandas query strings here, e.g. Country in ['Germany'] conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass] - renewable_capacities_from_OPSD: [] # onwind, offwind, solar # estimate_renewable_capacities_from_capacity_stats: @@ -144,8 +143,7 @@ renewable: cutout: europe-2013-era5 carriers: [ror, PHS, hydro] PHS_max_hours: 6 - hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, - # estimate_by_large_installations or a float + hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float clip_min_inflow: 1.0 lines: diff --git a/doc/configuration.rst b/doc/configuration.rst index ba42a742..bd0b8137 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -50,7 +50,7 @@ An exemplary dependency graph (starting from the simplification rules) then look .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 14-19 + :lines: 14-18 .. csv-table:: :header-rows: 1 @@ -66,7 +66,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 23-26 + :lines: 22-25 .. csv-table:: :header-rows: 1 @@ -80,7 +80,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 36-54 + :lines: 36-55 .. csv-table:: :header-rows: 1 @@ -97,7 +97,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 61-74 + :lines: 62-75 .. csv-table:: :header-rows: 1 @@ -114,7 +114,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 76-93 + :lines: 77-94 .. 
csv-table:: :header-rows: 1 @@ -126,7 +126,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 76,94-106 + :lines: 77,95-107 .. csv-table:: :header-rows: 1 @@ -138,7 +138,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 76,107-120 + :lines: 77,108-121 .. csv-table:: :header-rows: 1 @@ -150,7 +150,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 76,121-140 + :lines: 77,122-141 .. csv-table:: :header-rows: 1 @@ -162,7 +162,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 76,141-147 + :lines: 77,142-147 .. csv-table:: :header-rows: 1 @@ -218,7 +218,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 170-174 + :lines: 170-176 .. csv-table:: :header-rows: 1 @@ -232,7 +232,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 175-188 + :lines: 178-190 .. csv-table:: :header-rows: 1 @@ -254,7 +254,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 190-200 + :lines: 192-202 .. csv-table:: :header-rows: 1 @@ -266,7 +266,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 190,201-217 + :lines: 192,203-219 .. csv-table:: :header-rows: 1 @@ -280,7 +280,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. 
literalinclude:: ../config.default.yaml :language: yaml - :lines: 219-358 + :lines: 221-299 .. csv-table:: :header-rows: 1 From 1baa309252c69c758a07d17a50e938704aa729dc Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 4 Dec 2020 18:44:34 +0100 Subject: [PATCH 16/34] travis: ipopt 3.13.3 does not provide windows executables https://github.com/conda-forge/ipopt-feedstock/issues/55 --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a6a29a39..5b06a779 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,7 +24,7 @@ before_install: - conda activate pypsa-eur # install open-source solver - - mamba install -c conda-forge glpk ipopt + - mamba install -c conda-forge glpk ipopt<3.13.3 # list packages for easier debugging - conda list From 3ad6340c13c2b03ab3d7a44963f8adb82f66eec3 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 4 Dec 2020 18:53:12 +0100 Subject: [PATCH 17/34] travis: correctly constrain version --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5b06a779..75a82378 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,7 +24,7 @@ before_install: - conda activate pypsa-eur # install open-source solver - - mamba install -c conda-forge glpk ipopt<3.13.3 + - mamba install -c conda-forge glpk ipopt'<3.13.3' # list packages for easier debugging - conda list From 08674a6cda3161ac0b562aa532da87e285179397 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Sat, 5 Dec 2020 17:54:50 +0100 Subject: [PATCH 18/34] Release v0.3 (#213) * release_notes: order for release * doc: fix smaller typos and tidy up * config: bump version * doc: fix line references * doc: bump confpy version * envs: update fixed versions yaml * Snakefile: simplify all_elec to all --- .travis.yml | 2 +- Snakefile | 8 +- config.default.yaml | 2 +- config.tutorial.yaml | 3 +- doc/_static/theme_overrides.css | 5 - doc/cloudcomputing.rst | 4 +- doc/conf.py | 4 +- 
doc/configtables/opts.csv | 2 +- doc/configuration.rst | 9 +- doc/contributing.rst | 2 +- doc/costs.rst | 2 +- doc/index.rst | 2 + doc/installation.rst | 20 +- doc/introduction.rst | 4 +- doc/release_notes.rst | 178 ++++++++++------- doc/solving.rst | 2 +- doc/tutorial.rst | 22 +-- doc/wildcards.rst | 39 +--- envs/environment.fixed.yaml | 332 +++++++++++++++++--------------- scripts/add_electricity.py | 8 +- scripts/add_extra_components.py | 2 +- scripts/build_load_data.py | 2 +- scripts/solve_network.py | 8 - test/config.test1.yaml | 2 +- 24 files changed, 338 insertions(+), 326 deletions(-) diff --git a/.travis.yml b/.travis.yml index 75a82378..43b25200 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,6 +31,6 @@ before_install: script: - cp ./test/config.test1.yaml ./config.yaml - - snakemake -j all solve_all_elec_networks + - snakemake -j all solve_all_networks - rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results # could repeat for more configurations in future diff --git a/Snakefile b/Snakefile index 7616974e..817c905e 100644 --- a/Snakefile +++ b/Snakefile @@ -21,19 +21,19 @@ wildcard_constraints: opts="[-+a-zA-Z0-9\.]*" -rule cluster_all_elec_networks: +rule cluster_all_networks: input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario']) -rule extra_components_all_elec_networks: +rule extra_components_all_networks: input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario']) -rule prepare_all_elec_networks: +rule prepare_all_networks: input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) -rule solve_all_elec_networks: +rule solve_all_networks: input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario']) diff --git a/config.default.yaml b/config.default.yaml index 51776d45..cca71e61 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: CC0-1.0 -version: 0.2.0 +version: 0.3.0 
tutorial: false logging: diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 17c7509e..5cc23e72 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -2,8 +2,9 @@ # # SPDX-License-Identifier: CC0-1.0 -version: 0.2.0 +version: 0.3.0 tutorial: true + logging: level: INFO format: '%(levelname)s:%(name)s:%(message)s' diff --git a/doc/_static/theme_overrides.css b/doc/_static/theme_overrides.css index 4c6f277f..d14367ee 100644 --- a/doc/_static/theme_overrides.css +++ b/doc/_static/theme_overrides.css @@ -45,11 +45,6 @@ background: #ffffff; } -.wy-nav-content .highlight .nn, -.wy-nav-content .admonition.warning a { - color: #dddddd; -} - .rst-content code.literal, .rst-content tt.literal { color: rgb(34, 97, 156) diff --git a/doc/cloudcomputing.rst b/doc/cloudcomputing.rst index f91441a5..f20e1b23 100644 --- a/doc/cloudcomputing.rst +++ b/doc/cloudcomputing.rst @@ -60,7 +60,7 @@ Now a window with the machine details will open. You have to configure the follo You can edit your machine configuration later. So use a cheap machine type configuration to transfer data and only when everything is ready and tested, your expensive machine type, for instance a custom 8 vCPU with 160 GB memory. Solvers do not parallelise well, so we recommend not to choose more than 8 vCPU. - Check ``snakemake -j -n 1 solve_all_elec_networks`` as a dry run to see how much memory is required. + Check ``snakemake -n -j 1 solve_all_networks`` as a dry run to see how much memory is required. The memory requirements will vary depending on the spatial and temporal resoulution of your optimisation. Example: for an hourly, 181 node full European network, set 8 vCPU and 150 GB memory since the dry-run calculated a 135 GB memory requirement.) - Boot disk: As default, your VM is created with 10 GB. Depending on how much you want to handle on one VM you should increase the disk size. 
@@ -85,7 +85,7 @@ Step 3 - Installation of Cloud SDK sudo apt-get update sudo apt-get install bzip2 libxml2-dev sudo apt-get install wget - wget https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh (Check the link. To be up to date with anaconda, check the Anaconda website https://www.anaconda.com/products/individual ) + wget https://repo.anaconda.com/archive/Anaconda3-2020.07-Linux-x86_64.sh ls (to see what anaconda file to bash) bash Anaconda3-2020.07-Linux-x86_64.sh source ~/.bashrc diff --git a/doc/conf.py b/doc/conf.py index 899cbe1a..b2b220a0 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -74,9 +74,9 @@ author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (FIAS), David Schlachtberge # built documents. # # The short X.Y version. -version = u'0.2' +version = u'0.3' # The full version, including alpha/beta/rc tags. -release = u'0.2.0' +release = u'0.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/configtables/opts.csv b/doc/configtables/opts.csv index 4cb473e2..da673ac8 100644 --- a/doc/configtables/opts.csv +++ b/doc/configtables/opts.csv @@ -1,6 +1,6 @@ Trigger, Description, Definition, Status ``nH``; i.e. ``2H``-``6H``, Resample the time-resolution by averaging over every ``n`` snapshots, ``prepare_network``: `average_every_nhours() `_ and its `caller `_), In active use -``nSEG``; e.g. ``4380SEG``, "Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: ``apply_time_segmentation(), In active use +``nSEG``; e.g. 
``4380SEG``, "Apply time series segmentation with `tsam `_ package to ``n`` adjacent snapshots of varying lengths based on capacity factors of varying renewables, hydro inflow and load.", ``prepare_network``: apply_time_segmentation(), In active use ``Co2L``, Add an overall absolute carbon-dioxide emissions limit configured in ``electricity: co2limit``. If a float is appended an overall emission limit relative to the emission level given in ``electricity: co2base`` is added (e.g. ``Co2L0.05`` limits emissisions to 5% of what is given in ``electricity: co2base``), ``prepare_network``: `add_co2limit() `_ and its `caller `_, In active use ``Ep``, Add cost for a carbon-dioxide price configured in ``costs: emission_prices: co2`` to ``marginal_cost`` of generators (other emission types listed in ``network.carriers`` possible as well), ``prepare_network``: `add_emission_prices() `_ and its `caller `_, In active use ``CCL``, Add minimum and maximum levels of generator nominal capacity per carrier for individual countries. These can be specified in the file linked at ``electricity: agg_p_nom_limits`` in the configuration. File defaults to ``data/agg_p_nom_minmax.csv``., ``solve_network``, In active use diff --git a/doc/configuration.rst b/doc/configuration.rst index bd0b8137..1a42c70a 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -18,7 +18,7 @@ Top-level configuration .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 5-12,21,28-34 + :lines: 5-12,20,27-34 .. csv-table:: :header-rows: 1 @@ -40,9 +40,9 @@ facilitate running multiple scenarios through a single command .. code:: bash - snakemake -j 1 solve_all_elec_networks + snakemake -j 1 solve_all_networks -For each wildcard, a **list of values** is provided. 
The rule ``solve_all_elec_networks`` will trigger the rules for creating ``results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc`` for **all combinations** of the provided wildcard values as defined by Python's `itertools.product(...) `_ function that snakemake's `expand(...) function `_ uses. +For each wildcard, a **list of values** is provided. The rule ``solve_all_networks`` will trigger the rules for creating ``results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc`` for **all combinations** of the provided wildcard values as defined by Python's `itertools.product(...) `_ function that snakemake's `expand(...) function `_ uses. An exemplary dependency graph (starting from the simplification rules) then looks like this: @@ -80,7 +80,7 @@ Specifies the temporal range to build an energy system model for as arguments to .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 36-55 + :lines: 36-60 .. csv-table:: :header-rows: 1 @@ -241,7 +241,6 @@ Specifies the temporal range to build an energy system model for as arguments to .. note:: To change cost assumptions in more detail (i.e. other than ``marginal_cost`` and ``capital_cost``), consider modifying cost assumptions directly in ``data/costs.csv`` as this is not yet supported through the config file. - You can also build multiple different cost databases. Make a renamed copy of ``data/costs.csv`` (e.g. ``data/costs-optimistic.csv``) and set the variable ``COSTS=data/costs-optimistic.csv`` in the ``Snakefile``. .. _solving_cf: diff --git a/doc/contributing.rst b/doc/contributing.rst index a6321989..d57f1212 100644 --- a/doc/contributing.rst +++ b/doc/contributing.rst @@ -12,7 +12,7 @@ be it with new ideas, suggestions, by filing bug reports or contributing code to our `GitHub repository `_. * If you already have some code changes, you can submit them directly as a `pull request `_. 
-* If you are wondering where we would greatly appreciate your efforts, check out the ``help wanted`` tag in the `issues list `_ and initiate a discussion there. * If you start working on a feature in the code, let us know by opening an issue or a draft pull request. This helps all of us to keep an overview on what is being done and helps to avoid a situation where we are doing the same work twice in parallel. diff --git a/doc/costs.rst b/doc/costs.rst index 70bd12b8..5ced95dc 100644 --- a/doc/costs.rst +++ b/doc/costs.rst @@ -32,7 +32,7 @@ Based on the parameters above the ``marginal_cost`` and ``capital_cost`` of the .. note:: - Another great resource for `cost assumptions `_. Modifying Cost Assumptions ========================== diff --git a/doc/index.rst b/doc/index.rst index d8d9c852..2ff0cd1c 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -42,6 +42,8 @@ It contains alternating current lines at and above 220 kV voltage level and all The model is suitable both for operational studies and generation and transmission expansion planning studies. The continental scope and highly resolved spatial scale enables a proper description of the long-range smoothing effects for renewable power generation and their varying resource availability. .. image:: img/base.png + :width: 50% + :align: center The restriction to freely available and open data encourages the open exchange of model data developments and eases the comparison of model results. It provides a full, automated software pipeline to assemble the load-flow-ready model from the original datasets, which enables easy replacement and improvement of the individual parts. diff --git a/doc/installation.rst b/doc/installation.rst index e1de3f4b..16fdf766 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -17,6 +17,7 @@ Clone the Repository First of all, clone the `PyPSA-Eur repository `_ using the version control system ``git``. 
The path to the directory into which the ``git repository`` is cloned, must **not** have any spaces! +If you do not have ``git`` installed, follow installation instructions `here `_. .. code:: bash @@ -24,8 +25,6 @@ The path to the directory into which the ``git repository`` is cloned, must **no /some/path/without/spaces % git clone https://github.com/PyPSA/pypsa-eur.git -.. note:: - If you do not have ``git`` installed, follow installation instructions `here `_. .. _deps: @@ -46,9 +45,8 @@ The environment can be installed and activated using .../pypsa-eur % conda activate pypsa-eur -.. note:: - Note that activation is local to the currently open shell! - After opening a new terminal window, one needs to reissue the second command! +Note that activation is local to the currently open shell! +After opening a new terminal window, one needs to reissue the second command! .. note:: If you have troubles with a slow ``conda`` installation, we recommend to install @@ -74,25 +72,23 @@ PyPSA is known to work with the free software - `Cbc `_ - `GLPK `_ (`WinGLKP `_) -and the non-free, commercial software (for which free academic licenses are available) +and the non-free, commercial software (for some of which free academic licenses are available) - `Gurobi `_ - `CPLEX `_ +- `FICO® Xpress Solver `_ -and any other solver that works with the underlying modelling framework `Pyomo `_. For installation instructions of these solvers for your operating system, follow the links above. +Commercial solvers such as Gurobi and CPLEX currently significantly outperform open-source solvers for large-scale problems. +It might be the case that you can only retrieve solutions by using a commercial solver. .. seealso:: `Getting a solver in the PyPSA documentation `_ -.. note:: - Commercial solvers such as Gurobi and CPLEX currently significantly outperform open-source solvers for large-scale problems. - It might be the case that you can only retrieve solutions by using a commercial solver. - .. 
note:: The rules :mod:`cluster_network` and :mod:`simplify_network` solve a quadratic optimisation problem for clustering. The open-source solvers Cbc and GlPK cannot handle this. A fallback to Ipopt is implemented in this case, but requires - also Ipopt to be installed. For an open-source solver setup install in your `conda` environment on OSX/Linux + also Ipopt to be installed. For an open-source solver setup install in your ``conda`` environment on OSX/Linux .. code:: bash diff --git a/doc/introduction.rst b/doc/introduction.rst index 9ca049e7..bc4f267f 100644 --- a/doc/introduction.rst +++ b/doc/introduction.rst @@ -64,4 +64,6 @@ Folder Structure System Requirements =================== -Building the model with the scripts in this repository uses up to 20 GB of memory. Computing optimal investment and operation scenarios requires a strong interior-point solver compatible with the modelling library `Pyomo `_ like `Gurobi `_ or `CPLEX `_ with up to 100 GB of memory. +Building the model with the scripts in this repository runs on a normal computer. +But computing optimal investment and operation scenarios requires a strong interior-point solver +like `Gurobi `_ or `CPLEX `_ with more memory. diff --git a/doc/release_notes.rst b/doc/release_notes.rst index ae29d399..72e82ac2 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -11,70 +11,114 @@ Release Notes Upcoming Release ================ + +PyPSA-Eur 0.3.0 (7th December 2020) +================================== + +**New Features** + +Using the ``{opts}`` wildcard for scenarios: + * An option is introduced which adds constraints such that each country or node produces on average a minimal share of its total consumption itself. For example ``EQ0.5c`` set in the ``{opts}`` wildcard requires each country to produce on average at least 50% of its consumption. Additionally, the option ``ATK`` requires autarky at each node and removes all means of power transmission through lines and links. 
``ATKc`` only removes - cross-border transfer capacities. Moreover, line and link capacities can be capped in the ``config.yaml`` at - ``lines: s_nom_max:`` and ``links: p_nom_max`` (`#166 `_). + cross-border transfer capacities. + [`#166 `_]. * Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the capital cost of solar to 50% of original values - (`#167 `_ and `#207 `_). - -* Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network`. - -* Raise a warning if `tech_colors` in the config are not defined for all carriers. - -* Corrected HVDC link connections (a) between Norway and Denmark and (b) mainland Italy, Corsica (FR) and Sardinia (IT) (`#181 `_) - -* Added Google Cloud Platform tutorial (for Windows users). - -* Corrected setting of exogenous emission price (in config -> cost -> emission price). This was not weighted by the efficiency and effective emission of the generators. Fixed in `#171 `_. - -* Don't remove capital costs from lines and links, when imposing a line volume limit (wildcard ``lv``) or a line cost limit (``lc``). Previously, these were removed to move the expansion in direction of the limit. - -* Add renewable power plants from `OPSD `_ to the network for specified technologies. This will overwrite the capacities calculated from the heuristic approach in :func:`estimate_renewable_capacities()`. - -* Fix bug of clustering offwind-{ac,dc} sites in the option of high-resolution sites for renewables. Now, there are more sites for offwind-{ac,dc} available than network nodes. Before, they were clustered to the resolution of the network. (e.g. 
elec_s1024_37m.nc: 37 network nodes, 1024 sites) - -* Use `mamba` (https://github.com/mamba-org/mamba) for faster Travis CI builds (`#196 `_) - -* The N-1 security margin for transmission lines is now fixed to a provided value in ``config.yaml``, removing an undocumented linear interpolation between 0.5 and 0.7 in the range between 37 and 200 nodes. - -* The mappings for clustered lines and buses produced by the ``simplify_network`` and ``cluster_network`` rules changed from Hierarchical Data Format (.h5) to Comma-Separated Values format (.csv) (`#198 `_) - -* Multiple smaller changes: Removed unused ``{network}`` wildcard, moved environment files to dedicated ``envs`` folder, - removed sector-coupling components from configuration files, minor refactoring and code cleaning (`#190 `_). - -* Added an option to use custom busmaps in rule :mod:`cluster_network`. To use this feature set ``enable: custom_busmap: true``. - Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``, - which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``. - i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc`` (`#193 `_). - -* Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies. - In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip (`#202 _). - -* Parameter corrections for East-Western and Anglo-Scottish interconnectors (`#206 `_) - -* Modelling hydrogen and battery storage with Store and Link components is now the default, rather than using StorageUnit components with fixed power-to-energy ratio (`#205 `_). - -* Electricity consumption data is now directly retrieved from the `OPSD website `_ using the rule ``build_load_data``. 
The user can decide whether to take the ENTSOE power statistics data (defaul) or the ENTSOE transparency data. + [`#167 `_, `#207 `_]. * Added an option to the ``{opts}`` wildcard that applies a time series segmentation algorithm based on renewables, hydro inflow and load time series to produce a given total number of adjacent snapshots of varying lengths. This feature is an alternative to downsampling the temporal resolution by simply averaging and uses the `tsam `_ package - (#186 `_). + [`#186 `_]. + +More OPSD integration: + +* Add renewable power plants from `OPSD `_ to the network for specified technologies. + This will overwrite the capacities calculated from the heuristic approach in :func:`estimate_renewable_capacities()` + [`#212 `_]. + +* Electricity consumption data is now retrieved directly from the `OPSD website `_ using the rule :mod:`build_load_data`. + The user can decide whether to take the ENTSO-E power statistics data (default) or the ENTSO-E transparency data + [`#211 `_]. + +Other: + +* Added an option to use custom busmaps in rule :mod:`cluster_network`. To use this feature set ``enable: custom_busmap: true``. + Then, the rule looks for custom busmaps at ``data/custom_busmap_elec_s{simpl}_{clusters}.csv``, + which should have the same format as ``resources/busmap_elec_s{simpl}_{clusters}.csv``. + i.e. the index should contain the buses of ``networks/elec_s{simpl}.nc`` + [`#193 `_]. + +* Line and link capacities can be capped in the ``config.yaml`` at ``lines: s_nom_max:`` and ``links: p_nom_max``: + [`#166 `_]. + +* Added Google Cloud Platform tutorial (for Windows users) + [`#177 `_]. + +**Changes** + +* Don't remove capital costs from lines and links, when imposing a line volume limit (``lv``) or a line cost limit (``lc``). + Previously, these were removed to move the expansion in direction of the limit + [`#183 `_]. 
+ +* The mappings for clustered lines and buses produced by the :mod:`simplify_network` and :mod:`cluster_network` rules + changed from Hierarchical Data Format (``.h5``) to Comma-Separated Values format (``.csv``) for ease of use. + [`#198 `_] + +* The N-1 security margin for transmission lines is now fixed to a provided value in ``config.yaml``, + removing an undocumented linear interpolation between 0.5 and 0.7 in the range between 37 and 200 nodes. + [`#199 `_]. + +* Modelling hydrogen and battery storage with Store and Link components is now the default, + rather than using StorageUnit components with fixed power-to-energy ratio + [`#205 `_]. + +* Use ``mamba`` (https://github.com/mamba-org/mamba) for faster Travis CI builds + [`#196 `_]. + +* Multiple smaller changes: Removed unused ``{network}`` wildcard, moved environment files to dedicated ``envs`` folder, + removed sector-coupling components from configuration files, updated documentation colors, minor refactoring and code cleaning + [`#190 `_]. + +**Bugs and Compatibility** + +* Add compatibility for pyomo 5.7.0 in :mod:`cluster_network` and :mod:`simplify_network` + [`#172 `_]. + +* Fixed a bug for storage units such that individual store and dispatch efficiencies are correctly taken account of rather than only their round-trip efficiencies. + In the cost database (``data/costs.csv``) the efficiency of battery inverters should be stated as per discharge/charge rather than per roundtrip + [`#202 `_]. + +* Corrected exogenous emission price setting (in ``config: cost: emission price:``), + which now correctly accounts for the efficiency and effective emission of the generators + [`#171 `_]. + +* Corrected HVDC link connections (a) between Norway and Denmark and (b) mainland Italy, Corsica (FR) and Sardinia (IT) + as well as for East-Western and Anglo-Scottish interconnectors + [`#181 `_, `#206 `_]. 
+ +* Fix bug of clustering ``offwind-{ac,dc}`` generators in the option of high-resolution generators for renewables. + Now, there are more sites for ``offwind-{ac,dc}`` available than network nodes. + Before, they were clustered to the resolution of the network (``elec_s1024_37m.nc``: 37 network nodes, 1024 generators) + [`#191 `_]. + +* Raise a warning if ``tech_colors`` in the config are not defined for all carriers + [`#178 `_]. + PyPSA-Eur 0.2.0 (8th June 2020) ================================== -* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints were adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled (`#116 `_). +* The optimization is now performed using the ``pyomo=False`` setting in the :func:`pypsa.lopf.network_lopf`. This speeds up the solving process significantly and consumes much less memory. The inclusion of additional constraints were adjusted to the new implementation. They are all passed to the :func:`network_lopf` function via the ``extra_functionality`` argument. The rule ``trace_solve_network`` was integrated into the rule :mod:`solve_network` and can be activated via configuration with ``solving: options: track_iterations: true``. The charging and discharging capacities of batteries modelled as store-link combination are now coupled [`#116 `_]. -* An updated extract of the `ENTSO-E Transmission System Map `_ (including Malta) was added to the repository using the `GridKit `_ tool. 
This tool has been updated to retrieve up-to-date map extracts using a single `script `_. The update extract features 5322 buses, 6574 lines, 46 links. (`#118 `_). +* An updated extract of the `ENTSO-E Transmission System Map `_ (including Malta) was added to the repository using the `GridKit `_ tool. This tool has been updated to retrieve up-to-date map extracts using a single `script `_. The updated extract features 5322 buses, 6574 lines, 46 links. [`#118 `_]. -* Added `FSFE REUSE `_ compliant license information. Documentation now licensed under CC-BY-4.0 (`#160 `_). +* Added `FSFE REUSE `_ compliant license information. Documentation now licensed under CC-BY-4.0 [`#160 `_]. * Added a 30 minute `video introduction `_ and a 20 minute `video tutorial `_ @@ -82,19 +126,19 @@ PyPSA-Eur 0.2.0 (8th June 2020) * Added an option to skip iterative solving usually performed to update the line impedances of expanded lines at ``solving: options: skip_iterations:``. -* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` (`#136 `_). +* ``snakemake`` rules for retrieving cutouts and the natura raster can now be disabled independently from their respective rules to build them; via ``config.*yaml`` [`#136 `_]. -* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus (`#131 `_). +* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 `_]. -* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed (`#150 `_).
+* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 `_]. -* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` (`#128 `_). +* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 `_]. -* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry (`#143 `_). +* Bugfix in :mod:`base_network` which now finds all closest links, not only the first entry [`#143 `_]. -* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links (`#149 `_). +* Bugfix in :mod:`cluster_network` which now skips recalculation of link parameters if there are no links [`#149 `_]. -* Added information on pull requests to contribution guidelines (`#151 `_). +* Added information on pull requests to contribution guidelines [`#151 `_]. * Improved documentation on open-source solver setup and added usage warnings. @@ -105,31 +149,31 @@ PyPSA-Eur 0.1.0 (9th January 2020) This is the first release of PyPSA-Eur, a model of the European power system at the transmission network level. Recent changes include: -* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io `_ (`#65 `_). +* Documentation on installation, workflows and configuration settings is now available online at `pypsa-eur.readthedocs.io `_ [`#65 `_]. -* The ``conda`` environment files were updated and extended (`#81 `_). +* The ``conda`` environment files were updated and extended [`#81 `_]. -* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality (`#84 `_ and `#94 `_).
+* The power plant database was updated with extensive filtering options via ``pandas.query`` functionality [`#84 `_ and `#94 `_]. -* Continuous integration testing with `Travis CI `_ is now included for Linux, Mac and Windows (`#82 `_). +* Continuous integration testing with `Travis CI `_ is now included for Linux, Mac and Windows [`#82 `_]. -* Data dependencies were moved to `zenodo `_ and are now versioned (`#60 `_). +* Data dependencies were moved to `zenodo `_ and are now versioned [`#60 `_]. -* Data dependencies are now retrieved directly from within the snakemake workflow (`#86 `_). +* Data dependencies are now retrieved directly from within the snakemake workflow [`#86 `_]. -* Emission prices can be added to marginal costs of generators through the keyworks ``Ep`` in the ``{opts}`` wildcard (`#100 `_). +* Emission prices can be added to marginal costs of generators through the keywords ``Ep`` in the ``{opts}`` wildcard [`#100 `_]. -* An option is introduced to add extendable nuclear power plants to the network (`#98 `_). +* An option is introduced to add extendable nuclear power plants to the network [`#98 `_]. -* Focus weights can now be specified for particular countries for the network clustering, which allows to set a proportion of the total number of clusters for particular countries (`#87 `_). +* Focus weights can now be specified for particular countries for the network clustering, which allows to set a proportion of the total number of clusters for particular countries [`#87 `_]. -* A new rule :mod:`add_extra_components` allows to add additional components to the network only after clustering. It is thereby possible to model storage units (e.g. battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements (`#97 `_). +* A new rule :mod:`add_extra_components` allows to add additional components to the network only after clustering. It is thereby possible to model storage units (e.g.
battery and hydrogen) in more detail via a combination of ``Store``, ``Link`` and ``Bus`` elements [`#97 `_]. -* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage (`#108 `_). +* Hydrogen pipelines (including cost assumptions) can now be added alongside clustered network connections in the rule :mod:`add_extra_components` . Set ``electricity: extendable_carriers: Link: [H2 pipeline]`` and ensure hydrogen storage is modelled as a ``Store``. This is a first simplified stage [`#108 `_]. -* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` (`#102 `_). +* Logfiles for all rules of the ``snakemake`` workflow are now written in the folder ``log/`` [`#102 `_]. -* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within a (I)python terminal (or just by calling ``python ``) and thereby facilitates developing and debugging scripts significantly (`#107 `_). +* The new function ``_helpers.mock_snakemake`` creates a ``snakemake`` object which mimics the actual ``snakemake`` object produced by workflow by parsing the ``Snakefile`` and setting all paths for inputs, outputs, and logs. This allows running all scripts within a (I)python terminal (or just by calling ``python ``) and thereby facilitates developing and debugging scripts significantly [`#107 `_]. Release Process =============== @@ -151,7 +195,7 @@ Release Process * Upload code to `zenodo code repository `_ with `GNU GPL 3.0 `_ license. 
-* Create pre-built networks for ``config.default.yaml`` by running ``snakemake -j 1 extra_components_all_elec_networks``. +* Create pre-built networks for ``config.default.yaml`` by running ``snakemake -j 1 extra_components_all_networks``. * Upload pre-built networks to `zenodo data repository `_ with `CC BY 4.0 `_ license. diff --git a/doc/solving.rst b/doc/solving.rst index 2cc8eb57..87fdc040 100644 --- a/doc/solving.rst +++ b/doc/solving.rst @@ -7,7 +7,7 @@ Solving Networks ########################################## -After generating and simplifying the networks they can be solved through the rule :mod:`solve_network` by using the collection rule :mod:`solve_all_elec_networks`. Moreover, networks can be solved for another focus with the derivative rules :mod:`solve_network` by using the collection rule :mod:`solve_operations_network` for dispatch-only analyses on an already solved network. +After generating and simplifying the networks they can be solved through the rule :mod:`solve_network` by using the collection rule :mod:`solve_all_networks`. Moreover, networks can be solved for another focus with the derivative rules :mod:`solve_network` by using the collection rule :mod:`solve_operations_network` for dispatch-only analyses on an already solved network. .. toctree:: :caption: Overview diff --git a/doc/tutorial.rst b/doc/tutorial.rst index 3772bc43..507b1485 100644 --- a/doc/tutorial.rst +++ b/doc/tutorial.rst @@ -59,35 +59,35 @@ It is also possible to allow less or more carbon-dioxide emissions. Here, we lim .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 35,37 + :lines: 36,38 PyPSA-Eur also includes a database of existing conventional powerplants. We can select which types of powerplants we like to be included with fixed capacities: .. 
literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 35,51 + :lines: 36,52 To accurately model the temporal and spatial availability of renewables such as wind and solar energy, we rely on historical weather data. It is advisable to adapt the required range of coordinates to the selection of countries. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 53-61 + :lines: 54-62 We can also decide which weather data source should be used to calculate potentials and capacity factor time-series for each carrier. For example, we may want to use the ERA-5 dataset for solar and not the default SARAH-2 dataset. .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 63,106-107 + :lines: 64,107-108 Finally, it is possible to pick a solver. For instance, this tutorial uses the open-source solvers CBC and Ipopt and does not rely on the commercial solvers Gurobi or CPLEX (for which free academic licenses are available). .. literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 164,175-176 + :lines: 170,180-181 .. note:: @@ -129,7 +129,7 @@ orders ``snakemake`` to run the script ``solve_network`` that produces the solve .. warning:: On Windows the previous command may currently cause a ``MissingRuleException`` due to problems with output files in subfolders. This is an `open issue `_ at `snakemake `_. - Windows users should add the option ``--keep-target-files`` to the command or instead run ``snakemake -j 1 solve_all_elec_networks``. + Windows users should add the option ``--keep-target-files`` to the command or instead run ``snakemake -j 1 solve_all_networks``. This triggers a workflow of multiple preceding jobs that depend on each rule's inputs and outputs: @@ -271,7 +271,7 @@ the wildcards given in ``scenario`` in the configuration file ``config.yaml`` ar .. 
literalinclude:: ../config.tutorial.yaml :language: yaml - :lines: 13-18 + :lines: 14-18 In this example we would not only solve a 6-node model of Germany but also a 2-node model. @@ -286,12 +286,4 @@ The solved networks can be analysed just like any other PyPSA network (e.g. in J network = pypsa.Network("results/networks/elec_s_6_ec_lcopt_Co2L-24H.nc") - ... - For inspiration, read the `examples section in the PyPSA documentation `_. - -.. note:: - - There are rules for summaries and plotting available in the repository of PyPSA-Eur. - - They are currently under revision and therefore not yet documented. diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 71b39ef1..227997d1 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -27,9 +27,6 @@ The ``{simpl}`` wildcard specifies number of buses a detailed network model should be pre-clustered to in the rule :mod:`simplify_network` (before :mod:`cluster_network`). -.. seealso:: - :mod:`simplify_network` - .. _clusters: The ``{clusters}`` wildcard @@ -45,9 +42,6 @@ If an `m` is placed behind the number of clusters (e.g. ``100m``), generators are only moved to the clustered buses but not aggregated by carrier; i.e. the clustered bus may have more than one e.g. wind generator. -.. seealso:: - :mod:`cluster_network` - .. _ll: The ``{ll}`` wildcard @@ -79,9 +73,6 @@ The wildcard, in general, consists of two parts: (c) ``c1.25`` will allow to build a transmission network that costs no more than 25 % more than the current system. -.. seealso:: - :mod:`prepare_network` - .. _opts: The ``{opts}`` wildcard @@ -98,21 +89,18 @@ It may hold multiple triggers separated by ``-``, i.e. ``Co2L-3H`` contains the :widths: 10,20,10,10 :file: configtables/opts.csv -.. seealso:: - :mod:`prepare_network`, :mod:`solve_network` - .. 
_country: The ``{country}`` wildcard ========================== -The rules ``make_summary`` and ``plot_summary`` (generating summaries of all or a subselection -of the solved networks) as well as ``plot_p_nom_max`` (for plotting the cumulative +The rules :mod:`make_summary` and :mod:`plot_summary` (generating summaries of all or a subselection +of the solved networks) as well as :mod:`plot_p_nom_max` (for plotting the cumulative generation potentials for renewable technologies) can be narrowed to individual countries using the ``{country}`` wildcard. -If ``country = all``, then the rule acts on the network for all countries -defined in ``config.yaml``. If otherwise ``country = DE`` or another 2-letter +If ``country=all``, then the rule acts on the network for all countries +defined in ``config.yaml``. If otherwise ``country=DE`` or another 2-letter country code, then the network is narrowed to buses of this country for the rule. For example to get a summary of the energy generated in Germany (in the solution for Europe) use: @@ -121,9 +109,6 @@ in Germany (in the solution for Europe) use: snakemake -j 1 results/summaries/elec_s_all_lall_Co2L-3H_DE -.. seealso:: - :mod:`make_summary`, :mod:`plot_summary`, :mod:`plot_p_nom_max` - .. _cutout_wc: The ``{cutout}`` wildcard ========================= The ``{cutout}`` wildcard facilitates running the rule :mod:`build_cutout` for all cutout configurations specified under ``atlite: cutouts:``. These cutouts will be stored in a folder specified by ``{cutout}``. -.. seealso:: - :mod:`build_cutout`, :ref:`atlite_cf` - .. _technology: The ``{technology}`` wildcard ============================= For instance ``{technology}`` can be used to plot regionally disaggregated poten with the rule :mod:`plot_p_nom_max` or to summarize a particular technology's full load hours in various countries with the rule :mod:`build_country_flh`. -.. seealso:: - :mod:`build_renewable_profiles`, :mod:`plot_p_nom_max`, :mod:`build_country_flh` - ..
_attr: The ``{attr}`` wildcard ======================= -The ``{attr}`` wildcard specifies which attribute are used for size +The ``{attr}`` wildcard specifies which attribute is used for size representations of network components on a map plot produced by the rule -``plot_network``. While it might be extended in the future, ``{attr}`` +:mod:`plot_network`. While it might be extended in the future, ``{attr}`` currently only supports plotting of ``p_nom``. -.. seealso:: - :mod:`plot_network` - .. _ext: The ``{ext}`` wildcard @@ -181,6 +157,3 @@ formats depends on the used backend. To query the supported file types on your s import matplotlib.pyplot as plt plt.gcf().canvas.get_supported_filetypes() - -.. seealso:: - :mod:`plot_network`, :mod:`plot_summary`, :mod:`plot_p_nom_max` diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index 3f027166..8e7bb978 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -1,241 +1,261 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: GPL-3.0-or-later - name: pypsa-eur channels: - bioconda - - gurobi - conda-forge - defaults dependencies: - _libgcc_mutex=0.1 + - _openmp_mutex=4.5 - affine=2.3.0 - - appdirs=1.4.3 + - amply=0.1.4 + - appdirs=1.4.4 - atlite=0.0.3 - - attrs=19.3.0 - - backcall=0.1.0 - - beautifulsoup4=4.9.1 - - blas=1.0 - - blosc=1.16.3 - - bokeh=2.0.2 + - attrs=20.3.0 + - backcall=0.2.0 + - backports=1.0 + - backports.functools_lru_cache=1.6.1 + - beautifulsoup4=4.9.3 + - blosc=1.20.1 + - bokeh=2.2.3 + - boost-cpp=1.72.0 - bottleneck=1.3.2 + - brotlipy=0.7.0 - bzip2=1.0.8 - - ca-certificates=2020.1.1 - - cairo=1.14.12 + - c-ares=1.17.1 + - ca-certificates=2020.11.8 + - cairo=1.16.0 - cartopy=0.17.0 - - certifi=2020.4.5.1 - - cffi=1.14.0 + - certifi=2020.11.8 + - cffi=1.14.4 - cfitsio=3.470 - - cftime=1.1.2 + - cftime=1.3.0 - chardet=3.0.4 - click=7.1.2 - click-plugins=1.1.1 - - cligj=0.5.0 - - cloudpickle=1.4.1 + - cligj=0.7.1 + - 
cloudpickle=1.6.0 - coincbc=2.10.5 - - configargparse=1.1 - - cryptography=2.9.2 - - curl=7.67.0 + - conda=4.9.2 + - conda-package-handling=1.7.2 + - configargparse=1.2.3 + - cryptography=3.2.1 + - curl=7.71.1 - cycler=0.10.0 - - cytoolz=0.10.1 - - dask=2.17.2 - - dask-core=2.17.2 + - cytoolz=0.11.0 + - dask=2.30.0 + - dask-core=2.30.0 - datrie=0.8.2 - - dbus=1.13.14 - decorator=4.4.2 - - distributed=2.17.0 + - descartes=1.1.0 + - distributed=2.30.1 - docutils=0.16 - entsoe-py=0.2.10 - - expat=2.2.6 - - fiona=1.8.11 - - fontconfig=2.13.0 - - freetype=2.9.1 + - expat=2.2.9 + - fiona=1.8.13 + - fontconfig=2.13.1 + - freetype=2.10.4 - freexl=1.0.5 - - fsspec=0.7.4 - - gdal=3.0.2 + - fsspec=0.8.4 + - gdal=3.0.4 - geographiclib=1.50 - - geopandas=0.6.1 - - geopy=1.22.0 - - geos=3.8.0 - - geotiff=1.5.1 - - giflib=5.1.4 - - gitdb=4.0.2 - - gitpython=3.1.1 - - glib=2.63.1 - - gst-plugins-base=1.14.0 - - gstreamer=1.14.0 - - gurobi=9.0.2 + - geopandas=0.8.1 + - geopy=2.0.0 + - geos=3.8.1 + - geotiff=1.6.0 + - gettext=0.19.8.1 + - giflib=5.2.1 + - gitdb=4.0.5 + - gitpython=3.1.11 + - glib=2.66.3 + - glpk=4.65 + - gmp=6.2.1 - hdf4=4.2.13 - - hdf5=1.10.4 + - hdf5=1.10.6 - heapdict=1.0.1 - - icu=58.2 - - idna=2.9 - - importlib-metadata=1.6.0 - - importlib_metadata=1.6.0 - - intel-openmp=2020.1 + - icu=64.2 + - idna=2.10 + - importlib-metadata=3.1.1 + - importlib_metadata=3.1.1 - ipopt=3.13.2 - - ipython=7.13.0 + - ipython=7.19.0 - ipython_genutils=0.2.0 - - jedi=0.17.0 + - jedi=0.17.2 - jinja2=2.11.2 - - joblib=0.15.1 - - jpeg=9b + - joblib=0.17.0 + - jpeg=9d - json-c=0.13.1 - jsonschema=3.2.0 - - jupyter_core=4.6.3 - - kealib=1.4.7 - - kiwisolver=1.2.0 - - krb5=1.16.4 - - ld_impl_linux-64=2.33.1 - - libblas=3.8.0 - - libboost=1.67.0 - - libcblas=3.8.0 - - libcurl=7.67.0 - - libdap4=3.19.1 - - libedit=3.1.20181209 + - jupyter_core=4.7.0 + - kealib=1.4.14 + - kiwisolver=1.3.1 + - krb5=1.17.2 + - lcms2=2.11 + - ld_impl_linux-64=2.35.1 + - libarchive=3.3.3 + - libblas=3.9.0 + - 
libcblas=3.9.0 + - libcurl=7.71.1 + - libdap4=3.20.6 + - libedit=3.1.20191231 + - libev=4.33 - libffi=3.3 - - libgcc-ng=9.1.0 - - libgdal=3.0.2 - - libgfortran-ng=7.3.0 + - libgcc-ng=9.3.0 + - libgdal=3.0.4 + - libgfortran-ng=7.5.0 + - libgfortran4=7.5.0 + - libgfortran5=9.3.0 + - libglib=2.66.3 + - libgomp=9.3.0 + - libiconv=1.16 - libkml=1.3.0 - - liblapack=3.8.0 - - libnetcdf=4.6.1 + - liblapack=3.9.0 + - libnetcdf=4.7.4 + - libnghttp2=1.41.0 + - libopenblas=0.3.12 - libpng=1.6.37 - - libpq=11.5 + - libpq=12.3 + - libsolv=0.7.16 - libspatialindex=1.9.3 - libspatialite=4.3.0a - libssh2=1.9.0 - - libstdcxx-ng=9.1.0 + - libstdcxx-ng=9.3.0 - libtiff=4.1.0 - - libuuid=1.0.3 + - libuuid=2.32.1 + - libwebp-base=1.1.0 - libxcb=1.13 - - libxml2=2.9.9 + - libxml2=2.9.10 - libxslt=1.1.33 - locket=0.2.0 - - lxml=4.5.0 - - lz4-c=1.8.1.2 + - lxml=4.6.2 + - lz4-c=1.9.2 - lzo=2.10 + - mamba=0.7.3 - markupsafe=1.1.1 - - matplotlib=3.1.3 - - matplotlib-base=3.1.3 - - memory_profiler=0.55.0 + - matplotlib-base=3.3.3 + - memory_profiler=0.58.0 - metis=5.1.0 - - mkl=2020.1 - - mkl-service=2.3.0 - - mkl_fft=1.0.15 - - mkl_random=1.1.1 - mock=4.0.2 - - more-itertools=8.3.0 - msgpack-python=1.0.0 - munch=2.5.0 - - nbformat=5.0.6 + - nbformat=5.0.8 - ncurses=6.2 - - netcdf4=1.4.2 - - networkx=2.4 + - netcdf4=1.5.4 + - networkx=2.5 - nose=1.3.7 - numexpr=2.7.1 - - numpy=1.18.1 - - numpy-base=1.18.1 + - numpy=1.19.0 - olefile=0.46 - - openjpeg=2.3.0 - - openssl=1.1.1g - - owslib=0.19.2 - - packaging=20.3 - - pandas=1.0.3 - - parso=0.7.0 + - openjpeg=2.3.1 + - openssl=1.1.1h + - owslib=0.20.0 + - packaging=20.7 + - pandas=1.1.4 + - parso=0.7.1 - partd=1.1.0 - - pcre=8.43 + - patsy=0.5.1 + - pcre=8.44 - pexpect=4.8.0 - pickleshare=0.7.5 - - pillow=7.1.2 - - pip=20.0.2 + - pillow=8.0.1 + - pip=20.3.1 - pixman=0.38.0 - - pluggy=0.13.1 - ply=3.11 - - poppler=0.65.0 - - poppler-data=0.4.9 - - postgresql=11.5 - - powerplantmatching=0.4.5 - - progressbar2=3.37.1 - - proj=6.2.1 - - 
prompt-toolkit=3.0.5 - - prompt_toolkit=3.0.5 - - psutil=5.7.0 + - poppler=0.87.0 + - poppler-data=0.4.10 + - postgresql=12.3 + - powerplantmatching=0.4.8 + - progressbar2=3.53.1 + - proj=7.0.0 + - prompt-toolkit=3.0.8 + - psutil=5.7.3 + - pthread-stubs=0.4 - ptyprocess=0.6.0 - - py=1.8.1 - - pycountry=19.8.18 + - pulp=2.3.1 + - pycosat=0.6.3 + - pycountry=20.7.3 - pycparser=2.20 - pyepsg=0.4.0 - - pygments=2.6.1 - - pykdtree=1.3.1 - - pyomo=5.6.9 - - pyopenssl=19.1.0 + - pygments=2.7.2 + - pykdtree=1.3.4 + - pyomo=5.7.1 + - pyopenssl=20.0.0 - pyparsing=2.4.7 - pyproj=2.6.1.post1 - - pypsa=0.17.0 - - pyqt=5.9.2 - - pyrsistent=0.16.0 - - pyshp=2.1.0 + - pypsa=0.17.1 + - pyrsistent=0.17.3 + - pyshp=2.1.2 - pysocks=1.7.1 - pytables=3.6.1 - - pytest=5.4.2 - - pytest-runner=5.2 - - python=3.7.7 + - python=3.8.6 - python-dateutil=2.8.1 - - python-utils=2.3.0 - - python_abi=3.7 - - pytz=2020.1 - - pyutilib=5.8.0 + - python-utils=2.4.0 + - python_abi=3.8 + - pytz=2020.4 + - pyutilib=6.0.0 - pyyaml=5.3.1 - - qt=5.9.7 - - rasterio=1.1.0 + - rasterio=1.1.5 - ratelimiter=1.2.0 - readline=8.0 - - requests=2.23.0 + - reproc=14.2.1 + - reproc-cpp=14.2.1 + - requests=2.25.0 - rtree=0.9.4 - - scikit-learn=0.22.1 - - scipy=1.4.1 - - seaborn=0.10.1 - - setuptools=47.1.1 - - shapely=1.7.0 - - sip=4.19.8 + - ruamel_yaml=0.15.80 + - scikit-learn=0.23.2 + - scipy=1.5.3 + - seaborn=0.11.0 + - seaborn-base=0.11.0 + - setuptools=49.6.0 + - shapely=1.7.1 - six=1.15.0 - - smmap=3.0.2 - - snakemake-minimal=5.19.2 - - snappy=1.1.7 + - smmap=3.0.4 + - snakemake-minimal=5.30.1 - snuggs=1.4.7 - - sortedcontainers=2.1.0 + - sortedcontainers=2.3.0 - soupsieve=2.0.1 - - sqlite=3.31.1 - - tbb=2018.0.5 + - sqlite=3.34.0 + - statsmodels=0.12.1 + - tbb=2020.2 - tblib=1.6.0 - - tiledb=1.6.3 - - tk=8.6.8 - - toolz=0.10.0 + - threadpoolctl=2.1.0 + - tiledb=1.7.7 + - tk=8.6.10 + - toolz=0.11.1 - toposort=1.5 - - tornado=6.0.4 - - traitlets=4.3.3 - - typing_extensions=3.7.4.1 + - tornado=6.1 + - tqdm=4.54.1 + 
- traitlets=5.0.5 + - typing_extensions=3.7.4.3 - tzcode=2020a - - urllib3=1.25.8 - - wcwidth=0.1.9 - - wheel=0.34.2 + - urllib3=1.25.11 + - wcwidth=0.2.5 + - wheel=0.36.1 - wrapt=1.12.1 - - xarray=0.15.1 + - xarray=0.16.2 - xerces-c=3.2.2 - xlrd=1.2.0 + - xorg-kbproto=1.0.7 + - xorg-libice=1.0.10 + - xorg-libsm=1.2.3 + - xorg-libx11=1.6.12 + - xorg-libxau=1.0.9 + - xorg-libxdmcp=1.1.3 + - xorg-libxext=1.3.4 + - xorg-libxrender=0.9.10 + - xorg-renderproto=0.11.1 + - xorg-xextproto=7.3.0 + - xorg-xproto=7.0.31 - xz=5.2.5 - - yaml=0.1.7 + - yaml=0.2.5 - zict=2.0.0 - - zipp=3.1.0 + - zipp=3.4.0 - zlib=1.2.11 - - zstd=1.3.7 + - zstd=1.4.5 - pip: - - cdsapi==0.2.7 + - cdsapi==0.4.0 - countrycode==0.2 - - descartes==1.1.0 - geokit==1.1.2 - glaes==1.1.2 - - tqdm==4.46.1 + - sklearn==0.0 + - tsam==1.1.0 - vresutils==0.3.1 diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 85bea7fd..8fc8ad5c 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -24,15 +24,13 @@ Relevant Settings conventional_carriers: co2limit: extendable_carriers: - Generator: - OPSD_VRES_countries: include_renewable_capacities_from_OPSD: estimate_renewable_capacities_from_capacity_stats: load: scaling_factor: - renewable: (keys) + renewable: hydro: carriers: hydro_max_hours: @@ -54,9 +52,7 @@ Inputs .. image:: ../img/hydrocapacities.png :scale: 34 % -- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; NOT CURRENTLY USED! - - +- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used! - ``resources/opsd_load.csv`` Hourly per-country load profiles. 
- ``resources/regions_onshore.geojson``: confer :ref:`busregions` - ``resources/nuts3_shapes.geojson``: confer :ref:`shapes` diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index e2cd8ea6..00851d87 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -43,7 +43,7 @@ Outputs Description ----------- -The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config.yaml`` at ``electricity: extendable_carriers: ``. It processes ``networks/elec_s{simpl}_{clusters}.nc`` to build ``networks/elec_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity +The rule :mod:`add_extra_components` attaches additional extendable components to the clustered and simplified network. These can be configured in the ``config.yaml`` at ``electricity: extendable_carriers:``. It processes ``networks/elec_s{simpl}_{clusters}.nc`` to build ``networks/elec_s{simpl}_{clusters}_ec.nc``, which in contrast to the former (depending on the configuration) contain with **zero** initial capacity - ``StorageUnits`` of carrier 'H2' and/or 'battery'. If this option is chosen, every bus is given an extendable ``StorageUnit`` of the corresponding carrier. The energy and power capacities are linked through a parameter that specifies the energy capacity as maximum hours at full dispatch power and is configured in ``electricity: max_hours:``. This linkage leads to one investment variable per storage unit. The default ``max_hours`` lead to long-term hydrogen and short-term battery storage units. 
diff --git a/scripts/build_load_data.py b/scripts/build_load_data.py index a4745630..e31fa3f2 100755 --- a/scripts/build_load_data.py +++ b/scripts/build_load_data.py @@ -17,7 +17,7 @@ Relevant Settings url: interpolate_limit: time_shift_for_large_gaps: - manual_adjustments: true + manual_adjustments: .. seealso:: diff --git a/scripts/solve_network.py b/scripts/solve_network.py index e6936db6..db64e576 100755 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -10,10 +10,6 @@ Relevant Settings .. code:: yaml - (electricity:) - (BAU_mincapacities:) - (SAFE_reservemargin:) - solving: tmpdir: options: @@ -28,10 +24,6 @@ Relevant Settings track_iterations: solver: name: - (solveroptions): - - (plotting:) - (conv_techs:) .. seealso:: Documentation of the configuration file ``config.yaml`` at diff --git a/test/config.test1.yaml b/test/config.test1.yaml index 149435e0..2a91aaf0 100755 --- a/test/config.test1.yaml +++ b/test/config.test1.yaml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: CC0-1.0 -version: 0.2.0 +version: 0.3.0 tutorial: true logging: level: INFO From 5a917dc5a96aefa6e79b400c9bfb0992f8acc38f Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 7 Dec 2020 09:43:17 +0100 Subject: [PATCH 19/34] doc: change zenodo links to latest [skip travis] --- README.md | 4 ++-- doc/index.rst | 12 ++++++------ doc/release_notes.rst | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c281e589..426ad559 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ SPDX-License-Identifier: CC-BY-4.0 [![Build Status](https://travis-ci.org/PyPSA/pypsa-eur.svg?branch=master)](https://travis-ci.org/PyPSA/pypsa-eur) [![Documentation](https://readthedocs.org/projects/pypsa-eur/badge/?version=latest)](https://pypsa-eur.readthedocs.io/en/latest/?badge=latest) ![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur) 
-[![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg)](https://doi.org/10.5281/zenodo.3520875) +[![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874) [![Gitter](https://badges.gitter.im/PyPSA/community.svg)](https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Snakemake](https://img.shields.io/badge/snakemake-≥5.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io) [![REUSE status](https://api.reuse.software/badge/github.com/pypsa/pypsa-eur)](https://api.reuse.software/info/github.com/pypsa/pypsa-eur) @@ -61,7 +61,7 @@ The dataset consists of: - Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [vresutils library](https://github.com/FRESNA/vresutils) and the [glaes library](https://github.com/FZJ-IEK3-VSA/glaes). Already-built versions of the model can be found in the accompanying [Zenodo -repository](https://doi.org/10.5281/zenodo.3601882). +repository](https://doi.org/10.5281/zenodo.3601881). A version of the model that adds building heating, transport and industry sectors to the model, as well as gas networks, can be found diff --git a/doc/index.rst b/doc/index.rst index 2ff0cd1c..02b02ce2 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -19,8 +19,8 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System .. image:: https://img.shields.io/github/repo-size/pypsa/pypsa-eur :alt: GitHub repo size -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg - :target: https://doi.org/10.5281/zenodo.3520875 +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg + :target: https://doi.org/10.5281/zenodo.3520874 .. 
image:: https://badges.gitter.im/PyPSA/community.svg :target: https://gitter.im/PyPSA/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge @@ -171,16 +171,16 @@ Please use the following BibTeX: :: If you want to cite a specific PyPSA-Eur version, each release of PyPSA-Eur is stored on Zenodo with a release-specific DOI. This can be found linked from the overall PyPSA-Eur Zenodo DOI: -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520875.svg - :target: https://doi.org/10.5281/zenodo.3520875 +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg + :target: https://doi.org/10.5281/zenodo.3520874 Pre-Built Networks as a Dataset =============================== There are pre-built networks available as a dataset on Zenodo as well for every release of PyPSA-Eur. -.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601882.svg - :target: https://doi.org/10.5281/zenodo.3601882 +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3601881.svg + :target: https://doi.org/10.5281/zenodo.3601881 The included ``.nc`` files are PyPSA network files which can be imported with PyPSA via: diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 72e82ac2..e678f669 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -193,10 +193,10 @@ Release Process * Tag a release on Github via ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message. -* Upload code to `zenodo code repository `_ with `GNU GPL 3.0 `_ license. +* Upload code to `zenodo code repository `_ with `GNU GPL 3.0 `_ license. * Create pre-built networks for ``config.default.yaml`` by running ``snakemake -j 1 extra_components_all_networks``. -* Upload pre-built networks to `zenodo data repository `_ with `CC BY 4.0 `_ license. +* Upload pre-built networks to `zenodo data repository `_ with `CC BY 4.0 `_ license. * Send announcement on the `PyPSA and PyPSA-Eur mailing list `_. 
From 9966c751bdbf4cae4883afbc7120a811969767ef Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 7 Dec 2020 09:45:14 +0100 Subject: [PATCH 20/34] envs: restore REUSE compliance [skip travis] --- envs/environment.fixed.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/envs/environment.fixed.yaml b/envs/environment.fixed.yaml index 8e7bb978..558db60e 100644 --- a/envs/environment.fixed.yaml +++ b/envs/environment.fixed.yaml @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: GPL-3.0-or-later + name: pypsa-eur channels: - bioconda From 8f2ace03ee36a0866c78b5a1f64e27d7c39bca3a Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Tue, 15 Dec 2020 15:17:17 +0100 Subject: [PATCH 21/34] doc: fix electricity.csv format [skip travis] --- doc/configtables/electricity.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index 70a2dd41..144199e0 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -14,7 +14,7 @@ max_hours,,,, powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database., custom_powerplants,--,"use `pandas.query `_ strings here, e.g. 
Country in ['Germany']",Filter query for the custom powerplant database., conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``., -renewable_capacities_from_OPSD,,[solar, onwind, offwind],List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_, +renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_, ,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, estimate_renewable_capacities_from_capacitiy_stats,,,, ,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, From ed4fdc9150aeedf1f8da5e28203d561214b59ede Mon Sep 17 00:00:00 2001 From: Chiara Anselmetti <40397544+chiaroo@users.noreply.github.com> Date: Mon, 11 Jan 2021 10:27:27 +0100 Subject: [PATCH 22/34] Keep links to store components when using ATK wildcard (#214) * Update prepare_network.py The new ATK wildcard removes all lines + links without further distinction; however, since storage options are now modeled as store components, the links to and from the the storage units for (dis)charge are eliminated as well. Thus, the storage options drop out of the optimisation. Especially when only allowing renewables as generation sources, optimisation may become infeasible for a high temporal resolution (capacity factors = 0 for certain hours; no further options to serve the load). This issue does not arise with the ATKc wildcard, since bus0 and bus1 of the (dis)charge links share the same country code. 
The proposed change is a very quick fix in the enforce_autarky function, solely removing DC links. * Update scripts/prepare_network.py Co-authored-by: Fabian Neumann * Update prepare_network.py Co-authored-by: Fabian Neumann --- scripts/prepare_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py index 3b1e09ce..fc5c6e77 100755 --- a/scripts/prepare_network.py +++ b/scripts/prepare_network.py @@ -204,7 +204,7 @@ def enforce_autarky(n, only_crossborder=False): ].index else: lines_rm = n.lines.index - links_rm = n.links.index + links_rm = n.links.loc[n.links.carrier=="DC"].index n.mremove("Line", lines_rm) n.mremove("Link", links_rm) From 4e44822514755cdd0289687556547100fba6218b Mon Sep 17 00:00:00 2001 From: lukasnacken <47030274+lukasnacken@users.noreply.github.com> Date: Wed, 13 Jan 2021 17:24:42 +0100 Subject: [PATCH 23/34] Add openpyxl to environment.yaml to fix #215 (#216) --- envs/environment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/envs/environment.yaml b/envs/environment.yaml index 19c96d43..7c5faef3 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -17,6 +17,7 @@ dependencies: # Dependencies of the workflow itself - xlrd + - openpyxl - scikit-learn - pycountry - seaborn From df2425d4a04648e948d8291421cab7d2ca36529f Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 14 Jan 2021 21:22:34 +0100 Subject: [PATCH 24/34] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 426ad559..dc6b4791 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ discussion in Section 3.4 "Model validation" of the paper. ![PyPSA-Eur Grid Model Simplified](doc/img/elec_s_X.png) -The model is designed to be imported into the open toolbox +The model building routines are defined through a snakemake workflow. 
The model is designed to be imported into the open toolbox [PyPSA](https://github.com/PyPSA/PyPSA) for operational studies as well as generation and transmission expansion planning studies. From 9048af482fdd6cfbe7202efa5561fe6f078c7734 Mon Sep 17 00:00:00 2001 From: Koen van Greevenbroek <74298901+koen-van-greevenbroek@users.noreply.github.com> Date: Wed, 10 Mar 2021 18:16:09 +0100 Subject: [PATCH 25/34] Add a check to fix KeyError (#220) (#228) In some cases, in networks with DC links, a non-existing column would be referenced in an empty Dataframe. This commit adds a check for this case. Co-authored-by: Koen van Greevenbroek --- scripts/add_electricity.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 8fc8ad5c..e16e1766 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -245,6 +245,11 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal if n.links.empty: return dc_b = n.links.carrier == 'DC' + + # If there are no dc links, then the 'underwater_fraction' column + # may be missing. Therefore we have to return here. 
+ if n.links.loc[dc_b].empty: return + if simple_hvdc_costs: costs = (n.links.loc[dc_b, 'length'] * length_factor * costs.at['HVDC overhead', 'capital_cost']) From 2f9e9075e8134139afe2ab89aea9ee4a9f9c4735 Mon Sep 17 00:00:00 2001 From: pz-max Date: Tue, 23 Mar 2021 17:29:59 +0000 Subject: [PATCH 26/34] adding marginal cost for storage --- config.default.yaml | 2 +- scripts/add_extra_components.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index cca71e61..195990d9 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -179,7 +179,7 @@ costs: year: 2030 discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html - marginal_cost: + marginal_cost: # EUR/MWh solar: 0.01 onwind: 0.015 offwind: 0.015 diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 00851d87..da871715 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -114,7 +114,8 @@ def attach_stores(n, costs): carrier='H2 electrolysis', p_nom_extendable=True, efficiency=costs.at["electrolysis", "efficiency"], - capital_cost=costs.at["electrolysis", "capital_cost"]) + capital_cost=costs.at["electrolysis", "capital_cost"], + marginal_cost=snakemake.config['costs']['marginal_cost'].get('H2')) n.madd("Link", h2_buses_i + " Fuel Cell", bus0=h2_buses_i, @@ -123,7 +124,8 @@ def attach_stores(n, costs): p_nom_extendable=True, efficiency=costs.at["fuel cell", "efficiency"], #NB: fixed cost is per MWel - capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"]) + capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], + marginal_cost=snakemake.config['costs']['marginal_cost'].get('H2')) if 'battery' in carriers: b_buses_i = n.madd("Bus", buses_i + " 
battery", carrier="battery", **bus_sub_dict) @@ -141,7 +143,8 @@ def attach_stores(n, costs): carrier='battery charger', efficiency=costs.at['battery inverter', 'efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True) + p_nom_extendable=True, + marginal_cost=snakemake.config['costs']['marginal_cost'].get('battery'))) n.madd("Link", b_buses_i + " discharger", bus0=b_buses_i, @@ -149,7 +152,8 @@ def attach_stores(n, costs): carrier='battery discharger', efficiency=costs.at['battery inverter','efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True) + p_nom_extendable=True, + marginal_cost=snakemake.config['costs']['marginal_cost'].get('battery'))) def attach_hydrogen_pipelines(n, costs): From 165148d26c84bd234716f690c06985949b66090f Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 23 Mar 2021 19:22:39 +0000 Subject: [PATCH 27/34] Update scripts/add_extra_components.py Co-authored-by: Fabian Neumann --- scripts/add_extra_components.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index da871715..b2b2b6a7 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -115,7 +115,7 @@ def attach_stores(n, costs): p_nom_extendable=True, efficiency=costs.at["electrolysis", "efficiency"], capital_cost=costs.at["electrolysis", "capital_cost"], - marginal_cost=snakemake.config['costs']['marginal_cost'].get('H2')) + marginal_cost=costs.at["electrolysis", "marginal_cost"]) n.madd("Link", h2_buses_i + " Fuel Cell", bus0=h2_buses_i, From b4a992c4db68c5a15ba09bdfc69ea681c744d7b1 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 23 Mar 2021 19:30:06 +0000 Subject: [PATCH 28/34] update config.yaml adding to marginal costs: - electroysis - fuel cell - battery inverter --- config.default.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/config.default.yaml b/config.default.yaml 
index 195990d9..7db6d18c 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -185,7 +185,10 @@ costs: offwind: 0.015 hydro: 0. H2: 0. + electrolysis: 0. + fuel cell: 0. battery: 0. + battery inverter: 0. emission_prices: # in currency per tonne emission, only used with the option Ep co2: 0. From f9e73690e1ee1a721a765257161ff9051fd1aec6 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 23 Mar 2021 19:34:13 +0000 Subject: [PATCH 29/34] Update add_extra_components --- scripts/add_extra_components.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index b2b2b6a7..ce78e493 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -106,7 +106,8 @@ def attach_stores(n, costs): carrier='H2', e_nom_extendable=True, e_cyclic=True, - capital_cost=costs.at["hydrogen storage", "capital_cost"]) + capital_cost=costs.at["hydrogen storage", "capital_cost"], + marginal_cost=costs.at["H2", "marginal_cost"]) n.madd("Link", h2_buses_i + " Electrolysis", bus0=buses_i, @@ -125,7 +126,7 @@ def attach_stores(n, costs): efficiency=costs.at["fuel cell", "efficiency"], #NB: fixed cost is per MWel capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], - marginal_cost=snakemake.config['costs']['marginal_cost'].get('H2')) + marginal_cost=costs.at["fuel cell", "marginal_cost"]) if 'battery' in carriers: b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict) @@ -135,7 +136,8 @@ def attach_stores(n, costs): carrier='battery', e_cyclic=True, e_nom_extendable=True, - capital_cost=costs.at['battery storage', 'capital_cost']) + capital_cost=costs.at['battery storage', 'capital_cost'], + marginal_cost=costs.at["battery", "marginal_cost"]) n.madd("Link", b_buses_i + " charger", bus0=buses_i, @@ -144,7 +146,7 @@ def attach_stores(n, costs): efficiency=costs.at['battery inverter', 'efficiency'], 
capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True, - marginal_cost=snakemake.config['costs']['marginal_cost'].get('battery'))) + marginal_cost=costs.at["battery inverter", "marginal_cost"]) n.madd("Link", b_buses_i + " discharger", bus0=b_buses_i, @@ -153,7 +155,7 @@ def attach_stores(n, costs): efficiency=costs.at['battery inverter','efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True, - marginal_cost=snakemake.config['costs']['marginal_cost'].get('battery'))) + marginal_cost=costs.at["battery inverter", "marginal_cost"])) def attach_hydrogen_pipelines(n, costs): From 184b060903b00f8d3347e2c5f01f14c6aa70b362 Mon Sep 17 00:00:00 2001 From: Max Parzen Date: Tue, 23 Mar 2021 20:27:13 +0000 Subject: [PATCH 30/34] syntax correction --- scripts/add_extra_components.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index ce78e493..6faced60 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -155,7 +155,7 @@ def attach_stores(n, costs): efficiency=costs.at['battery inverter','efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], p_nom_extendable=True, - marginal_cost=costs.at["battery inverter", "marginal_cost"])) + marginal_cost=costs.at["battery inverter", "marginal_cost"]) def attach_hydrogen_pipelines(n, costs): From 6765ffda8fb19db5476a6898a36efd05e82f4712 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Tue, 6 Apr 2021 13:27:21 +0200 Subject: [PATCH 31/34] fix electricity.csv --- doc/configtables/electricity.csv | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index 144199e0..aaeab239 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -2,19 +2,18 @@ voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to 
consider when, co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions, co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard., -agg_p_nom_limits,--,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. +agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. extendable_carriers,,,, ---,Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. ---,StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. ---,Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. ---,Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. +-- Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. +-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. 
+-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. max_hours,,,, ---,battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. ---,H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +-- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +-- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database., custom_powerplants,--,"use `pandas.query `_ strings here, e.g. Country in ['Germany']",Filter query for the custom powerplant database., conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``., renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_, -,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. 
“[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, estimate_renewable_capacities_from_capacitiy_stats,,,, -,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, +"-- Fueltype [ppm], e.g. Wind",,"list of fueltypes strings in PyPSA-Eur, e.g. [onwind, offwind-ac, offwind-dc]",converts ppm Fueltype to PyPSA-EUR Fueltype, From d3dc2e924ade3bbb2df32baccf557265ae409cac Mon Sep 17 00:00:00 2001 From: euronion <42553970+euronion@users.noreply.github.com> Date: Fri, 23 Apr 2021 11:41:55 +0200 Subject: [PATCH 32/34] Correct co2base in config.default.yaml . (#233) * Correct co2base in config.default.yaml . Based on PyPSA-EUR-SEC data. * Update release_notes.rst * Fix .rst . --- config.default.yaml | 2 +- doc/release_notes.rst | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/config.default.yaml b/config.default.yaml index cca71e61..d9376857 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -36,7 +36,7 @@ enable: electricity: voltages: [220., 300., 380.] co2limit: 7.75e+7 # 0.05 * 3.1e9*0.5 - co2base: 3.1e+9 # 1 * 3.1e9*0.5 + co2base: 1.487e9 agg_p_nom_limits: data/agg_p_nom_minmax.csv extendable_carriers: diff --git a/doc/release_notes.rst b/doc/release_notes.rst index e678f669..54419681 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -11,6 +11,8 @@ Release Notes Upcoming Release ================ +* Fix: Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK. The old value was ~2x too high and used when the emissions wildcard in ``{opts}`` was used. 
+ PyPSA-Eur 0.3.0 (7th December 2020) ================================== From efdfad97a3dddb050b6a1289aea1d0124d05702e Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Tue, 27 Apr 2021 07:51:46 +0200 Subject: [PATCH 33/34] address FutureWarning re set operations on pd.Index (#238) --- scripts/add_electricity.py | 2 +- scripts/make_summary.py | 12 ++++++------ scripts/plot_network.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index e16e1766..45d537c4 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -337,7 +337,7 @@ def attach_hydro(n, costs, ppl): country = ppl['bus'].map(n.buses.country).rename("country") - inflow_idx = ror.index | hydro.index + inflow_idx = ror.index.union(hydro.index) if not inflow_idx.empty: dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed) diff --git a/scripts/make_summary.py b/scripts/make_summary.py index ada3fa8a..4d3e9ee5 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -71,7 +71,7 @@ opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} def _add_indexed_rows(df, raw_index): - new_index = df.index|pd.MultiIndex.from_product(raw_index) + new_index = df.index.union(pd.MultiIndex.from_product(raw_index)) if isinstance(new_index, pd.Index): new_index = pd.MultiIndex.from_tuples(new_index) @@ -126,7 +126,7 @@ def calculate_costs(n, label, costs): marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum() - costs = costs.reindex(costs.index|pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])) + costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index]))) costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values @@ -222,7 +222,7 @@ def calculate_supply(n, label, supply): #lots of sign compensation for direction and to do 
maximums s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum() - supply = supply.reindex(supply.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index])) + supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values return supply @@ -268,7 +268,7 @@ def calculate_supply_energy(n, label, supply_energy): s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum() - supply_energy = supply_energy.reindex(supply_energy.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index])) + supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index]))) supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values return supply_energy @@ -276,7 +276,7 @@ def calculate_supply_energy(n, label, supply_energy): def calculate_metrics(n,label,metrics): - metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])) + metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))) metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum() metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum() @@ -298,7 +298,7 @@ def calculate_prices(n,label,prices): bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity") - prices = prices.reindex(prices.index|bus_type.value_counts().index) + prices = prices.reindex(prices.index.union(bus_type.value_counts().index)) logger.warning("Prices are time-averaged, not load-weighted") prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean() diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 
84423916..e55b5de0 100755 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -164,7 +164,7 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}): handler_map=make_handler_map_to_scale_circles_as_in(ax)) ax.add_artist(l2) - techs = (bus_sizes.index.levels[1]) & pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']) + techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])) handles = [] labels = [] for t in techs: From 164c168a30f02d75307f12d5edc1afe8d4d8f6d4 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Tue, 27 Apr 2021 14:36:34 +0200 Subject: [PATCH 34/34] minor revisions on pz-max's PR --- doc/release_notes.rst | 2 ++ scripts/add_extra_components.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 54419681..8233a1f3 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -13,6 +13,8 @@ Upcoming Release * Fix: Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK. The old value was ~2x too high and used when the emissions wildcard in ``{opts}`` was used. +* Add option to include marginal costs of links representing fuel cells, electrolysis, and battery inverters + [`#232 `_]. PyPSA-Eur 0.3.0 (7th December 2020) ================================== diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 6faced60..b957ca40 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -106,8 +106,7 @@ def attach_stores(n, costs): carrier='H2', e_nom_extendable=True, e_cyclic=True, - capital_cost=costs.at["hydrogen storage", "capital_cost"], - marginal_cost=costs.at["H2", "marginal_cost"]) + capital_cost=costs.at["hydrogen storage", "capital_cost"]) n.madd("Link", h2_buses_i + " Electrolysis", bus0=buses_i,