From a05881479ccd363cf729e74e9947dcc45fbede33 Mon Sep 17 00:00:00 2001
From: Fabian Neumann
Date: Mon, 28 Mar 2022 15:17:55 +0200
Subject: [PATCH 01/25] build_bus_regions: move voronoi partition from vresutils to script

---
 scripts/build_bus_regions.py | 65 ++++++++++++++++++++++++++++++++++--
 1 file changed, 63 insertions(+), 2 deletions(-)

diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index d91d0575..4f2369b6 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -47,9 +47,10 @@ from _helpers import configure_logging
 
 import pypsa
 import os
 import pandas as pd
+import numpy as np
 import geopandas as gpd
-
-from vresutils.graph import voronoi_partition_pts
+from shapely.geometry import Polygon
+from scipy.spatial import Voronoi
 
 logger = logging.getLogger(__name__)
 
@@ -61,6 +62,66 @@ def save_to_geojson(s, fn):
     s.to_file(fn, driver='GeoJSON', schema=schema)
 
 
+def voronoi_partition_pts(points, outline, no_multipolygons=False):
+    """
+    Compute the polygons of a voronoi partition of `points` within the
+    polygon `outline`. Taken from
+    https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py
+    Attributes
+    ----------
+    points : Nx2 - ndarray[dtype=float]
+    outline : Polygon
+    no_multipolygons : bool (default: False)
+        If true, replace each MultiPolygon by its largest component
+    Returns
+    -------
+    polygons : N - ndarray[dtype=Polygon|MultiPolygon]
+    """
+
+    points = np.asarray(points)
+
+    if len(points) == 1:
+        polygons = [outline]
+    else:
+        xmin, ymin = np.amin(points, axis=0)
+        xmax, ymax = np.amax(points, axis=0)
+        xspan = xmax - xmin
+        yspan = ymax - ymin
+
+        # to avoid any network positions outside all Voronoi cells, append
+        # the corners of a rectangle framing these points
+        vor = Voronoi(np.vstack((points,
+                                 [[xmin-3.*xspan, ymin-3.*yspan],
+                                  [xmin-3.*xspan, ymax+3.*yspan],
+                                  [xmax+3.*xspan, ymin-3.*yspan],
+                                  [xmax+3.*xspan, ymax+3.*yspan]])))
+
+        polygons = []
+        for i in range(len(points)):
+            poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]])
+
+            if not poly.is_valid:
+                poly = poly.buffer(0)
+
+            poly = poly.intersection(outline)
+
+            polygons.append(poly)
+
+    if no_multipolygons:
+        def demultipolygon(poly):
+            try:
+                # for a MultiPolygon pick the part with the largest area
+                poly = max(poly.geoms, key=lambda pg: pg.area)
+            except:
+                pass
+            return poly
+        polygons = [demultipolygon(poly) for poly in polygons]
+
+    polygons_arr = np.empty((len(polygons),), 'object')
+    polygons_arr[:] = polygons
+    return polygons_arr
+
+
 if __name__ == "__main__":
     if 'snakemake' not in globals():
         from _helpers import mock_snakemake
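A note on the construction vendored in PATCH 01: scipy.spatial.Voronoi leaves the outermost regions unbounded, so the function frames the points with four corner points a few spans away in each direction; every original point then owns a closed cell, which is clipped back to `outline`. A minimal sketch of how the function behaves, assuming voronoi_partition_pts from the patch above is in scope (the coordinates and the square outline are illustrative):

import numpy as np
from shapely.geometry import box

outline = box(0., 0., 10., 10.)                    # region to partition
points = np.array([[2., 2.], [8., 3.], [5., 8.]])  # e.g. bus coordinates

cells = voronoi_partition_pts(points, outline)

# since all points lie well inside the outline, the clipped cells tile it
assert abs(sum(c.area for c in cells) - outline.area) < 1e-6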
From dcac3ea6e1f8df9c8eca4e67c87512412310d252 Mon Sep 17 00:00:00 2001
From: martacki
Date: Thu, 28 Apr 2022 12:59:25 +0200
Subject: [PATCH 02/25] respect stores in make_summary script

---
 scripts/make_summary.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/scripts/make_summary.py b/scripts/make_summary.py
index af1ecf36..a14000ef 100644
--- a/scripts/make_summary.py
+++ b/scripts/make_summary.py
@@ -171,6 +171,9 @@ def calculate_capacity(n,label,capacity):
         if 'p_nom_opt' in c.df.columns:
             c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
             capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
+        elif 'e_nom_opt' in c.df.columns:
+            c_capacities = abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
+            capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
 
     for c in n.iterate_components(n.passive_branch_components):
         c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum()

From d12405d848ce83134bf0011f25d2394c020733d4 Mon Sep 17 00:00:00 2001
From: martacki
Date: Fri, 29 Apr 2022 13:48:58 +0200
Subject: [PATCH 03/25] respect stores for energy_supply.csv

---
 scripts/make_summary.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/make_summary.py b/scripts/make_summary.py
index a14000ef..972b245d 100644
--- a/scripts/make_summary.py
+++ b/scripts/make_summary.py
@@ -235,11 +235,11 @@ def calculate_supply(n, label, supply):
 def calculate_supply_energy(n, label, supply_energy):
     """calculate the total dispatch of each component at the buses where the loads are attached"""
 
-    load_types = n.loads.carrier.value_counts().index
+    load_types = n.buses.carrier.unique()
 
     for i in load_types:
 
-        buses = n.loads.bus[n.loads.carrier == i].values
+        buses = n.buses.query("carrier == @i").index
 
         bus_map = pd.Series(False,index=n.buses.index)

From ac7c94337e98735906d87bb2da5c6b1d4bc2a5f2 Mon Sep 17 00:00:00 2001
From: martacki
Date: Fri, 29 Apr 2022 13:51:54 +0200
Subject: [PATCH 04/25] respect stores for supply.csv

---
 scripts/make_summary.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scripts/make_summary.py b/scripts/make_summary.py
index 972b245d..c070d33f 100644
--- a/scripts/make_summary.py
+++ b/scripts/make_summary.py
@@ -188,11 +188,11 @@ def calculate_capacity(n,label,capacity):
 def calculate_supply(n, label, supply):
     """calculate the max dispatch of each component at the buses where the loads are attached"""
 
-    load_types = n.loads.carrier.value_counts().index
+    load_types = n.buses.carrier.unique()
 
    for i in load_types:
 
-        buses = n.loads.bus[n.loads.carrier == i].values
+        buses = n.buses.query("carrier == @i").index
 
         bus_map = pd.Series(False,index=n.buses.index)
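The three patches above extend the summary tables to stores: components carrying an optimised energy capacity (e_nom_opt, e.g. Store) are now aggregated like those with a power rating (p_nom_opt), and the supply tables iterate over bus carriers rather than load carriers so that store buses are covered too. A toy illustration of the aggregation pattern (the component table is made up):

import pandas as pd

stores = pd.DataFrame({
    "carrier": ["H2", "H2", "battery"],
    "e_nom_opt": [100., 50., 20.],  # optimised energy capacity, e.g. in MWh
    "sign": [1., 1., 1.],
})

# same pattern as in calculate_capacity: signed capacity summed per carrier
c_capacities = abs(stores.e_nom_opt.multiply(stores.sign)).groupby(stores.carrier).sum()
print(c_capacities)  # H2 150.0, battery 20.0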
From 4706422f4b5e2da52031d59440909374cec35be6 Mon Sep 17 00:00:00 2001
From: Ebbe Kyhl <69363603+ebbekyhl@users.noreply.github.com>
Date: Fri, 27 May 2022 16:14:01 +0200
Subject: [PATCH 05/25] Update version of powerplantmatching

Hi, I recently became aware that I was using an older version (0.4.8) of
powerplantmatching. I tested my setup with the newer version (0.5.3), and it
runs without any issues.

The following is a comment/question on the powerplantmatching dataset, which
may be relevant to mention: with regard to pumped-hydro storage (PHS), the
newest version of powerplantmatching entails an energy storage capacity of
4.3 TWh (Europe-aggregate, assuming 6-hours duration for plants that do not
have duration specified). In the earlier version 0.4.8, this was vastly
overestimated at 10 TWh. As a comparison, Geth et al. (2015) showed 1.3 TWh
(including Norway and Switzerland) using 2012-numbers. PHS power capacity has
increased from roughly 50 to 55 GW from 2014 to 2020 (iha, 2015, 2021), so
energy storage capacity has most likely increased as well. But is it fair to
say that energy storage capacity has quadrupled since 2012 (from 1.3 TWh to
4.3 TWh)? Or how can we interpret this difference?

Sources:
Geth et al., 2015, https://doi.org/10.1016/j.rser.2015.07.145
iha, 2015, https://www.aler-renovaveis.org/contents/lerpublication/iha_2015_sept_hydropower-status-report.pdf
iha, 2021, https://assets-global.website-files.com/5f749e4b9399c80b5e421384/60c37321987070812596e26a_IHA20212405-status-report-02_LR.pdf
---
 envs/environment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/envs/environment.yaml b/envs/environment.yaml
index 3c69b77b..eaad600f 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -24,7 +24,7 @@ dependencies:
   - yaml
   - pytables
   - lxml
-  - powerplantmatching>=0.4.8
+  - powerplantmatching==0.5.3
   - numpy
   - pandas
   - geopandas

From b6032fb891c3506d1720b0609fa979e6d2d87321 Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Sat, 28 May 2022 14:48:32 +0100
Subject: [PATCH 06/25] fix crs bug

---
 scripts/build_renewable_profiles.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index a2b2eda6..41e1b54d 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -240,7 +240,7 @@ if __name__ == '__main__':
         # use named function np.greater with partially frozen argument instead
         # and exclude areas where: -max_depth > grid cell depth
         func = functools.partial(np.greater,-config['max_depth'])
-        excluder.add_raster(snakemake.input.gebco, codes=func, crs=4236, nodata=-1000)
+        excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
 
     if 'min_shore_distance' in config:
         buffer = config['min_shore_distance']
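The CRS fix above corrects transposed digits in the EPSG code: 4326 is the WGS84 latitude/longitude system the GEBCO bathymetry raster uses, not 4236. For context, a sketch of the exclusion predicate built with functools.partial in the surrounding code; max_depth here is an illustrative value, normally taken from the renewable config:

import functools
import numpy as np

max_depth = 50.  # metres; illustrative value
func = functools.partial(np.greater, -max_depth)  # func(x) == (-max_depth > x)

depths = np.array([-10., -50., -200.])  # GEBCO depths are negative below sea level
print(func(depths))  # [False False  True] -> True marks cells deeper than max_depth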
From edb81a9e6a4ce2f7f53aeacf68d15aaa48799509 Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Sat, 28 May 2022 14:50:44 +0100
Subject: [PATCH 07/25] add release note

---
 doc/release_notes.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 3f131dc0..80211635 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -70,6 +70,8 @@ Upcoming Release
 * Use updated SARAH-2 and ERA5 cutouts with slightly wider scope to east and
   additional variables.
 
+* Fix crs bug. Change crs 4236 to 4326.
+
 PyPSA-Eur 0.4.0 (22th September 2021)
 =====================================

From 3bb8a7967a92b15f86ad5d914be1ada37bf60ff2 Mon Sep 17 00:00:00 2001
From: Ebbe Kyhl <69363603+ebbekyhl@users.noreply.github.com>
Date: Mon, 30 May 2022 08:15:54 +0200
Subject: [PATCH 08/25] Powerplantmatching version 0.5.3 as lower bound

---
 envs/environment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/envs/environment.yaml b/envs/environment.yaml
index eaad600f..a2cba37f 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -24,7 +24,7 @@ dependencies:
   - yaml
   - pytables
   - lxml
-  - powerplantmatching==0.5.3
+  - powerplantmatching>=0.5.3
   - numpy
   - pandas
   - geopandas

From 798c015bf6e8f34e35d5e9accd5eb0323487c655 Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Fri, 3 Jun 2022 17:03:10 +0100
Subject: [PATCH 09/25] restrict rasterio version

---
 envs/environment.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/envs/environment.yaml b/envs/environment.yaml
index a2cba37f..69025845 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -37,7 +37,7 @@ dependencies:
   - pyomo
   - matplotlib
   - proj
-  - fiona <= 1.18.20 # Till issue https://github.com/Toblerity/Fiona/issues/1085 is not solved
+  - fiona<=1.18.20 # Till issue https://github.com/Toblerity/Fiona/issues/1085 is not solved
 
   # Keep in conda environment when calling ipython
   - ipython
@@ -45,7 +45,7 @@ dependencies:
   # GIS dependencies:
   - cartopy
   - descartes
-  - rasterio
+  - rasterio<=1.2.8 # 1.2.10 creates error https://github.com/PyPSA/atlite/issues/238
 
   # PyPSA-Eur-Sec Dependencies
   - geopy

From 21f627c74ec0b3153d60969f542052694b142e1c Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Fri, 3 Jun 2022 17:05:32 +0100
Subject: [PATCH 10/25] update version

---
 envs/environment.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/envs/environment.yaml b/envs/environment.yaml
index 69025845..f8060de1 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -45,7 +45,7 @@ dependencies:
   # GIS dependencies:
   - cartopy
   - descartes
-  - rasterio<=1.2.8 # 1.2.10 creates error https://github.com/PyPSA/atlite/issues/238
+  - rasterio<=1.2.9 # 1.2.10 creates error https://github.com/PyPSA/atlite/issues/238
 
   # PyPSA-Eur-Sec Dependencies
   - geopy

From e974a30fd3dcf6cc8d005251b1247778cdffb933 Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Fri, 3 Jun 2022 17:14:30 +0100
Subject: [PATCH 11/25] add release note

---
 doc/release_notes.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 80211635..963a1175 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -72,6 +72,8 @@ Upcoming Release
 
 * Fix crs bug. Change crs 4236 to 4326.
 
+* Update rasterio version to correctly calculate exclusion raster
+
 PyPSA-Eur 0.4.0 (22th September 2021)
 =====================================

From d5db3b8d8060dfd7d4a33c5749a4c2c70ef64864 Mon Sep 17 00:00:00 2001
From: Fabian Neumann
Date: Tue, 7 Jun 2022 10:57:01 +0200
Subject: [PATCH 12/25] Update scripts/build_bus_regions.py

Co-authored-by: Fabian Hofmann
---
 scripts/build_bus_regions.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index 4f2369b6..382a32e8 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -117,9 +117,7 @@ def voronoi_partition_pts(points, outline, no_multipolygons=False):
             return poly
         polygons = [demultipolygon(poly) for poly in polygons]
 
-    polygons_arr = np.empty((len(polygons),), 'object')
-    polygons_arr[:] = polygons
-    return polygons_arr
+    return np.array(polygons, dtype=object)
 
 
 if __name__ == "__main__":

From aa867cb70489041cd7fdbed61ebec85a52e38ed6 Mon Sep 17 00:00:00 2001
From: Fabian Hofmann
Date: Tue, 7 Jun 2022 15:00:57 +0200
Subject: [PATCH 13/25] Update scripts/build_bus_regions.py

Co-authored-by: Fabian Neumann
---
 scripts/build_bus_regions.py | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index 382a32e8..89765ed9 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -107,15 +107,6 @@ def voronoi_partition_pts(points, outline, no_multipolygons=False):
 
             polygons.append(poly)
 
-    if no_multipolygons:
-        def demultipolygon(poly):
-            try:
-                # for a MultiPolygon pick the part with the largest area
-                poly = max(poly.geoms, key=lambda pg: pg.area)
-            except:
-                pass
-            return poly
-        polygons = [demultipolygon(poly) for poly in polygons]
 
     return np.array(polygons, dtype=object)
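A brief aside on the np.array(polygons, dtype=object) simplification from PATCH 12: for Polygon elements it is equivalent to the empty-and-fill idiom inherited from vresutils; both yield a 1-D object array of geometries. A small sketch contrasting the two forms (the triangle coordinates are made up):

import numpy as np
from shapely.geometry import Polygon

polys = [Polygon([(0, 0), (1, 0), (1, 1)]),
         Polygon([(0, 0), (2, 0), (2, 2)])]

direct = np.array(polys, dtype=object)       # simplified form from the patch

filled = np.empty(len(polys), dtype=object)  # defensive vresutils idiom
filled[:] = polys

assert direct.shape == filled.shape == (2,)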
From bdd094d796b3f896b42ba8d6fb27c0093f467a67 Mon Sep 17 00:00:00 2001
From: Fabian Hofmann
Date: Tue, 7 Jun 2022 15:01:18 +0200
Subject: [PATCH 14/25] Update scripts/build_bus_regions.py

---
 scripts/build_bus_regions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index 89765ed9..b6d2d129 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -62,7 +62,7 @@ def save_to_geojson(s, fn):
     s.to_file(fn, driver='GeoJSON', schema=schema)
 
 
-def voronoi_partition_pts(points, outline, no_multipolygons=False):
+def voronoi_partition_pts(points, outline):
     """
     Compute the polygons of a voronoi partition of `points` within the
     polygon `outline`. Taken from

From 97fbf77ff8ddb7da8bb75602b73853e319854cb5 Mon Sep 17 00:00:00 2001
From: Fabian Hofmann
Date: Tue, 7 Jun 2022 15:01:40 +0200
Subject: [PATCH 15/25] Update scripts/build_bus_regions.py

---
 scripts/build_bus_regions.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/scripts/build_bus_regions.py b/scripts/build_bus_regions.py
index b6d2d129..8003d370 100644
--- a/scripts/build_bus_regions.py
+++ b/scripts/build_bus_regions.py
@@ -71,8 +71,6 @@ def voronoi_partition_pts(points, outline):
     ----------
     points : Nx2 - ndarray[dtype=float]
     outline : Polygon
-    no_multipolygons : bool (default: False)
-        If true, replace each MultiPolygon by its largest component
     Returns
     -------
     polygons : N - ndarray[dtype=Polygon|MultiPolygon]

From cd92d8092ba2ba332e5c8e0ea3b29ff78e89c2b8 Mon Sep 17 00:00:00 2001
From: martacki
Date: Wed, 8 Jun 2022 15:49:06 +0200
Subject: [PATCH 16/25] plot_summary: remove deprecated retrieve_snakemake_keys function

---
 scripts/plot_summary.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py
index 48f064b0..bc2bd30c 100644
--- a/scripts/plot_summary.py
+++ b/scripts/plot_summary.py
@@ -21,7 +21,7 @@ Description
 import os
 import logging
 
-from _helpers import configure_logging, retrieve_snakemake_keys
+from _helpers import configure_logging
 
 import pandas as pd
 import matplotlib.pyplot as plt
@@ -170,12 +170,12 @@ if __name__ == "__main__":
                                    attr='', ext='png', country='all')
     configure_logging(snakemake)
 
-    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+    config = snakemake.config
 
-    summary = wildcards.summary
+    summary = snakemake.wildcards.summary
     try:
         func = globals()[f"plot_{summary}"]
     except KeyError:
         raise RuntimeError(f"plotting function for {summary} has not been defined")
 
-    func(os.path.join(paths[0], f"{summary}.csv"), config, out[0])
+    func(os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0])
From 1a7b439f2d43bd24f685e3602d6be6d122d557b1 Mon Sep 17 00:00:00 2001
From: zoltanmaric
Date: Wed, 8 Jun 2022 17:19:06 +0200
Subject: [PATCH 17/25] Remove usages of `retrieve_snakemake_keys`

---
 scripts/build_natura_raster.py | 11 ++++-------
 scripts/plot_p_nom_max.py      | 14 ++++++--------
 scripts/prepare_links_p_nom.py |  6 ++----
 3 files changed, 12 insertions(+), 19 deletions(-)

diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py
index 71d2c45e..7fa9d544 100644
--- a/scripts/build_natura_raster.py
+++ b/scripts/build_natura_raster.py
@@ -40,7 +40,7 @@ Description
 """
 
 import logging
-from _helpers import configure_logging, retrieve_snakemake_keys
+from _helpers import configure_logging
 
 import atlite
 import geopandas as gpd
@@ -73,20 +73,17 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('build_natura_raster')
     configure_logging(snakemake)
 
-    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
-
-    cutouts = paths.cutouts
+    cutouts = snakemake.input.cutouts
     xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
     bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys))
     transform, out_shape = get_transform_and_shape(bounds, res=100)  # adjusted boundaries
 
-    shapes = gpd.read_file(paths.natura).to_crs(3035)
+    shapes = gpd.read_file(snakemake.input.natura).to_crs(3035)
     raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform)
     raster = raster.astype(rio.uint8)
 
-    with rio.open(out[0], 'w', driver='GTiff', dtype=rio.uint8,
+    with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8,
                   count=1, transform=transform, crs=3035, compress='lzw',
                   width=raster.shape[1], height=raster.shape[0]) as dst:
         dst.write(raster, indexes=1)
-
diff --git a/scripts/plot_p_nom_max.py b/scripts/plot_p_nom_max.py
index ea66d612..e79ad274 100644
--- a/scripts/plot_p_nom_max.py
+++ b/scripts/plot_p_nom_max.py
@@ -19,7 +19,7 @@ Description
 """
 
 import logging
-from _helpers import configure_logging, retrieve_snakemake_keys
+from _helpers import configure_logging
 
 import pypsa
 import pandas as pd
@@ -53,13 +53,11 @@ if __name__ == "__main__":
                                    clusts= '5,full', country= 'all')
     configure_logging(snakemake)
 
-    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
-
     plot_kwds = dict(drawstyle="steps-post")
 
-    clusters = wildcards.clusts.split(',')
-    techs = wildcards.techs.split(',')
-    country = wildcards.country
+    clusters = snakemake.wildcards.clusts.split(',')
+    techs = snakemake.wildcards.techs.split(',')
+    country = snakemake.wildcards.country
     if country == 'all':
         country = None
     else:
@@ -68,7 +66,7 @@ if __name__ == "__main__":
     fig, axes = plt.subplots(1, len(techs))
 
     for j, cluster in enumerate(clusters):
-        net = pypsa.Network(paths[j])
+        net = pypsa.Network(snakemake.input[j])
 
         for i, tech in enumerate(techs):
             cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max",
@@ -81,4 +79,4 @@ if __name__ == "__main__":
 
     plt.legend(title="Cluster level")
 
-    fig.savefig(out[0], transparent=True, bbox_inches='tight')
+    fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight')
diff --git a/scripts/prepare_links_p_nom.py b/scripts/prepare_links_p_nom.py
index 6bd4bca4..b83089d6 100644
--- a/scripts/prepare_links_p_nom.py
+++ b/scripts/prepare_links_p_nom.py
@@ -37,7 +37,7 @@ Description
 """
 
 import logging
-from _helpers import configure_logging, retrieve_snakemake_keys
+from _helpers import configure_logging
 
 import pandas as pd
 
@@ -63,8 +63,6 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('prepare_links_p_nom', simpl='', network='elec')
     configure_logging(snakemake)
 
-    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
-
     links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0]
 
     mw = "Power (MW)"
@@ -76,4 +74,4 @@ if __name__ == "__main__":
     links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1'])
     links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2'])
 
-    links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(out[0], index=False)
+    links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(snakemake.output[0], index=False)
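PATCHES 16 and 17 converge on the same pattern: scripts executed through Snakemake's script directive receive an injected snakemake object, so inputs, outputs, wildcards and config are read off it directly instead of through the removed retrieve_snakemake_keys helper. A sketch of the access patterns used above; the input and wildcard names are illustrative:

# `snakemake` is injected into the script's globals by Snakemake itself;
# outside of a workflow run, the repo's mock_snakemake builds the same
# object from the Snakefile, e.g.:
#   from _helpers import mock_snakemake
#   snakemake = mock_snakemake('build_natura_raster')
first_input = snakemake.input[0]        # positional input access
cutouts = snakemake.input.cutouts       # named input, if the rule names one
summary = snakemake.wildcards.summary   # wildcard value
config = snakemake.config               # merged workflow configuration
out_path = snakemake.output[0]          # first output file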
From 6b9932f5e80d685579617f2d962b4f9e77763263 Mon Sep 17 00:00:00 2001
From: Fabian Hofmann
Date: Tue, 14 Jun 2022 15:24:10 +0200
Subject: [PATCH 18/25] build_renewable_profiles: set show progress default to False

---
 scripts/build_renewable_profiles.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index 41e1b54d..70cadab4 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -203,7 +203,7 @@ if __name__ == '__main__':
         pgb.streams.wrap_stderr()
 
     nprocesses = int(snakemake.threads)
-    noprogress = not snakemake.config['atlite'].get('show_progress', True)
+    noprogress = not snakemake.config['atlite'].get('show_progress', False)
     config = snakemake.config['renewable'][snakemake.wildcards.technology]
     resource = config['resource'] # pv panel config / wind turbine config
     correction_factor = config.get('correction_factor', 1.)

From 5df588ccb80026eaf60b2d70eb71f3c8c55923bc Mon Sep 17 00:00:00 2001
From: Philipp Glaum
Date: Mon, 20 Jun 2022 12:45:28 +0200
Subject: [PATCH 19/25] fix snakemake error introduced after v7.7.0

---
 scripts/_helpers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 410e05af..766fb421 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -240,7 +240,7 @@ def mock_snakemake(rulename, **wildcards):
         if os.path.exists(p):
             snakefile = p
             break
-    workflow = sm.Workflow(snakefile, overwrite_configfiles=[])
+    workflow = sm.Workflow(snakefile, overwrite_configfiles=[], rerun_triggers=[])
     workflow.include(snakefile)
     workflow.global_resources = {}
     rule = workflow.get_rule(rulename)

From c2413aeef439ffb0a96f02d4b1b86a4d9520ba1f Mon Sep 17 00:00:00 2001
From: Fabian
Date: Mon, 20 Jun 2022 18:20:28 +0200
Subject: [PATCH 20/25] cluster-network: add strategies for conventionals

---
 scripts/cluster_network.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index 642db4da..1d5608e2 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -281,7 +281,14 @@ def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carr
         aggregate_generators_carriers=aggregate_carriers,
         aggregate_one_ports=["Load", "StorageUnit"],
         line_length_factor=line_length_factor,
-        generator_strategies={'p_nom_max': p_nom_max_strategy, 'p_nom_min': pd.Series.sum},
+        generator_strategies={'p_nom_max': p_nom_max_strategy,
+                              'p_nom_min': pd.Series.sum,
+                              'p_min_pu': pd.Series.mean,
+                              'marginal_cost': pd.Series.mean,
+                              'committable': np.any,
+                              'ramp_limit_up': pd.Series.max,
+                              'ramp_limit_down': pd.Series.max,
+                              },
         scale_link_capital_costs=False)
 
     if not n.links.empty:

From 51ff3f02bb5ac45f840b7cd1a15c41a6e4112f6a Mon Sep 17 00:00:00 2001
From: Philipp Glaum
Date: Tue, 21 Jun 2022 16:13:16 +0200
Subject: [PATCH 21/25] helpers: check snakemake version for bug fix

---
 scripts/_helpers.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 766fb421..af6d831c 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -231,6 +231,7 @@ def mock_snakemake(rulename, **wildcards):
     import os
     from pypsa.descriptors import Dict
     from snakemake.script import Snakemake
+    from packaging.version import Version, parse
 
     script_dir = Path(__file__).parent.resolve()
     assert Path.cwd().resolve() == script_dir, \
@@ -240,7 +241,8 @@ def mock_snakemake(rulename, **wildcards):
         if os.path.exists(p):
             snakefile = p
             break
-    workflow = sm.Workflow(snakefile, overwrite_configfiles=[], rerun_triggers=[])
+    kwargs=dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
+    workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
     workflow.include(snakefile)
     workflow.global_resources = {}
     rule = workflow.get_rule(rulename)
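The strategy mapping in PATCH 20 decides, attribute by attribute, how generators are merged when several collapse into one cluster. The same semantics can be reproduced with a plain pandas groupby/agg; the generator table below is made up for illustration:

import pandas as pd

generators = pd.DataFrame({
    "cluster": ["A", "A", "B"],
    "p_nom_min": [10., 20., 5.],
    "marginal_cost": [30., 50., 40.],
    "committable": [True, False, False],
    "ramp_limit_up": [0.5, 0.8, 1.0],
})

clustered = generators.groupby("cluster").agg({
    "p_nom_min": "sum",       # capacities add up
    "marginal_cost": "mean",  # costs are averaged
    "committable": "any",     # one committable member makes the cluster committable
    "ramp_limit_up": "max",   # keep the most permissive ramp limit
})
print(clustered)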
From cc657b762874994b06809adedaf5647e014ff83a Mon Sep 17 00:00:00 2001
From: Fabian Neumann
Date: Tue, 21 Jun 2022 16:21:21 +0200
Subject: [PATCH 22/25] Update scripts/_helpers.py

---
 scripts/_helpers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index af6d831c..6e47c053 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -241,7 +241,7 @@ def mock_snakemake(rulename, **wildcards):
         if os.path.exists(p):
             snakefile = p
             break
-    kwargs=dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
+    kwargs = dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
     workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
     workflow.include(snakefile)
     workflow.global_resources = {}

From d18867ce61c7e6d60ad8ef3ba557d8b03bbefd3b Mon Sep 17 00:00:00 2001
From: Fabian
Date: Thu, 23 Jun 2022 21:27:18 +0200
Subject: [PATCH 23/25] build_renewable_profiles: use dask client instead of kwargs

---
 scripts/build_renewable_profiles.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index 70cadab4..b77d79e1 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -189,6 +189,7 @@ import logging
 from pypsa.geo import haversine
 from shapely.geometry import LineString
 import time
+from dask.distributed import Client
 
 from _helpers import configure_logging
 
@@ -216,6 +217,7 @@ if __name__ == '__main__':
     if correction_factor != 1.:
         logger.info(f'correction_factor is set as {correction_factor}')
 
+    client = Client(n_workers=nprocesses)
 
     cutout = atlite.Cutout(snakemake.input['cutout'])
     regions = gpd.read_file(snakemake.input.regions).set_index('name').rename_axis('bus')
@@ -266,7 +268,7 @@ if __name__ == '__main__':
     potential = capacity_per_sqkm * availability.sum('bus') * area
 
     func = getattr(cutout, resource.pop('method'))
-    resource['dask_kwargs'] = {'num_workers': nprocesses}
+    # resource['dask_kwargs'] = {'num_workers': nprocesses, "scheduler": "threading"}
     capacity_factor = correction_factor * func(capacity_factor=True, **resource)
     layout = capacity_factor * area * capacity_per_sqkm
     profile, capacities = func(matrix=availability.stack(spatial=['y','x']),

From 743fdea874aac727e356ba6acf9c787a9eed4cc3 Mon Sep 17 00:00:00 2001
From: Fabian
Date: Thu, 23 Jun 2022 21:39:28 +0200
Subject: [PATCH 24/25] add dask-worker-space to gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index b4734ab2..559dde47 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,6 +19,7 @@ gurobi.log
 /data
 /data/links_p_nom.csv
 /cutouts
+/dask-worker-space
 
 doc/_build
From 75f9719076c39d7d6ff3653917d16b966c5a3e07 Mon Sep 17 00:00:00 2001
From: Fabian
Date: Fri, 24 Jun 2022 14:07:51 +0200
Subject: [PATCH 25/25] build_renewable_profiles: use LocalCluster instance

---
 scripts/build_renewable_profiles.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py
index b77d79e1..37e1e9de 100644
--- a/scripts/build_renewable_profiles.py
+++ b/scripts/build_renewable_profiles.py
@@ -189,7 +189,7 @@ import logging
 from pypsa.geo import haversine
 from shapely.geometry import LineString
 import time
-from dask.distributed import Client
+from dask.distributed import Client, LocalCluster
 
 from _helpers import configure_logging
 
@@ -217,8 +217,9 @@ if __name__ == '__main__':
     if correction_factor != 1.:
         logger.info(f'correction_factor is set as {correction_factor}')
 
-    client = Client(n_workers=nprocesses)
-
+    cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
+    client = Client(cluster, asynchronous=True)
+
     cutout = atlite.Cutout(snakemake.input['cutout'])
     regions = gpd.read_file(snakemake.input.regions).set_index('name').rename_axis('bus')
     buses = regions.index
@@ -268,7 +269,7 @@ if __name__ == '__main__':
     potential = capacity_per_sqkm * availability.sum('bus') * area
 
     func = getattr(cutout, resource.pop('method'))
-    # resource['dask_kwargs'] = {'num_workers': nprocesses, "scheduler": "threading"}
+    resource['dask_kwargs'] = {"scheduler": client}
     capacity_factor = correction_factor * func(capacity_factor=True, **resource)
     layout = capacity_factor * area * capacity_per_sqkm
     profile, capacities = func(matrix=availability.stack(spatial=['y','x']),
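The last three patches settle on an explicit dask.distributed deployment for the atlite availability and profile computations: a LocalCluster with one thread per worker, a Client attached to it, and the client handed to atlite as the dask scheduler via dask_kwargs (dask accepts a Client object as scheduler because it implements the get interface). A self-contained sketch of the same pattern with a stand-in workload; the squaring tasks replace atlite's availability calculation and nprocesses is illustrative:

import dask
from dask.distributed import Client, LocalCluster

if __name__ == '__main__':
    nprocesses = 4  # in the script this comes from snakemake.threads

    cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
    client = Client(cluster)

    # any delayed graph or dask collection can be computed on the cluster
    tasks = [dask.delayed(lambda x: x * x)(i) for i in range(8)]
    results = dask.compute(*tasks, scheduler=client)
    print(results)  # (0, 1, 4, 9, 16, 25, 36, 49)

    client.close()
    cluster.close()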