From f1243c3e0cc5fc1f4d6b5de03f6489f6109c9769 Mon Sep 17 00:00:00 2001
From: Max Parzen
Date: Wed, 24 Nov 2021 14:16:24 +0100
Subject: [PATCH 1/2] Fix distribute clustering with cbc/glpk/ipopt

Assume you have 10 nodes that need to be distributed between 2
countries. With some of the open-source solvers it can happen that one
country gets assigned 9.01 (float) nodes and the other 0.99. Using
.astype(int) then yields a node distribution of 9 and 0, since
`astype(int)` truncates the decimals (0.99 -> 0). The resulting zero
breaks the code when open-source solvers are used; Gurobi somehow
deals with it. (An illustrative sketch of this failure mode follows
the two patches below.)
---
 scripts/cluster_network.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index 980b73b0..1a976cd1 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -218,7 +218,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
     results = opt.solve(m)
     assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"
 
-    return pd.Series(m.n.get_values(), index=L.index).astype(int)
+    return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
 
 
 def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds):

From 72e277a007c9421a5b48078942867e73b48ed481 Mon Sep 17 00:00:00 2001
From: Fabian Neumann
Date: Fri, 14 Jan 2022 08:43:21 +0100
Subject: [PATCH 2/2] update environment and address deprecations (#291)

* update environment and address deprecations
* check pandas<1.3
* limit snakemake due to ascii encoding error, address review comments
* remove version restriction on snakemake
---
 README.md                  | 2 +-
 envs/environment.yaml      | 8 ++++----
 scripts/add_electricity.py | 2 --
 scripts/base_network.py    | 2 +-
 scripts/build_shapes.py    | 6 +++---
 scripts/cluster_network.py | 5 ++++-
 6 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index 15f979a7..8f569f2e 100644
--- a/README.md
+++ b/README.md
@@ -58,7 +58,7 @@ The dataset consists of:
 - Electrical demand time series from the [OPSD project](https://open-power-system-data.org/).
 - Renewable time series based on ERA5 and SARAH, assembled using the [atlite tool](https://github.com/FRESNA/atlite).
-- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [vresutils library](https://github.com/FRESNA/vresutils) and the [glaes library](https://github.com/FZJ-IEK3-VSA/glaes).
+- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [atlite library](https://github.com/PyPSA/atlite).
 
 Already-built versions of the model can be found in the accompanying [Zenodo repository](https://doi.org/10.5281/zenodo.3601881).
diff --git a/envs/environment.yaml b/envs/environment.yaml
index b6958d85..4b7b0ec5 100644
--- a/envs/environment.yaml
+++ b/envs/environment.yaml
@@ -10,9 +10,9 @@ dependencies:
   - python>=3.8
   - pip
 
-  - pypsa>=0.18
+  - pypsa>=0.18.1
   - atlite>=0.2.5
-  - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved
+  - dask
 
   # Dependencies of the workflow itself
   - xlrd
@@ -36,7 +36,7 @@ dependencies:
   - progressbar2
   - pyomo
   - matplotlib
-  - proj<8
+  - proj
 
   # Keep in conda environment when calling ipython
   - ipython
@@ -54,5 +54,5 @@ dependencies:
   - tabula-py
 
   - pip:
-    - vresutils==0.3.1
+    - vresutils>=0.3.1
     - tsam>=1.1.0

diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py
index 08a32a26..9e64ad29 100755
--- a/scripts/add_electricity.py
+++ b/scripts/add_electricity.py
@@ -95,7 +95,6 @@ import powerplantmatching as pm
 from powerplantmatching.export import map_country_bus
 
 from vresutils.costdata import annuity
-from vresutils.load import timeseries_opsd
 from vresutils import transfer as vtransfer
 
 idx = pd.IndexSlice
@@ -227,7 +226,6 @@ def attach_load(n):
             # relative factors 0.6 and 0.4 have been determined from a linear
             # regression on the country to continent load data
-            # (refer to vresutils.load._upsampling_weights)
             factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
             return pd.DataFrame(factors.values * l.values[:,np.newaxis],
                                 index=l.index, columns=factors.index)

diff --git a/scripts/base_network.py b/scripts/base_network.py
index 514e4dc3..1f2b9241 100644
--- a/scripts/base_network.py
+++ b/scripts/base_network.py
@@ -97,7 +97,7 @@ def _get_country(df):
 
 
 def _find_closest_links(links, new_links, distance_upper_bound=1.5):
-    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten()
+    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
                              for s in links.geometry])
     querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
                              new_links[['x2', 'y2', 'x1', 'y1']]])

diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py
index 5814085b..366cb820 100644
--- a/scripts/build_shapes.py
+++ b/scripts/build_shapes.py
@@ -79,7 +79,7 @@ from itertools import takewhile
 import pandas as pd
 import geopandas as gpd
 from shapely.geometry import MultiPolygon, Polygon
-from shapely.ops import cascaded_union
+from shapely.ops import unary_union
 import pycountry as pyc
 
 logger = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ def _get_country(target, **keys):
 
 def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
     if isinstance(polys, MultiPolygon):
-        polys = sorted(polys, key=attrgetter('area'), reverse=True)
+        polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
         mainpoly = polys[0]
         mainlength = np.sqrt(mainpoly.area/(2.*np.pi))
         if mainpoly.area > minarea:
@@ -139,7 +139,7 @@ def country_cover(country_shapes, eez_shapes=None):
     if eez_shapes is not None:
         shapes += list(eez_shapes)
 
-    europe_shape = cascaded_union(shapes)
+    europe_shape = unary_union(shapes)
     if isinstance(europe_shape, MultiPolygon):
         europe_shape = max(europe_shape, key=attrgetter('area'))
     return Polygon(shell=europe_shape.exterior)

diff --git a/scripts/cluster_network.py b/scripts/cluster_network.py
index 1a976cd1..4b9db466 100644
--- a/scripts/cluster_network.py
+++ b/scripts/cluster_network.py
@@ -140,6 +140,9 @@ from functools import reduce
 from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering,
                                      _make_consense, get_clustering_from_busmap)
 
+import warnings
+warnings.filterwarnings(action='ignore', category=UserWarning)
+
 from add_electricity import load_costs
 
 idx = pd.IndexSlice
@@ -313,7 +316,7 @@ def cluster_regions(busmaps, input=None, output=None):
 
     for which in ('regions_onshore', 'regions_offshore'):
         regions = gpd.read_file(getattr(input, which)).set_index('name')
-        geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.cascaded_union)
+        geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.unary_union)
         regions_c = gpd.GeoDataFrame(dict(geometry=geom_c))
         regions_c.index.name = 'name'
         save_to_geojson(regions_c, getattr(output, which))
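
As an illustration of the failure mode described in the first commit
message, here is a minimal sketch. The country codes are hypothetical
and the near-integer solver outputs (9.01/0.99) are the illustrative
values from the commit message, not real solver results:

    import pandas as pd

    # Hypothetical per-country cluster counts as cbc/glpk/ipopt may
    # return them: near-integer floats instead of exact integers.
    n = pd.Series({'DE': 9.01, 'PL': 0.99})

    print(n.astype(int).to_dict())          # {'DE': 9, 'PL': 0} -- truncation: PL loses its only cluster
    print(n.round().astype(int).to_dict())  # {'DE': 9, 'PL': 1} -- rounding keeps all 10 clusters

The zero entry is what broke the downstream clustering with the
open-source solvers, presumably because a country cannot be split into
zero clusters; rounding to the nearest integer preserves the intended
total.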
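
The shapely changes in the second patch track two upstream
deprecations: `shapely.ops.cascaded_union` is superseded by
`shapely.ops.unary_union`, and iterating a `MultiPolygon` directly
gives way to the explicit `.geoms` accessor. A self-contained sketch
with hypothetical geometries standing in for country shapes:

    from shapely.geometry import MultiPolygon, Polygon
    from shapely.ops import unary_union

    # Two hypothetical unit squares standing in for country shapes.
    a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    b = Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])
    mp = MultiPolygon([a, b])

    # Deprecated: sorted(mp, ...); current: iterate the .geoms accessor.
    largest = max(mp.geoms, key=lambda p: p.area)

    # Deprecated: cascaded_union([a, b]); current: unary_union accepts
    # the same iterable of geometries.
    merged = unary_union([a, b])
    print(largest.area, merged.geom_type)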