Merge branch 'master' into snakemake_dependencies_in_functions

Authored by Martha Frysztacki on 2022-01-14 11:05:54 +01:00; committed by GitHub
commit 7067d924cc
6 changed files with 13 additions and 12 deletions

View File

@@ -58,7 +58,7 @@ The dataset consists of:
 - Electrical demand time series from the
   [OPSD project](https://open-power-system-data.org/).
 - Renewable time series based on ERA5 and SARAH, assembled using the [atlite tool](https://github.com/FRESNA/atlite).
-- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [vresutils library](https://github.com/FRESNA/vresutils) and the [glaes library](https://github.com/FZJ-IEK3-VSA/glaes).
+- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [atlite library](https://github.com/PyPSA/atlite).
 
 Already-built versions of the model can be found in the accompanying [Zenodo
 repository](https://doi.org/10.5281/zenodo.3601881).

View File

@@ -10,9 +10,9 @@ dependencies:
 - python>=3.8
 - pip
-- pypsa>=0.18
+- pypsa>=0.18.1
 - atlite>=0.2.5
-- dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved
+- dask
 
 # Dependencies of the workflow itself
 - xlrd
@@ -36,7 +36,7 @@ dependencies:
 - progressbar2
 - pyomo
 - matplotlib
-- proj<8
+- proj
 
 # Keep in conda environment when calling ipython
 - ipython
@@ -54,5 +54,5 @@ dependencies:
 - tabula-py
 
 - pip:
-  - vresutils==0.3.1
+  - vresutils>=0.3.1
   - tsam>=1.1.0

View File

@@ -95,7 +95,6 @@ import powerplantmatching as pm
 from powerplantmatching.export import map_country_bus
 
 from vresutils.costdata import annuity
-from vresutils.load import timeseries_opsd
 from vresutils import transfer as vtransfer
 
 idx = pd.IndexSlice
@@ -218,7 +217,6 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.):
 
             # relative factors 0.6 and 0.4 have been determined from a linear
             # regression on the country to continent load data
-            # (refer to vresutils.load._upsampling_weights)
             factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
             return pd.DataFrame(factors.values * l.values[:,np.newaxis],
                                 index=l.index, columns=factors.index)
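
The weighted sum above disaggregates each country's load time series to substation regions in proportion to a GDP/population mix; the 0.6/0.4 weights come from the regression mentioned in the comment. A minimal self-contained sketch of the idea (the function name `upsample_load` and the toy data are illustrative, not from the codebase):

```python
import numpy as np
import pandas as pd

def normed(s):
    """Scale a series so that its entries sum to one."""
    return s / s.sum()

def upsample_load(country_load, gdp, pop, w_gdp=0.6, w_pop=0.4):
    """Split a national load time series across regions using a
    GDP/population weighting, mirroring the factors computed above."""
    factors = normed(w_gdp * normed(gdp) + w_pop * normed(pop))
    return pd.DataFrame(country_load.values[:, np.newaxis] * factors.values,
                        index=country_load.index, columns=factors.index)

# toy example: three hours of national load split over two regions
load = pd.Series([100.0, 90.0, 95.0], index=pd.RangeIndex(3, name="hour"))
gdp = pd.Series({"region_a": 30.0, "region_b": 10.0})
pop = pd.Series({"region_a": 1.0, "region_b": 3.0})

regional = upsample_load(load, gdp, pop)
assert np.allclose(regional.sum(axis=1), load)  # totals are preserved
```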

View File

@@ -97,7 +97,7 @@ def _get_country(df):
 
 def _find_closest_links(links, new_links, distance_upper_bound=1.5):
-    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten()
+    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
                              for s in links.geometry])
     querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
                              new_links[['x2', 'y2', 'x1', 'y1']]])
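
This fix tracks shapely 1.8, which deprecates (and 2.0 removes) the implicit NumPy conversion of geometries: `np.asarray(linestring)` no longer yields the coordinate array, while the `.coords` sequence remains supported. A standalone illustration (not from the repo):

```python
import numpy as np
import shapely.wkt

line = shapely.wkt.loads("LINESTRING (0 0, 1 1, 2 0)")

# np.asarray(line) is deprecated in shapely 1.8 and removed in 2.0;
# the coordinate sequence is the supported route to an array:
coords = np.asarray(line.coords)       # shape (3, 2)
endpoints = coords[[0, -1]].flatten()  # (x_start, y_start, x_end, y_end)
print(endpoints)                       # [0. 0. 2. 0.]
```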

View File

@@ -79,7 +79,7 @@ from itertools import takewhile
 import pandas as pd
 import geopandas as gpd
 from shapely.geometry import MultiPolygon, Polygon
-from shapely.ops import cascaded_union
+from shapely.ops import unary_union
 import pycountry as pyc
 
 logger = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ def _get_country(target, **keys):
 
 def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
     if isinstance(polys, MultiPolygon):
-        polys = sorted(polys, key=attrgetter('area'), reverse=True)
+        polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
         mainpoly = polys[0]
         mainlength = np.sqrt(mainpoly.area/(2.*np.pi))
         if mainpoly.area > minarea:
@@ -138,7 +138,7 @@ def country_cover(country_shapes, eez_shapes=None):
     if eez_shapes is not None:
         shapes += list(eez_shapes)
 
-    europe_shape = cascaded_union(shapes)
+    europe_shape = unary_union(shapes)
     if isinstance(europe_shape, MultiPolygon):
         europe_shape = max(europe_shape, key=attrgetter('area'))
     return Polygon(shell=europe_shape.exterior)
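
Both changes in this file follow shapely 1.8 deprecations: multi-part geometries are no longer iterable directly (the parts live under `.geoms`), and `cascaded_union` is superseded by `unary_union`. A self-contained sketch of the two idioms (the example boxes are made up):

```python
from operator import attrgetter
from shapely.geometry import MultiPolygon, box
from shapely.ops import unary_union

mp = MultiPolygon([box(0, 0, 2, 2), box(5, 0, 6, 1)])

# shapely >= 1.8: iterate the parts via .geoms, not the geometry itself
largest = max(mp.geoms, key=attrgetter("area"))
print(largest.area)  # 4.0

# unary_union replaces the deprecated cascaded_union and dissolves
# overlapping shapes into a single geometry
merged = unary_union([box(0, 0, 2, 2), box(1, 1, 3, 3)])
print(merged.area)  # 7.0: two area-4 squares overlapping in an area-1 square
```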

View File

@@ -140,6 +140,9 @@ from functools import reduce
 from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering,
                                      _make_consense, get_clustering_from_busmap)
 
+import warnings
+warnings.filterwarnings(action='ignore', category=UserWarning)
+
 from add_electricity import load_costs
 
 idx = pd.IndexSlice
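
The filter added above silences `UserWarning`s process-wide from the moment of import. When suppression should stay local, the standard library offers a context manager; a small sketch of that alternative:

```python
import warnings

def noisy():
    warnings.warn("clustering heuristic in use", UserWarning)

# suppress UserWarning only inside this block; global filters are untouched
with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    noisy()  # silenced

noisy()  # warns again outside the block
```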
@@ -215,7 +218,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
     results = opt.solve(m)
     assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"
 
-    return pd.Series(m.n.get_values(), index=L.index).astype(int)
+    return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
 
 
 def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds):
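
The added `.round()` guards against integer variables coming back from the solver with floating-point noise: `astype(int)` truncates toward zero, so a value such as 3.9999999 would silently drop to 3. A minimal demonstration with toy values (not actual solver output):

```python
import pandas as pd

# integer results from MILP solvers often carry floating-point noise
raw = pd.Series({"cluster_a": 3.9999999997, "cluster_b": 7.0000000002})

print(raw.astype(int).tolist())          # [3, 7]  <- truncation loses a unit
print(raw.round().astype(int).tolist())  # [4, 7]  <- round first, then cast
```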