update version compatibility handling

Fabian Neumann 2024-02-09 13:59:15 +01:00
parent e0dafc50b3
commit 3691f9f4c1
7 changed files with 25 additions and 40 deletions

View File

@@ -45,7 +45,7 @@ if config["foresight"] != "perfect":
             (
                 LOGS
                 + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
-            )
+            ),
         benchmark:
             (
                 BENCHMARKS
@@ -74,7 +74,7 @@ if config["foresight"] != "perfect":
             (
                 LOGS
                 + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
-            )
+            ),
         benchmark:
             (
                 BENCHMARKS
@@ -102,7 +102,7 @@ if config["foresight"] != "perfect":
             (
                 LOGS
                 + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log"
-            )
+            ),
         benchmark:
             (
                 BENCHMARKS

View File

@@ -264,7 +264,6 @@ def mock_snakemake(
     import os
 
     import snakemake as sm
-    from packaging.version import Version, parse
     from pypsa.descriptors import Dict
     from snakemake.script import Snakemake
@@ -290,13 +289,12 @@ def mock_snakemake(
         if os.path.exists(p):
             snakefile = p
             break
-    kwargs = (
-        dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
-    )
     if isinstance(configfiles, str):
         configfiles = [configfiles]
-    workflow = sm.Workflow(snakefile, overwrite_configfiles=configfiles, **kwargs)
+    workflow = sm.Workflow(
+        snakefile, overwrite_configfiles=configfiles, rerun_triggers=[]
+    )
     workflow.include(snakefile)
     if configfiles:
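
Note: the deleted kwargs shim relied on packaging's PEP 440 comparison; a minimal sketch of how that gate behaved (version strings are illustrative only):

from packaging.version import Version, parse

# Version compares release segments numerically, so "7.32.4" sorts above
# "7.7.0" (plain string comparison would get this wrong):
assert parse("7.32.4") > Version("7.7.0")
assert not parse("7.7.0") > Version("7.7.0")

Dropping the gate means mock_snakemake now unconditionally passes rerun_triggers=[], i.e. it assumes a snakemake release that accepts that keyword (anything newer than 7.7.0 by the old check).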

View File

@@ -78,10 +78,13 @@ import shapely.prepared
 import shapely.wkt
 import yaml
 from _helpers import configure_logging
+from packaging.version import Version, parse
 from scipy import spatial
 from scipy.sparse import csgraph
 from shapely.geometry import LineString, Point
 
+PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
+
 logger = logging.getLogger(__name__)
@@ -524,12 +527,13 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
         )
         return pd.Series(key, index)
 
+    compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
     gb = buses.loc[substation_b].groupby(
         ["x", "y"], as_index=False, group_keys=False, sort=False
     )
-    bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
+    bus_map_low = gb.apply(prefer_voltage, "min", **compat_kws)
     lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
-    bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
+    bus_map_high = gb.apply(prefer_voltage, "max", **compat_kws)
     hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
     onshore_b = pd.Series(False, buses.index)
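
Note on the new PD_GE_2_2 switch: pandas 2.2 added the include_groups flag to DataFrameGroupBy.apply and deprecated passing the grouping columns through to the applied function; on older pandas the keyword is not intercepted by apply and would be forwarded to the function itself. A minimal self-contained sketch of the pattern, with toy data:

import pandas as pd
from packaging.version import Version, parse

PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}

df = pd.DataFrame({"x": [0.0, 0.0, 1.0], "y": [0.0, 0.0, 0.0], "v_nom": [380.0, 220.0, 380.0]})
gb = df.groupby(["x", "y"], group_keys=False, sort=False)
# On pandas >= 2.2 the applied function no longer receives the "x"/"y" key
# columns; on older versions the empty dict avoids forwarding an
# unexpected keyword argument to the function.
result = gb.apply(lambda g: g["v_nom"].max(), **compat_kws)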

View File

@@ -13,7 +13,6 @@ from itertools import product
 import country_converter as coco
 import geopandas as gpd
 import pandas as pd
-from packaging.version import Version, parse
 
 logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
@@ -84,12 +83,7 @@ def prepare_hotmaps_database(regions):
 
     gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326")
 
-    kws = (
-        dict(op="within")
-        if parse(gpd.__version__) < Version("0.10")
-        else dict(predicate="within")
-    )
-    gdf = gpd.sjoin(gdf, regions, how="inner", **kws)
+    gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within")
 
     gdf.rename(columns={"index_right": "bus"}, inplace=True)
     gdf["country"] = gdf.bus.str[:2]

View File

@@ -10,7 +10,6 @@ import logging
 
 import geopandas as gpd
 import pandas as pd
-from packaging.version import Version, parse
 from pypsa.geo import haversine_pts
 from shapely import wkt
@@ -41,12 +40,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
     for i in [0, 1]:
         gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")
 
-        kws = (
-            dict(op="within")
-            if parse(gpd.__version__) < Version("0.10")
-            else dict(predicate="within")
-        )
-        bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
+        bus_mapping = gpd.sjoin(
+            gdf, bus_regions, how="left", predicate="within"
+        ).index_right
         bus_mapping = bus_mapping.groupby(bus_mapping.index).first()
 
         df[f"bus{i}"] = bus_mapping

View File

@@ -135,6 +135,7 @@ import pypsa
 import seaborn as sns
 from _helpers import configure_logging, update_p_nom_max
 from add_electricity import load_costs
+from packaging.version import Version, parse
 from pypsa.clustering.spatial import (
     busmap_by_greedy_modularity,
     busmap_by_hac,
@@ -142,6 +143,8 @@ from pypsa.clustering.spatial import (
     get_clustering_from_busmap,
 )
 
+PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
+
 warnings.filterwarnings(action="ignore", category=UserWarning)
 idx = pd.IndexSlice
 logger = logging.getLogger(__name__)
@@ -362,9 +365,11 @@ def busmap_for_n_clusters(
             f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
         )
 
+    compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
+
     return (
         n.buses.groupby(["country", "sub_network"], group_keys=False)
-        .apply(busmap_for_country, include_groups=False)
+        .apply(busmap_for_country, **compat_kws)
         .squeeze()
         .rename("busmap")
     )
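
Same PD_GE_2_2 gate as in base_network.py. What include_groups=False changes concretely: the function passed to apply no longer sees the grouping columns. A small sketch, assuming pandas >= 2.2 (toy data, not the real bus table):

import pandas as pd

buses = pd.DataFrame(
    {"country": ["DE", "DE", "FR"], "sub_network": ["0", "0", "0"], "v_nom": [380.0, 220.0, 380.0]}
)
gb = buses.groupby(["country", "sub_network"], group_keys=False)
# Each group frame contains only the non-key columns, i.e. just "v_nom";
# without include_groups=False, pandas 2.2 also hands "country" and
# "sub_network" to the function and emits a DeprecationWarning.
per_group_cols = gb.apply(lambda g: tuple(g.columns), include_groups=False)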

View File

@@ -23,15 +23,12 @@ from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locat
 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
 from networkx.algorithms import complement
 from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
-from packaging.version import Version, parse
 from pypsa.geo import haversine_pts
 from pypsa.io import import_components_from_dataframe
 from scipy.stats import beta
 
 spatial = SimpleNamespace()
 logger = logging.getLogger(__name__)
 
-pd_version = parse(pd.__version__)
-agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
 
 def define_spatial(nodes, options):
def define_spatial(nodes, options):
@@ -1853,16 +1850,7 @@ def add_heat(n, costs):
             p_nom_extendable=True,
         )
 
-        if isinstance(options["tes_tau"], dict):
-            tes_time_constant_days = options["tes_tau"][name_type]
-        else:
-            logger.warning(
-                "Deprecated: a future version will require you to specify 'tes_tau' ",
-                "for 'decentral' and 'central' separately.",
-            )
-            tes_time_constant_days = (
-                options["tes_tau"] if name_type == "decentral" else 180.0
-            )
+        tes_time_constant_days = options["tes_tau"][name_type]
 
         n.madd(
             "Store",
@@ -3404,7 +3392,7 @@ def cluster_heat_buses(n):
         # cluster heat nodes
         # static dataframe
         agg = define_clustering(df.columns, aggregate_dict)
-        df = df.groupby(level=0).agg(agg, **agg_group_kwargs)
+        df = df.groupby(level=0).agg(agg, numeric_only=False)
 
         # time-varying data
         pnl = c.pnl
         agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict)
@@ -3413,7 +3401,7 @@ def cluster_heat_buses(n):
             def renamer(s):
                 return s.replace("residential ", "").replace("services ", "")
 
-            pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], **agg_group_kwargs).T
+            pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], numeric_only=False).T
 
         # remove unclustered assets of service/residential
         to_drop = c.df.index.difference(df.index)
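
Hard-coding numeric_only=False (accepted by .agg since pandas 1.3, hence the dropped agg_group_kwargs gate) keeps reducers like "first" working on object-dtype columns instead of dropping them. A toy sketch of the static-dataframe aggregation; names and values are illustrative:

import pandas as pd

df = pd.DataFrame(
    {"p_nom": [10.0, 5.0], "carrier": ["water tanks", "water tanks"]},
    index=["DE0 0 heat", "DE0 0 heat"],
)
agg = {"p_nom": "sum", "carrier": "first"}
# numeric_only=False is forwarded to the reducers so the non-numeric
# "carrier" column is aggregated rather than silently excluded:
clustered = df.groupby(level=0).agg(agg, numeric_only=False)

Separately, the add_heat hunk drops the deprecated scalar fallback: options["tes_tau"] must now be a dict with "decentral" and "central" keys.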