Merge pull request #880 from PyPSA/linting-refactor

Linting refactor
Fabian Hofmann 2024-01-19 13:22:00 +01:00 committed by GitHub
commit 67cf85ce04
35 changed files with 110 additions and 134 deletions

View File

@@ -13,15 +13,15 @@ import os
import sys
import time
from memory_profiler import _get_memory, choose_backend
logger = logging.getLogger(__name__)
# TODO: provide alternative when multiprocessing is not available
try:
from multiprocessing import Pipe, Process
except ImportError:
from multiprocessing.dummy import Process, Pipe
from memory_profiler import _get_memory, choose_backend
from multiprocessing.dummy import Pipe, Process
# The memory logging facilities have been adapted from memory_profiler
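The fallback above works because multiprocessing.dummy mirrors the multiprocessing API with thread-backed stand-ins, so the monitor degrades gracefully where real subprocesses are unavailable. A minimal, self-contained sketch of the pattern (the worker function is illustrative):

try:
    from multiprocessing import Pipe, Process
except ImportError:
    from multiprocessing.dummy import Pipe, Process  # thread-backed stand-ins

def _echo(conn, msg):
    # Runs in a child process, or in a thread with the dummy fallback.
    conn.send(msg)
    conn.close()

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=_echo, args=(child_conn, "ready"))
    p.start()
    print(parent_conn.recv())  # -> 'ready'
    p.join()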

View File

@@ -15,8 +15,6 @@ import pandas as pd
import pytz
import requests
import yaml
from pypsa.components import component_attrs, components
from pypsa.descriptors import Dict
from tqdm import tqdm
logger = logging.getLogger(__name__)
@@ -362,8 +360,25 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
return week_df
def parse(l):
return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}
def parse(infix):
"""
Recursively parse a chained wildcard expression into a dictionary or a YAML
object.
Parameters
----------
infix : list
The list to parse.
Returns
-------
dict or YAML object
The parsed list.
"""
if len(infix) == 1:
return yaml.safe_load(infix[0])
else:
return {infix.pop(0): parse(infix)}
def update_config_with_sector_opts(config, sector_opts):
@@ -371,8 +386,8 @@ def update_config_with_sector_opts(config, sector_opts):
for o in sector_opts.split("-"):
if o.startswith("CF+"):
l = o.split("+")[1:]
update_config(config, parse(l))
infix = o.split("+")[1:]
update_config(config, parse(infix))
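For reference, a sketch of how a chained CF+ option flows through the refactored parse (the option string below is made up; assumes the parse function above and import yaml):

infix = "CF+sector+co2_budget+0.5".split("+")[1:]
# -> ['sector', 'co2_budget', '0.5']
parse(infix)
# -> {'sector': {'co2_budget': 0.5}}  (yaml.safe_load types the leaf value)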
def get_checksum_from_zenodo(file_url):

View File

@@ -8,17 +8,15 @@ Prepares brownfield data from previous planning horizon.
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
logger = logging.getLogger(__name__)
idx = pd.IndexSlice
def add_brownfield(n, n_p, year):
logger.info(f"Preparing brownfield for the year {year}")
@@ -121,7 +119,7 @@ def add_brownfield(n, n_p, year):
def disable_grid_expansion_if_LV_limit_hit(n):
if not "lv_limit" in n.global_constraints.index:
if "lv_limit" not in n.global_constraints.index:
return
total_expansion = (
@@ -133,7 +131,7 @@ def disable_grid_expansion_if_LV_limit_hit(n):
# allow small numerical differences
if lv_limit - total_expansion < 1:
logger.info(f"LV is already reached, disabling expansion and LV limit")
logger.info("LV is already reached, disabling expansion and LV limit")
extendable_acs = n.lines.query("s_nom_extendable").index
n.lines.loc[extendable_acs, "s_nom_extendable"] = False
n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"]
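The change above drops an f-string with no placeholders (flake8 F541). Where a value is interpolated, passing it as a logging argument keeps formatting lazy; a sketch:

import logging

logger = logging.getLogger(__name__)
year = 2030
# The %s argument is only formatted if the INFO level is actually enabled:
logger.info("Preparing brownfield for the year %s", year)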

View File

@@ -294,10 +294,10 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
nuts3 = gpd.read_file(nuts3_shapes).set_index("index")
def upsample(cntry, group):
l = opsd_load[cntry]
load = opsd_load[cntry]
if len(group) == 1:
return pd.DataFrame({group.index[0]: l})
return pd.DataFrame({group.index[0]: load})
nuts3_cntry = nuts3.loc[nuts3.country == cntry]
transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
gdp_n = pd.Series(
@@ -314,8 +314,8 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
# overwrite factor because nuts3 provides no data for UA+MD
factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze())
return pd.DataFrame(
factors.values * l.values[:, np.newaxis],
index=l.index,
factors.values * load.values[:, np.newaxis],
index=load.index,
columns=factors.index,
)
@@ -622,7 +622,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
hydro.max_hours > 0, hydro.country.map(max_hours_country)
).fillna(6)
if flatten_dispatch := params.get("flatten_dispatch", False):
if params.get("flatten_dispatch", False):
buffer = params.get("flatten_dispatch_buffer", 0.2)
average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
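The walrus binding was dropped because flatten_dispatch was apparently never read inside the branch, so a plain .get() test says the same thing. The operator only earns its keep when the bound value is reused, e.g. (illustrative key):

params = {"flatten_dispatch_buffer": 0.2}
if (buffer := params.get("flatten_dispatch_buffer")) is not None:
    print(f"flattening dispatch with a buffer of {buffer}")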

View File

@@ -8,25 +8,20 @@ horizon.
"""
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
from types import SimpleNamespace
import country_converter as coco
import numpy as np
import pandas as pd
import pypsa
import xarray as xr
from _helpers import update_config_with_sector_opts
from add_electricity import sanitize_carriers
from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
idx = pd.IndexSlice
spatial = SimpleNamespace()

View File

@@ -9,11 +9,11 @@ using data from JRC ENSPRESO.
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]

View File

@@ -80,4 +80,9 @@ def build_biomass_transport_costs():
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("build_biomass_transport_costs")
build_biomass_transport_costs()

View File

@@ -28,7 +28,7 @@ if __name__ == "__main__":
gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
)
I = cutout.indicatormatrix(clustered_regions)
I = cutout.indicatormatrix(clustered_regions) # noqa: E741
pop = {}
for item in ["total", "urban", "rural"]:
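I trips flake8's E741 (ambiguous single-letter names l, O, I); the suppression keeps the linear-algebra convention used throughout these scripts. The alternative would be a rename, a sketch:

indicator_matrix = cutout.indicatormatrix(clustered_regions)  # hypothetical rename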

View File

@@ -41,13 +41,13 @@ Outputs
import logging
logger = logging.getLogger(__name__)
import dateutil
import numpy as np
import pandas as pd
from _helpers import configure_logging
from pandas import Timedelta as Delta
logger = logging.getLogger(__name__)
def load_timeseries(fn, years, countries, powerstatistics=True):
"""

View File

@@ -7,9 +7,6 @@ Build total energy demands per country using JRC IDEES, eurostat, and EEA data.
"""
import logging
logger = logging.getLogger(__name__)
import multiprocessing as mp
from functools import partial
@@ -21,7 +18,7 @@ from _helpers import mute_print
from tqdm import tqdm
cc = coco.CountryConverter()
logger = logging.getLogger(__name__)
idx = pd.IndexSlice

View File

@@ -9,12 +9,12 @@ production sites with data from SciGRID_gas and Global Energy Monitor.
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import pandas as pd
from cluster_gas_network import load_bus_regions
logger = logging.getLogger(__name__)
def read_scigrid_gas(fn):
df = gpd.read_file(fn)
@@ -27,8 +27,11 @@ def build_gem_lng_data(fn):
df = pd.read_excel(fn[0], sheet_name="LNG terminals - data")
df = df.set_index("ComboID")
remove_country = ["Cyprus", "Turkey"]
remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"]
remove_country = ["Cyprus", "Turkey"] # noqa: F841
remove_terminal = [ # noqa: F841
"Puerto de la Luz LNG Terminal",
"Gran Canaria LNG Terminal",
]
df = df.query(
"Status != 'Cancelled' \
@@ -45,8 +48,8 @@ def build_gem_prod_data(fn):
df = pd.read_excel(fn[0], sheet_name="Gas extraction - main")
df = df.set_index("GEM Unit ID")
remove_country = ["Cyprus", "Türkiye"]
remove_fuel_type = ["oil"]
remove_country = ["Cyprus", "Türkiye"] # noqa: F841
remove_fuel_type = ["oil"] # noqa: F841
df = df.query(
"Status != 'shut in' \
@@ -96,8 +99,8 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):
]
sto = read_scigrid_gas(sto_fn)
remove_country = ["RU", "UA", "TR", "BY"]
sto = sto.query("country_code != @remove_country")
remove_country = ["RU", "UA", "TR", "BY"] # noqa: F841
sto = sto.query("country_code not in @remove_country")
# production sites inside the model scope
prod = build_gem_prod_data(gem_fn)
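The F841 suppressions are needed because DataFrame.query resolves @remove_country and friends at runtime via frame locals, which pyflakes cannot see; the switch from != to not in is for explicitness (pandas already treats ==/!= against a list as a membership test in query). A self-contained sketch:

import pandas as pd

sto = pd.DataFrame({"country_code": ["DE", "RU", "FR", "UA"]})
remove_country = ["RU", "UA", "TR", "BY"]  # noqa: F841 -- used via @ below
sto = sto.query("country_code not in @remove_country")
print(sto)  # keeps the DE and FR rows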

View File

@@ -9,13 +9,13 @@ Preprocess gas network based on data from the SciGRID_gas project
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import pandas as pd
from pypsa.geo import haversine_pts
from shapely.geometry import Point
logger = logging.getLogger(__name__)
def diameter_to_capacity(pipe_diameter_mm):
"""

View File

@@ -34,7 +34,7 @@ if __name__ == "__main__":
gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
)
I = cutout.indicatormatrix(clustered_regions)
I = cutout.indicatormatrix(clustered_regions) # noqa: E741
pop_layout = xr.open_dataarray(snakemake.input.pop_layout)

View File

@@ -7,9 +7,6 @@ Build spatial distribution of industries from Hotmaps database.
"""
import logging
logger = logging.getLogger(__name__)
import uuid
from itertools import product
@@ -18,6 +15,7 @@ import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
@@ -32,7 +30,7 @@ def locate_missing_industrial_sites(df):
try:
from geopy.extra.rate_limiter import RateLimiter
from geopy.geocoders import Nominatim
except:
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'geopy' not found."
"Install via 'conda install -c conda-forge geopy'"
@@ -101,7 +99,7 @@ def prepare_hotmaps_database(regions):
# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2") # noqa: F841
# screen out malformed country allocation
gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
# concat not duplicated and filtered gdf

View File

@@ -7,11 +7,8 @@ Build industrial production per country.
"""
import logging
from functools import partial
logger = logging.getLogger(__name__)
import multiprocessing as mp
from functools import partial
import country_converter as coco
import numpy as np
@@ -19,6 +16,7 @@ import pandas as pd
from _helpers import mute_print
from tqdm import tqdm
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
tj_to_ktoe = 0.0238845

View File

@@ -50,7 +50,6 @@ With a heat balance considering the maximum temperature threshold of the transmi
the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated.
"""
import logging
import re
import atlite
@@ -99,7 +98,7 @@ def calculate_line_rating(n, cutout):
-------
xarray DataArray object with maximal power.
"""
relevant_lines = n.lines[(n.lines["underground"] == False)]
relevant_lines = n.lines[~n.lines["underground"]]
buses = relevant_lines[["bus0", "bus1"]].values
x = n.buses.x
y = n.buses.y
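Comparing a boolean column to False is flagged by pycodestyle (E712); negating the mask with ~ is equivalent for bool dtype and idiomatic. A toy sketch:

import pandas as pd

lines = pd.DataFrame({"underground": [True, False, False]})
overhead = lines[~lines["underground"]]  # rows where underground is False
print(len(overhead))  # -> 2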

View File

@@ -8,15 +8,14 @@ Build mapping between cutout grid cells and population (total, urban, rural).
import logging
logger = logging.getLogger(__name__)
import atlite
import geopandas as gpd
import numpy as np
import pandas as pd
import xarray as xr
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -34,7 +33,7 @@ if __name__ == "__main__":
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
# Indicator matrix NUTS3 -> grid cells
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) # noqa: E741
# Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is the identity,
# but numerical imprecision means it is not exact

View File

@@ -340,7 +340,7 @@ if __name__ == "__main__":
f"Completed weighted capacity factor time series calculation ({duration:2.2f}s)"
)
logger.info(f"Calculating maximal capacity per bus")
logger.info("Calculating maximal capacity per bus")
p_nom_max = capacity_per_sqkm * availability @ area
logger.info("Calculate average distances.")

View File

@@ -554,7 +554,7 @@ def prepare_temperature_data():
# windows ---------------------------------------------------------------
def window_limit(l, window_assumptions):
def window_limit(l, window_assumptions): # noqa: E741
"""
Define the u-value limit above which a window is retrofitted.
"""
@@ -567,7 +567,7 @@ def window_limit(l, window_assumptions):
return m * l + a
def u_retro_window(l, window_assumptions):
def u_retro_window(l, window_assumptions): # noqa: E741
"""
Define retrofitting value depending on renovation strength.
"""
@@ -580,7 +580,7 @@ def u_retro_window(l, window_assumptions):
return max(m * l + a, 0.8)
def window_cost(u, cost_retro, window_assumptions):
def window_cost(u, cost_retro, window_assumptions): # noqa: E741
"""
Get costs for new windows depending on u value.
"""
@@ -600,7 +600,7 @@ def window_cost(u, cost_retro, window_assumptions):
return window_cost
def calculate_costs(u_values, l, cost_retro, window_assumptions):
def calculate_costs(u_values, l, cost_retro, window_assumptions): # noqa: E741
"""
Returns costs for a given retrofitting strength weighted by the average
surface/volume ratio of the component for each building type.
@@ -626,7 +626,7 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
)
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): # noqa: E741
"""
Calculate U-values after building retrofitting, depending on the old
U-values (u_values). This is for simple insulation measures, adding an
@@ -746,7 +746,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
"""
# (1) by transmission
# calculate new U values of building elements due to additional insulation
for l in l_strength:
for l in l_strength: # noqa: E741
u_values[f"new_U_{l}"] = calculate_new_u(
u_values, l, l_weight, window_assumptions
)
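The l parameter (retrofitting strength) hits E741 in every signature above, hence the blanket suppressions. A rename would avoid them; a sketch assuming slope m and intercept a come from window_assumptions (hypothetical keys):

def window_limit(strength, window_assumptions):
    # U-value limit above which a window is retrofitted, linear in strength.
    m = window_assumptions["m"]  # assumed slope key
    a = window_assumptions["a"]  # assumed intercept key
    return m * strength + a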

View File

@@ -34,7 +34,7 @@ if __name__ == "__main__":
gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
)
I = cutout.indicatormatrix(clustered_regions)
I = cutout.indicatormatrix(clustered_regions) # noqa: E741
pop_layout = xr.open_dataarray(snakemake.input.pop_layout)

View File

@@ -8,14 +8,14 @@ Cluster gas transmission network to clustered model regions.
import logging
logger = logging.getLogger(__name__)
import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from shapely import wkt
logger = logging.getLogger(__name__)
def concat_gdf(gdf_list, crs="EPSG:4326"):
"""

View File

@@ -133,6 +133,7 @@ import pyomo.environ as po
import pypsa
import seaborn as sns
from _helpers import configure_logging, update_p_nom_max
from add_electricity import load_costs
from pypsa.clustering.spatial import (
busmap_by_greedy_modularity,
busmap_by_hac,
@@ -141,11 +142,7 @@ from pypsa.clustering.spatial import (
)
warnings.filterwarnings(action="ignore", category=UserWarning)
from add_electricity import load_costs
idx = pd.IndexSlice
logger = logging.getLogger(__name__)

View File

@@ -6,8 +6,6 @@
Copy used configuration files and important scripts for archiving.
"""
from pathlib import Path
from shutil import copy
import yaml

View File

@@ -8,9 +8,6 @@ capacity factors, curtailment, energy balances, prices and other metrics.
"""
import logging
logger = logging.getLogger(__name__)
import sys
import numpy as np
@@ -19,7 +16,7 @@ import pypsa
from prepare_sector_network import prepare_costs
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
@@ -509,10 +506,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
if carrier in ["H2", "gas"]:
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
elif carrier[:5] == "space":
load = heat_demand_df[buses.str[:2]].rename(
columns=lambda i: str(i) + suffix
)
else:
load = n.loads_t.p_set[buses]

View File

@@ -12,15 +12,12 @@ other metrics.
import numpy as np
import pandas as pd
import pypsa
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from make_summary import calculate_cfs # noqa: F401
from make_summary import calculate_nodal_cfs # noqa: F401
from make_summary import calculate_nodal_costs # noqa: F401
from make_summary import assign_carriers, assign_locations
from prepare_sector_network import prepare_costs
from pypsa.descriptors import get_active_assets, nominal_attrs
from pypsa.descriptors import get_active_assets
from six import iteritems
idx = pd.IndexSlice
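Splitting the import lets each apparently unused name carry its own # noqa: F401; the functions look unused because they are presumably resolved dynamically by name at runtime. A sketch of that pattern:

def calculate_cfs():
    # Stand-in for `from make_summary import calculate_cfs  # noqa: F401`.
    return "capacity factors"

for output in ["cfs"]:
    # The name is resolved at runtime, invisible to pyflakes:
    print(globals()[f"calculate_{output}"]())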

View File

@@ -13,8 +13,6 @@ nodes.
import logging
logger = logging.getLogger(__name__)
import cartopy.crs as ccrs
import geopandas as gpd
import matplotlib.pyplot as plt
@@ -24,6 +22,7 @@ from make_summary import assign_carriers
from plot_summary import preferred_order, rename_techs
from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
logger = logging.getLogger(__name__)
plt.style.use(["ggplot"])
@@ -896,14 +895,12 @@ def plot_series(network, carrier="AC", name="test"):
fig.tight_layout()
fig.savefig(
"{}/{RDIR}maps/series-{}-{}-{}-{}-{}.pdf".format(
"results",
"results/{}maps/series-{}-{}-{}-{}.pdf".format(
snakemake.params.RDIR,
snakemake.wildcards["ll"],
carrier,
start,
stop,
name,
),
transparent=True,
)
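The fix repairs a str.format template that mixed a literal {RDIR} with positional slots. An f-string would make the argument/placeholder pairing self-evident; a sketch assuming the same variables are in scope:

fig.savefig(
    f"results/{snakemake.params.RDIR}maps/"
    f"series-{snakemake.wildcards['ll']}-{carrier}-{start}-{stop}.pdf",
    transparent=True,
)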

View File

@@ -8,17 +8,14 @@ Creates plots from summary CSV files.
import logging
logger = logging.getLogger(__name__)
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.style.use("ggplot")
from prepare_sector_network import co2_emissions_year
logger = logging.getLogger(__name__)
plt.style.use("ggplot")
# consolidate and rename
def rename_techs(label):

View File

@@ -9,7 +9,6 @@ import pandas as pd
import pypsa
import seaborn as sns
from _helpers import configure_logging
from pypsa.statistics import get_bus_and_carrier
sns.set_theme("paper", style="whitegrid")

View File

@@ -58,7 +58,6 @@ Description
"""
import logging
import re
import numpy as np
import pandas as pd
@@ -195,7 +194,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
logger.info(f"Aggregating time series to {segments} segments.")
try:
import tsam.timeseriesaggregation as tsam
except:
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
)

View File

@@ -305,7 +305,7 @@ def set_carbon_constraints(n, opts):
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget != None:
if budget is not None:
logger.info(f"add carbon budget of {budget}")
n.add(
"GlobalConstraint",
@@ -428,7 +428,7 @@ def apply_time_segmentation_perfect(
"""
try:
import tsam.timeseriesaggregation as tsam
except:
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
)
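budget != None in the hunk above violates E711; the identity check is the robust form because __eq__ can be overloaded (pandas objects, for instance, compare elementwise). A sketch:

budget = 25.0  # works for any value, including falsy ones like 0.0
if budget is not None:  # identity check, immune to __eq__ overloads
    print(f"add carbon budget of {budget}")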

View File

@@ -11,6 +11,7 @@ import logging
import os
import re
from itertools import product
from types import SimpleNamespace
import networkx as nx
import numpy as np
@@ -22,18 +23,13 @@ from add_electricity import calculate_annuity, sanitize_carriers
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe
from scipy.stats import beta
logger = logging.getLogger(__name__)
from types import SimpleNamespace
spatial = SimpleNamespace()
from packaging.version import Version, parse
logger = logging.getLogger(__name__)
pd_version = parse(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
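The last two lines gate keyword arguments on the installed pandas version; the same pattern in isolation (a sketch):

import pandas as pd
from packaging.version import Version, parse

agg_group_kwargs = (
    dict(numeric_only=False) if parse(pd.__version__) >= Version("1.3") else {}
)
df = pd.DataFrame({"carrier": ["wind", "wind"], "p_nom": [1.0, 2.0]})
print(df.groupby("carrier").sum(**agg_group_kwargs))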
@@ -187,8 +183,6 @@ def define_spatial(nodes, options):
return spatial
from types import SimpleNamespace
spatial = SimpleNamespace()
@@ -1476,7 +1470,6 @@ def add_land_transport(n, costs):
# TODO options?
logger.info("Add land transport")
nhours = n.snapshot_weightings.generators.sum()
transport = pd.read_csv(
snakemake.input.transport_demand, index_col=0, parse_dates=True
@@ -3124,6 +3117,7 @@ def add_waste_heat(n):
# TODO options?
logger.info("Add possibility to use industrial waste heat in district heating")
cf_industry = snakemake.params.industry
# AC buses with district heating
urban_central = n.buses.index[n.buses.carrier == "urban central heat"]
@@ -3484,7 +3478,7 @@ def apply_time_segmentation(
"""
try:
import tsam.timeseriesaggregation as tsam
except:
except ImportError:
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
)
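A bare except: would also have swallowed KeyboardInterrupt and SystemExit; catching ImportError alone, and chaining the re-raise, keeps the traceback useful. A sketch:

try:
    import tsam.timeseriesaggregation as tsam
except ImportError as e:
    raise ModuleNotFoundError(
        "Optional dependency 'tsam' not found. Install via 'pip install tsam'"
    ) from e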

View File

@@ -7,13 +7,12 @@ Retrieve monthly fuel prices from Destatis.
"""
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from _helpers import configure_logging, progress_retrieve
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake

View File

@@ -7,14 +7,13 @@ Retrieve and extract data bundle for sector-coupled studies.
"""
import logging
logger = logging.getLogger(__name__)
import tarfile
from pathlib import Path
from _helpers import configure_logging, progress_retrieve, validate_checksum
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake

View File

@@ -86,7 +86,7 @@ The rule :mod:`simplify_network` does up to four things:
"""
import logging
from functools import partial, reduce
from functools import reduce
import numpy as np
import pandas as pd

View File

@@ -39,10 +39,10 @@ import xarray as xr
from _benchmark import memory_logger
from _helpers import configure_logging, get_opt, update_config_with_sector_opts
from pypsa.descriptors import get_activity_mask
from pypsa.descriptors import get_switchable_as_dense as get_as_dense
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
from pypsa.descriptors import get_switchable_as_dense as get_as_dense
def add_land_use_constraint(n, planning_horizons, config):
@@ -572,7 +572,7 @@ def add_SAFE_constraints(n, config):
peakdemand = n.loads_t.p_set.sum(axis=1).max()
margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
reserve_margin = peakdemand * margin
conventional_carriers = config["electricity"]["conventional_carriers"]
conventional_carriers = config["electricity"]["conventional_carriers"] # noqa: F841
ext_gens_i = n.generators.query(
"carrier in @conventional_carriers & p_nom_extendable"
).index
@@ -690,11 +690,11 @@ def add_battery_constraints(n):
def add_lossy_bidirectional_link_constraints(n):
if not n.links.p_nom_extendable.any() or not "reversed" in n.links.columns:
if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns:
return
n.links["reversed"] = n.links.reversed.fillna(0).astype(bool)
carriers = n.links.loc[n.links.reversed, "carrier"].unique()
carriers = n.links.loc[n.links.reversed, "carrier"].unique() # noqa: F841
forward_i = n.links.query(
"carrier in @carriers and ~reversed and p_nom_extendable"