address deprecation warnings

Fabian 2024-01-31 17:10:08 +01:00
parent 0f80e2d089
commit 46d8ce8f1f
11 changed files with 40 additions and 37 deletions
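The deprecations addressed below are pandas 2.x warnings: chained in-place modifications (`.update()`, `.replace(inplace=True)`, `.clip(inplace=True)` called through a column accessor), implicit downcasting, grouping columns in `groupby.apply`, and the uppercase `H` frequency alias. A minimal, hypothetical sketch (not repository code) of how such warnings can be surfaced during development, assuming pandas >= 2.1:

import warnings

import pandas as pd

# Opt in to copy-on-write early; it is scheduled to become the default in pandas 3.0
# and makes chained in-place modifications ineffective rather than merely deprecated.
pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"p_nom": [1.0, 2.0]})

with warnings.catch_warnings():
    warnings.simplefilter("error")  # escalate warnings so deprecated patterns fail loudly
    df["p_nom"] = df["p_nom"].clip(upper=1.5)  # assignment instead of clip(..., inplace=True)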

View File

@@ -327,7 +327,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
         axis=1,
     )

-    n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
+    n.madd(
+        "Load", substation_lv_i, bus=substation_lv_i, p_set=load
+    )  # carrier="electricity"


 def update_transmission_costs(n, costs, length_factor=1.0):
@@ -504,8 +506,8 @@ def attach_conventional_generators(
                     snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
                 ).iloc[:, 0]
                 bus_values = n.buses.country.map(values)
-                n.generators[attr].update(
-                    n.generators.loc[idx].bus.map(bus_values).dropna()
-                )
+                n.generators.update(
+                    {attr: n.generators.loc[idx].bus.map(bus_values).dropna()}
+                )
             else:
                 # Single value affecting all generators of technology k indiscriminantely of country
@@ -749,8 +751,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) ->
         caps = caps.groupby(["bus"]).Capacity.sum()
         caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)

-        n.generators.p_nom.update(gens.bus.map(caps).dropna())
-        n.generators.p_nom_min.update(gens.bus.map(caps).dropna())
+        n.generators.update({"p_nom": gens.bus.map(caps).dropna()})
+        n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()})


 def estimate_renewable_capacities(

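The hunks above replace `Series.update` called through a column accessor (a chained assignment that pandas 2.x deprecates) with `DataFrame.update` on the frame itself. A small illustration with made-up data, not repository code:

import pandas as pd

generators = pd.DataFrame({"p_nom": [100.0, 200.0, 300.0]}, index=["g1", "g2", "g3"])
caps = pd.Series({"g2": 250.0})  # hypothetical replacement capacities

# Deprecated chained pattern:
# generators.p_nom.update(caps)

# Equivalent frame-level call; the dict-like input is coerced to a DataFrame and
# aligned on the index, and NaNs in the patch leave existing values untouched.
generators.update({"p_nom": caps})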
View File

@@ -48,7 +48,7 @@ def add_build_year_to_new_assets(n, baseyear):
             "series"
         ) & n.component_attrs[c.name].status.str.contains("Input")
         for attr in n.component_attrs[c.name].index[selection]:
-            c.pnl[attr].rename(columns=rename, inplace=True)
+            c.pnl[attr] = c.pnl[attr].rename(columns=rename)


 def add_existing_renewables(df_agg):

View File

@@ -138,7 +138,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
     )

     buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
-    buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
+    buses["under_construction"] = buses.under_construction.where(
+        lambda s: s.notnull(), False
+    ).astype(bool)

     # remove all buses outside of all countries including exclusive economic zones (offshore)
     europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
@@ -525,9 +527,9 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
     gb = buses.loc[substation_b].groupby(
         ["x", "y"], as_index=False, group_keys=False, sort=False
     )
-    bus_map_low = gb.apply(prefer_voltage, "min")
+    bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
     lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
-    bus_map_high = gb.apply(prefer_voltage, "max")
+    bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
     hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)

     onshore_b = pd.Series(False, buses.index)

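For context on the `include_groups=False` change above: pandas 2.2 deprecated passing the grouping columns to the function applied by `DataFrameGroupBy.apply`. A toy example with invented data (not the `prefer_voltage` helper used in the diff):

import pandas as pd

buses = pd.DataFrame(
    {"x": [0, 0, 1], "y": [0, 0, 1], "v_nom": [220.0, 380.0, 380.0]},
    index=["b1", "b2", "b3"],
)

# Without include_groups=False, pandas 2.2 warns that the "x"/"y" columns will
# no longer be visible to the applied function in future versions.
highest = buses.groupby(["x", "y"]).apply(
    lambda g: g["v_nom"].idxmax(), include_groups=False
)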
View File

@@ -132,14 +132,14 @@ def disaggregate_nuts0(bio):
     pop = build_nuts_population_data()

     # get population in nuts2
-    pop_nuts2 = pop.loc[pop.index.str.len() == 4]
+    pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy()
     by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
-    pop_nuts2.loc[:, "fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)
+    pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)

     # distribute nuts0 data to nuts2 by population
     bio_nodal = bio.loc[pop_nuts2.ct]
     bio_nodal.index = pop_nuts2.index
-    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0)
+    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float)

     # update inplace
     bio.update(bio_nodal)

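The `.copy()` and `.astype(float)` additions above avoid writing a new column into a slice of another frame and make the dtype explicit before the in-place `bio.update`. A sketch with invented population data, not repository code:

import pandas as pd

pop = pd.DataFrame(
    {"ct": ["DE", "DE", "FR"], "total": [10.0, 30.0, 20.0]},
    index=["DE11", "DE12", "FR10"],
)

# An explicit copy makes the new "fraction" column unambiguous; assigning into a
# plain slice of `pop` can trigger a chained-assignment warning.
pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy()
by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)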
View File

@@ -114,13 +114,11 @@ def prepare_dataset(
     df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
     ratio = df.p_nom / df.p_nom_diameter
     not_nordstream = df.max_pressure_bar < 220
-    df.p_nom.update(
-        df.p_nom_diameter.where(
-            (df.p_nom <= 500)
-            | ((ratio > correction_threshold_p_nom) & not_nordstream)
-            | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
-        )
-    )
+    df["p_nom"] = df.p_nom_diameter.where(
+        (df.p_nom <= 500)
+        | ((ratio > correction_threshold_p_nom) & not_nordstream)
+        | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
+    )

     # lines which have way too discrepant line lengths
     # get assigned haversine length * length factor
@@ -130,13 +128,11 @@ def prepare_dataset(
         axis=1,
     )
     ratio = df.eval("length / length_haversine")
-    df["length"].update(
-        df.length_haversine.where(
-            (df["length"] < 20)
-            | (ratio > correction_threshold_length)
-            | (ratio < 1 / correction_threshold_length)
-        )
-    )
+    df["length"] = df.length_haversine.where(
+        (df["length"] < 20)
+        | (ratio > correction_threshold_length)
+        | (ratio < 1 / correction_threshold_length)
+    )

     return df

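The two hunks above drop the chained `.update()` wrapper and assign the `Series.where` result directly. `where` keeps the left-hand values where the condition holds and fills the remaining positions with NaN, or with a fallback if one is supplied. A reduced, hypothetical example:

import pandas as pd

p_nom = pd.Series([400.0, 900.0, 1200.0])
p_nom_diameter = pd.Series([450.0, 880.0, 1500.0])

# Keep the diameter-based capacity only where the condition holds; other
# positions become NaN unless a second argument provides a fallback.
corrected = p_nom_diameter.where(p_nom <= 500)
corrected_with_fallback = p_nom_diameter.where(p_nom <= 500, p_nom)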
View File

@@ -98,7 +98,7 @@ def calculate_line_rating(n, cutout):
     -------
     xarray DataArray object with maximal power.
     """
-    relevant_lines = n.lines[~n.lines["underground"]]
+    relevant_lines = n.lines[~n.lines["underground"]].copy()
     buses = relevant_lines[["bus0", "bus1"]].values
     x = n.buses.x
     y = n.buses.y

View File

@@ -83,6 +83,7 @@ if __name__ == "__main__":

         # correct for imprecision of Iinv*I
         pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
-        pop_cells_ct *= pop_ct / pop_cells_ct.sum()
+        if pop_cells_ct.sum() != 0:
+            pop_cells_ct *= pop_ct / pop_cells_ct.sum()

         # The first low density grid cells to reach rural fraction are rural

View File

@@ -297,8 +297,8 @@ def prepare_building_stock_data():
         errors="ignore",
     )

-    u_values.subsector.replace(rename_sectors, inplace=True)
-    u_values.btype.replace(rename_sectors, inplace=True)
+    u_values["subsector"] = u_values.subsector.replace(rename_sectors)
+    u_values["btype"] = u_values.btype.replace(rename_sectors)

     # for missing weighting of surfaces of building types assume MFH
     u_values["assumed_subsector"] = u_values.subsector
@@ -306,8 +306,8 @@ def prepare_building_stock_data():
         ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector"
     ] = "MFH"

-    u_values.country_code.replace({"UK": "GB"}, inplace=True)
-    u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True)
+    u_values["country_code"] = u_values.country_code.replace({"UK": "GB"})
+    u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"})
     u_values = u_values[~u_values.bage.isna()]

     u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True)

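The building-stock hunks above apply the same chained-assignment fix to `.replace(..., inplace=True)` on individual columns. A minimal sketch with invented values, not repository data:

import pandas as pd

u_values = pd.DataFrame({"country_code": ["UK", "DE"], "bage": ["Berfore 1945", "1945-1969"]})

# u_values.country_code.replace({"UK": "GB"}, inplace=True)   # warns in pandas 2.x
u_values["country_code"] = u_values.country_code.replace({"UK": "GB"})
u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"})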
View File

@@ -488,7 +488,9 @@ if __name__ == "__main__":
                     gens.efficiency, bins=[0, low, high, 1], labels=labels
                 ).astype(str)
                 carriers += [f"{c} {label} efficiency" for label in labels]
-                n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
+                n.generators.update(
+                    {"carrier": gens.carrier + " " + suffix + " efficiency"}
+                )
         aggregate_carriers = carriers

     if n_clusters == len(n.buses):

View File

@@ -269,8 +269,8 @@ def set_line_nom_max(
     hvdc = n.links.index[n.links.carrier == "DC"]
     n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext

-    n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
-    n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
+    n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set)
+    n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set)


 if __name__ == "__main__":

View File

@@ -418,7 +418,7 @@ def add_CCL_constraints(n, config):
     Example
     -------
     scenario:
-        opts: [Co2L-CCL-24H]
+        opts: [Co2L-CCL-24h]
     electricity:
         agg_p_nom_limits: data/agg_p_nom_minmax.csv
     """
@@ -463,7 +463,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
     Example
     -------
     scenario:
-        opts: [Co2L-EQ0.7-24H]
+        opts: [Co2L-EQ0.7-24h]

     Require each country or node to on average produce a minimal share
     of its total electricity consumption itself. Example: EQ0.7c demands each country
@@ -527,7 +527,7 @@ def add_BAU_constraints(n, config):
     Example
     -------
     scenario:
-        opts: [Co2L-BAU-24H]
+        opts: [Co2L-BAU-24h]
     electricity:
         BAU_mincapacities:
             solar: 0
@@ -564,7 +564,7 @@ def add_SAFE_constraints(n, config):
     config.yaml requires to specify opts:

     scenario:
-        opts: [Co2L-SAFE-24H]
+        opts: [Co2L-SAFE-24h]
     electricity:
         SAFE_reservemargin: 0.1
     Which sets a reserve margin of 10% above the peak demand.
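The `24H` to `24h` renames in these docstring examples track pandas 2.2, which deprecated the uppercase `H` offset alias in favour of `h`; the temporal-resolution token in the `opts` wildcard is ultimately handed to pandas time resampling, where the old alias now warns. Illustration only:

import pandas as pd

# pd.date_range("2013-01-01", periods=4, freq="24H")   # FutureWarning since pandas 2.2
snapshots = pd.date_range("2013-01-01", periods=4, freq="24h")  # preferred lowercase alias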