Merge branch 'master' into fix/deprecation-warning-pandas-1.4
commit 6a2366ecac
@@ -17,7 +17,7 @@ repos:

   # Sort package imports alphabetically
   - repo: https://github.com/PyCQA/isort
-    rev: 5.11.4
+    rev: 5.12.0
     hooks:
       - id: isort
         args: ["--profile", "black", "--filter-files"]
@@ -51,7 +51,7 @@ repos:

   # Formatting with "black" coding style
   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       # Format Python files
       - id: black
@@ -74,7 +74,7 @@ repos:

   # Format Snakemake rule / workflow files
   - repo: https://github.com/snakemake/snakefmt
-    rev: v0.8.0
+    rev: v0.8.1
     hooks:
       - id: snakefmt

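Note: the three hunks above only bump the pinned hook revisions (isort 5.11.4 to 5.12.0, black 22.12.0 to 23.1.0, snakefmt v0.8.0 to v0.8.1). Pins like these are usually refreshed with "pre-commit autoupdate" and the tree then reformatted with "pre-commit run --all-files"; presumably that re-run with the newer black is what produces the blank-line deletions in the Python hunks below.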
@@ -394,12 +394,10 @@ def attach_conventional_generators(
     )

     for carrier in conventional_config:
-
         # Generators with technology affected
         idx = n.generators.query("carrier == @carrier").index

         for attr in list(set(conventional_config[carrier]) & set(n.generators)):
-
             values = conventional_config[carrier][attr]

             if f"conventional_{carrier}_{attr}" in conventional_inputs:
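Note: the only change in this hunk, and in the similar one-line hunks further down, is the deletion of blank lines directly after a block opener. That matches the 2023 stable style promoted in black 23.1.0, the version the pre-commit bump above pins. A minimal sketch of the behaviour, assuming black >= 23.1.0 is installed:

    import black

    # Black's 2023 stable style drops the empty line right after the colon.
    src = "for x in items:\n\n    process(x)\n"
    print(black.format_str(src, mode=black.Mode()))
    # for x in items:
    #     process(x)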
@@ -498,7 +496,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **con
         e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6
         e_installed = hydro.eval("p_nom * max_hours").groupby(hydro.country).sum()
         e_missing = e_target - e_installed
-        missing_mh_i = hydro.query("max_hours == 0").index
+        missing_mh_i = hydro.query("max_hours.isnull()").index

         if hydro_max_hours == "energy_capacity_totals_by_country":
             # watch out some p_nom values like IE's are totally underrepresented
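Note: the semantic fix here is the query filter. Plants with unknown max_hours carry NaN, which compares unequal to everything, so "max_hours == 0" never matches them; "max_hours.isnull()" selects the genuinely missing entries (DataFrame.query accepts such method calls in the expression). A toy illustration with a hypothetical plants table:

    import numpy as np
    import pandas as pd

    hydro = pd.DataFrame({"max_hours": [6.0, 0.0, np.nan]})
    print(hydro.query("max_hours == 0").index.tolist())      # [1] -- misses the NaN row
    print(hydro.query("max_hours.isnull()").index.tolist())  # [2] -- the missing entry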
@@ -511,6 +509,8 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **con
                 hydro_stats["E_store[TWh]"] * 1e3 / hydro_stats["p_nom_discharge[GW]"]
             )

+        max_hours_country.clip(0, inplace=True)
+
         missing_countries = pd.Index(hydro["country"].unique()).difference(
             max_hours_country.dropna().index
         )
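Note: the added clip guards the per-country storage-hours estimate. In the energy_capacity_totals_by_country branch it is derived from e_missing = e_target - e_installed, which turns negative where more energy capacity is installed than the statistics target, so without the clip a negative max_hours could propagate to plants. A minimal sketch with hypothetical values:

    import pandas as pd

    # Hypothetical per-country estimate; IE's value has gone negative.
    max_hours_country = pd.Series({"AT": 1200.0, "IE": -35.0})
    max_hours_country.clip(0, inplace=True)  # negatives floored at 0
    print(max_hours_country)                 # AT 1200.0, IE 0.0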
@@ -694,7 +694,6 @@ def base_network(
     parameter_corrections,
     config,
 ):
-
     buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"])

     links = _load_links_from_eg(buses, eg_links)
@@ -268,7 +268,6 @@ def manual_adjustment(load, fn_load, powerstatistics):


 if __name__ == "__main__":
-
     if "snakemake" not in globals():
         from _helpers import mock_snakemake

@@ -80,7 +80,6 @@ import pandas as pd
 import pycountry as pyc
 from _helpers import configure_logging
 from shapely.geometry import MultiPolygon, Polygon
-from shapely.ops import unary_union

 logger = logging.getLogger(__name__)

@@ -158,8 +157,7 @@ def country_cover(country_shapes, eez_shapes=None):
     shapes = country_shapes
     if eez_shapes is not None:
         shapes = pd.concat([shapes, eez_shapes])
-
-    europe_shape = unary_union(shapes)
+    europe_shape = shapes.unary_union
     if isinstance(europe_shape, MultiPolygon):
         europe_shape = max(europe_shape, key=attrgetter("area"))
     return Polygon(shell=europe_shape.exterior)
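Note: together with the import dropped in the previous hunk, this switches the dissolve step from the shapely.ops.unary_union() function to the unary_union property that geopandas exposes on a GeoSeries (shapes comes from pd.concat of GeoSeries here). A minimal sketch under that assumption:

    import geopandas as gpd
    from shapely.geometry import Polygon

    # Two touching unit squares dissolve into a single polygon.
    shapes = gpd.GeoSeries(
        [
            Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
            Polygon([(1, 0), (2, 0), (2, 1), (1, 1)]),
        ]
    )
    europe_shape = shapes.unary_union
    print(europe_shape.area)  # 2.0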
@@ -238,7 +238,6 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
     ), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."

     if focus_weights is not None:
-
         total_focus = sum(list(focus_weights.values()))

         assert (
@@ -396,7 +395,6 @@ def clustering_for_n_clusters(
     extended_link_costs=0,
     focus_weights=None,
 ):
-
     bus_strategies, generator_strategies = get_aggregation_strategies(
         aggregation_strategies
     )
@@ -164,7 +164,6 @@ def calculate_curtailment(n, label, curtailment):

 def calculate_energy(n, label, energy):
     for c in n.iterate_components(n.one_port_components | n.branch_components):
-
         if c.name in {"Generator", "Load", "ShuntImpedance"}:
             c_energies = (
                 c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
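Note: for context on the unchanged lines, c.pnl.p holds per-snapshot power (MW) and n.snapshot_weightings.generators the hours each snapshot represents, so the multiply-then-sum converts power into energy per component. A toy illustration, assuming uniform 3-hourly snapshots:

    import pandas as pd

    p = pd.DataFrame({"gen0": [10.0, 12.0]})        # MW in each snapshot
    weights = pd.Series([3.0, 3.0], index=p.index)  # hours per snapshot
    print(p.multiply(weights, axis=0).sum())        # gen0: 66.0 (MWh)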
@@ -238,7 +237,6 @@ def calculate_supply(n, label, supply):
     load_types = n.buses.carrier.unique()

     for i in load_types:
-
         buses = n.buses.query("carrier == @i").index

         bus_map = pd.Series(False, index=n.buses.index)
@@ -246,7 +244,6 @@ def calculate_supply(n, label, supply):
         bus_map.loc[buses] = True

         for c in n.iterate_components(n.one_port_components):
-
             items = c.df.index[c.df.bus.map(bus_map)]

             if len(items) == 0 or c.pnl.p.empty:
@@ -267,9 +264,7 @@ def calculate_supply(n, label, supply):
             supply.loc[idx[raw_index], label] = s.values

         for c in n.iterate_components(n.branch_components):
-
             for end in ["0", "1"]:
-
                 items = c.df.index[c.df["bus" + end].map(bus_map)]

                 if len(items) == 0 or c.pnl["p" + end].empty:
@@ -298,7 +293,6 @@ def calculate_supply_energy(n, label, supply_energy):
     load_types = n.buses.carrier.unique()

     for i in load_types:
-
         buses = n.buses.query("carrier == @i").index

         bus_map = pd.Series(False, index=n.buses.index)
@@ -306,7 +300,6 @@ def calculate_supply_energy(n, label, supply_energy):
         bus_map.loc[buses] = True

         for c in n.iterate_components(n.one_port_components):
-
             items = c.df.index[c.df.bus.map(bus_map)]

             if len(items) == 0 or c.pnl.p.empty:
@@ -327,9 +320,7 @@ def calculate_supply_energy(n, label, supply_energy):
             supply_energy.loc[idx[raw_index], label] = s.values

         for c in n.iterate_components(n.branch_components):
-
             for end in ["0", "1"]:
-
                 items = c.df.index[c.df["bus" + end].map(bus_map)]

                 if len(items) == 0 or c.pnl["p" + end].empty:
@@ -431,7 +422,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
     }

     for carrier in link_loads:
-
         if carrier == "electricity":
             suffix = ""
         elif carrier[:5] == "space":
@@ -454,7 +444,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
         load = n.loads_t.p_set[buses]

         for tech in link_loads[carrier]:
-
             names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

             if names.empty:
@@ -324,7 +324,6 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
     )

     for lbl in labels.value_counts().loc[lambda s: s > 2].index:
-
         for b, buses, links in split_links(labels.index[labels == lbl]):
             if len(buses) <= 2:
                 continue