Merge pull request #1075 from koen-vg/time-agg-fix

Minor bugfixes for the new time aggregation implementation: return the network early in the every-nth-snapshot branch, aggregate the time-varying data onto a fresh copy of the network instead of re-setting the snapshots of n in place before aggregating, and assign the returned network at the call site.
Fabian Neumann 2024-05-21 18:29:18 +02:00 committed by GitHub
commit fd7dcb2e8a

@@ -3634,15 +3634,13 @@ def set_temporal_aggregation(n, resolution, snapshot_weightings):
         logger.info("Use every %s snapshot as representative", sn)
         n.set_snapshots(n.snapshots[::sn])
         n.snapshot_weightings *= sn
+        return n
     else:
         # Otherwise, use the provided snapshots
         snapshot_weightings = pd.read_csv(
             snapshot_weightings, index_col=0, parse_dates=True
         )

-        n.set_snapshots(snapshot_weightings.index)
-        n.snapshot_weightings = snapshot_weightings
-
         # Define a series used for aggregation, mapping each hour in
         # n.snapshots to the closest previous timestep in
         # snapshot_weightings.index
@@ -3656,16 +3654,23 @@ def set_temporal_aggregation(n, resolution, snapshot_weightings):
             .map(lambda i: snapshot_weightings.index[i])
         )

+        m = n.copy(with_time=False)
+        m.set_snapshots(snapshot_weightings.index)
+        m.snapshot_weightings = snapshot_weightings
+
         # Aggregation all time-varying data.
         for c in n.iterate_components():
+            pnl = getattr(m, c.list_name + "_t")
             for k, df in c.pnl.items():
                 if not df.empty:
                     if c.list_name == "stores" and k == "e_max_pu":
-                        c.pnl[k] = df.groupby(aggregation_map).min()
+                        pnl[k] = df.groupby(aggregation_map).min()
                     elif c.list_name == "stores" and k == "e_min_pu":
-                        c.pnl[k] = df.groupby(aggregation_map).max()
+                        pnl[k] = df.groupby(aggregation_map).max()
                     else:
-                        c.pnl[k] = df.groupby(aggregation_map).mean()
+                        pnl[k] = df.groupby(aggregation_map).mean()
+
+        return m


 def lossy_bidirectional_links(n, carrier, efficiencies={}):
@@ -3818,7 +3823,7 @@ if __name__ == "__main__":
     if options["allam_cycle"]:
         add_allam(n, costs)

-    set_temporal_aggregation(
+    n = set_temporal_aggregation(
         n, snakemake.params.time_resolution, snakemake.input.snapshot_weightings
     )
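
The retained aggregation path hinges on the aggregation_map series: every hour in n.snapshots is mapped to the closest previous timestep in snapshot_weightings.index, and each time-varying table is then reduced with a single groupby over that mapping. Below is a minimal, standalone sketch of the same construction, using made-up timestamps and plain pandas/numpy in place of a PyPSA network:

    import numpy as np
    import pandas as pd

    # Hourly snapshots and a coarser, irregular set of representative
    # timesteps, standing in for n.snapshots and snapshot_weightings.index.
    snapshots = pd.date_range("2013-01-01", periods=48, freq="h")
    representative = pd.DatetimeIndex(
        ["2013-01-01 00:00", "2013-01-01 07:00", "2013-01-02 01:00"]
    )

    # get_indexer returns the position of each exact match and -1 otherwise;
    # replacing -1 with NaN and forward-filling assigns every hour to the
    # closest previous representative timestep, as in the patched function.
    aggregation_map = (
        pd.Series(representative.get_indexer(snapshots), index=snapshots)
        .replace(-1, np.nan)
        .ffill()
        .astype(int)
        .map(lambda i: representative[i])
    )

    # Mean-aggregate an hourly profile onto the representative timesteps.
    # Stores' e_max_pu/e_min_pu instead take the min/max of each group, so
    # the aggregated bounds remain valid for every hour they represent.
    profile = pd.Series(np.arange(48.0), index=snapshots)
    print(profile.groupby(aggregation_map).mean())

Aggregating into m = n.copy(with_time=False) and returning it is what replaces the removed in-place variant, which re-set the snapshots of n before the hourly data had been aggregated.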