From ce6b92dd6d9b3a5fe47e27e9c705c3b659181073 Mon Sep 17 00:00:00 2001
From: lisazeyen
Date: Mon, 8 Aug 2022 08:53:07 +0200
Subject: [PATCH 1/3] add options to aggregate snapshots temporally

---
 scripts/prepare_sector_network.py | 85 +++++++++++++++++++++++++++++--
 1 file changed, 80 insertions(+), 5 deletions(-)

diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py
index 0c175ff4..0bfc78e0 100644
--- a/scripts/prepare_sector_network.py
+++ b/scripts/prepare_sector_network.py
@@ -2413,6 +2413,84 @@ def limit_individual_line_extension(n, maxext):
     hvdc = n.links.index[n.links.carrier == 'DC']
     n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext
 
+
+def apply_time_segmentation(n, segments, solver_name="cbc",
+                            overwrite_time_dependent=False):
+    """Aggregate time series to segments with different lengths.
+
+    Input:
+        n: pypsa Network
+        segments: (int) number of segments in which the typical period should be
+                  subdivided
+        solver_name: (str) name of solver
+        overwrite_time_dependent: (bool) overwrite time-dependent data of the
+                  pypsa network with the typical time series created by tsam
+    """
+    try:
+        import tsam.timeseriesaggregation as tsam
+    except ImportError:
+        raise ModuleNotFoundError("Optional dependency 'tsam' not found. "
+                                  "Install via 'pip install tsam'")
+
+    # get all time-dependent data
+    columns = pd.MultiIndex.from_tuples([], names=['component', 'key', 'asset'])
+    raw = pd.DataFrame(index=n.snapshots, columns=columns)
+    for component in n.all_components:
+        pnl = n.pnl(component)
+        for key in pnl.keys():
+            if not pnl[key].empty:
+                df = pnl[key].copy()
+                df.columns = pd.MultiIndex.from_product([[component], [key], df.columns])
+                raw = pd.concat([raw, df], axis=1)
+
+    # normalise all time-dependent data
+    annual_max = raw.max().replace(0, 1)
+    raw = raw.div(annual_max, level=0)
+
+    # get representative segments
+    agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
+                                     noTypicalPeriods=1, noSegments=int(segments),
+                                     segmentation=True, solver=solver_name)
+    segmented = agg.createTypicalPeriods()
+
+
+    weightings = segmented.index.get_level_values("Segment Duration")
+    offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
+    timesteps = [raw.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
+    snapshots = pd.DatetimeIndex(timesteps)
+    sn_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64")
+
+    n.set_snapshots(sn_weightings.index)
+    n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
+
+    # overwrite time-dependent data with the time series created by tsam
+    if overwrite_time_dependent:
+        values_t = segmented.mul(annual_max).set_index(snapshots)
+        for component, key in values_t.columns.droplevel(2).unique():
+            n.pnl(component)[key] = values_t[component, key]
+
+    return n
+
+def set_temporal_aggregation(n, opts, solver_name):
+    """Aggregate network temporally."""
+    for o in opts:
+        # temporal averaging
+        m = re.match(r"^\d+h$", o, re.IGNORECASE)
+        if m is not None:
+            n = average_every_nhours(n, m.group(0))
+        # representative snapshots
+        m = re.match(r"^\d+sn$", o, re.IGNORECASE)
+        if m is not None:
+            sn = int(m.group(0).split("sn")[0])
+            logger.info("use every {} snapshot as representative".format(sn))
+            n.set_snapshots(n.snapshots[::sn])
+            n.snapshot_weightings *= sn
+        # segments with package tsam
+        if "SEG" in o:
+            segments = int(o.replace("SEG",""))
+            logger.info("use temporal segmentation with {} segments".format(segments))
+            n = apply_time_segmentation(n, segments, solver_name=solver_name)
+    return n
 #%%
 if __name__ == "__main__":
     if 'snakemake' not in globals():
@@ -2514,11 +2592,8 @@ if __name__ == "__main__":
     if options["co2_network"]:
         add_co2_network(n, costs)
 
-    for o in opts:
-        m = re.match(r'^\d+h$', o, re.IGNORECASE)
-        if m is not None:
-            n = average_every_nhours(n, m.group(0))
-            break
+    solver_name = snakemake.config["solving"]["solver"]["name"]
+    n = set_temporal_aggregation(n, opts, solver_name)
 
     limit_type = "config"
     limit = get(snakemake.config["co2_budget"], investment_year)

From 4accfff1bbf50cfdec3d39e2a2a04d9cc2d3ba96 Mon Sep 17 00:00:00 2001
From: lisazeyen
Date: Mon, 8 Aug 2022 08:53:16 +0200
Subject: [PATCH 2/3] update release notes

---
 doc/release_notes.rst | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index abcbaad5..7a454f14 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -57,11 +57,13 @@ incorporates retrofitting options to hydrogen.
 
 **New features and functionality**
 
+* Add option to aggregate the network temporally using representative snapshots or segments (with the tsam package)
+
 * Add option for biomass boilers (wood pellets) for decentral heating
 
-* Add option for BioSNG (methane from biomass) with and without CC
+* Add option for BioSNG (methane from biomass) with and without CC
 
-* Add option for BtL (Biomass to liquid fuel/oil) with and without CC
+* Add option for BtL (Biomass to liquid fuel/oil) with and without CC
 
 * Units are assigned to the buses. These only provide a better understanding. The specifications of the units are not taken into account in the optimisation, which means that no automatic conversion of units takes place.

From ed8f7830964e576f70815f17c1fefb448ad6cf3e Mon Sep 17 00:00:00 2001
From: lisazeyen
Date: Wed, 14 Sep 2022 16:16:50 +0200
Subject: [PATCH 3/3] incorporate code review

---
 scripts/prepare_sector_network.py | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py
index 0bfc78e0..2a7d6ce9 100644
--- a/scripts/prepare_sector_network.py
+++ b/scripts/prepare_sector_network.py
@@ -2415,7 +2415,7 @@ def limit_individual_line_extension(n, maxext):
 
 
 def apply_time_segmentation(n, segments, solver_name="cbc",
-                            overwrite_time_dependent=False):
+                            overwrite_time_dependent=True):
     """Aggregate time series to segments with different lengths.
 
     Input:
@@ -2435,12 +2435,12 @@ def apply_time_segmentation(n, segments, solver_name="cbc",
     # get all time-dependent data
     columns = pd.MultiIndex.from_tuples([], names=['component', 'key', 'asset'])
     raw = pd.DataFrame(index=n.snapshots, columns=columns)
-    for component in n.all_components:
-        pnl = n.pnl(component)
-        for key in pnl.keys():
-            if not pnl[key].empty:
-                df = pnl[key].copy()
-                df.columns = pd.MultiIndex.from_product([[component], [key], df.columns])
+    for c in n.iterate_components():
+        for attr, pnl in c.pnl.items():
+            # exclude e_min_pu, which is used for the SOC of EVs in the morning
+            if not pnl.empty and attr != 'e_min_pu':
+                df = pnl.copy()
+                df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
                 raw = pd.concat([raw, df], axis=1)
 
     # normalise all time-dependent data
@@ -2478,18 +2478,22 @@ def set_temporal_aggregation(n, opts, solver_name):
     """Aggregate network temporally."""
     for o in opts:
         # temporal averaging
         m = re.match(r"^\d+h$", o, re.IGNORECASE)
         if m is not None:
             n = average_every_nhours(n, m.group(0))
+            break
         # representative snapshots
-        m = re.match(r"^\d+sn$", o, re.IGNORECASE)
+        m = re.match(r"^(\d+)sn$", o, re.IGNORECASE)
         if m is not None:
-            sn = int(m.group(0).split("sn")[0])
-            logger.info("use every {} snapshot as representative".format(sn))
+            sn = int(m[1])
+            logger.info(f"use every {sn}th snapshot as representative")
             n.set_snapshots(n.snapshots[::sn])
             n.snapshot_weightings *= sn
+            break
         # segments with package tsam
-        if "SEG" in o:
-            segments = int(o.replace("SEG",""))
-            logger.info("use temporal segmentation with {} segments".format(segments))
+        m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
+        if m is not None:
+            segments = int(m[1])
+            logger.info(f"use temporal segmentation with {segments} segments")
             n = apply_time_segmentation(n, segments, solver_name=solver_name)
+            break
     return n
 #%%
 if __name__ == "__main__":
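
Note for reviewers: the three wildcard forms handled by the new set_temporal_aggregation can be tried outside the Snakemake workflow. The sketch below is illustrative only: the toy load series, the random seed, the solver choice, and all variable names are assumptions rather than part of the patch series; the tsam call mirrors the one in apply_time_segmentation.

    # minimal sketch, assuming pandas, numpy, and the optional tsam package are installed
    import numpy as np
    import pandas as pd
    import tsam.timeseriesaggregation as tsam

    # toy hourly profile standing in for the network's time-dependent data
    idx = pd.date_range("2013-01-01", periods=8760, freq="H")
    raw = pd.DataFrame({"load": np.random.default_rng(0).random(8760)}, index=idx)

    # "24H" -> temporal averaging (average_every_nhours): 24-hourly means
    averaged = raw.resample("24H").mean()

    # "4SN" -> representative snapshots: keep every 4th snapshot and scale the
    # snapshot weightings by 4 so that annual totals are preserved
    representative = raw.iloc[::4]
    weightings = pd.Series(4.0, index=representative.index)

    # "100SEG" -> segmentation with tsam, mirroring apply_time_segmentation:
    # one typical period spanning the whole series, cut into 100 segments of varying length
    agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
                                     noTypicalPeriods=1, noSegments=100,
                                     segmentation=True, solver="cbc")
    segmented = agg.createTypicalPeriods()
    durations = segmented.index.get_level_values("Segment Duration")

With the patches applied, the same behaviour is selected through the wildcard options parsed into opts (e.g. 24H, 4SN, or 100SEG); the segmentation path additionally requires tsam to be installed and uses the solver configured under solving: solver: name.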