Merge branch 'master' into perfect-foresight-fixes

This commit is contained in:
Fabian Neumann 2024-02-05 12:18:53 +01:00 committed by GitHub
commit f0ec1d6716
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 69 additions and 51 deletions

View File

@ -81,11 +81,7 @@ jobs:
key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }} key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }}
- name: Test snakemake workflow - name: Test snakemake workflow
run: | run: ./test.sh
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v4.3.0 uses: actions/upload-artifact@v4.3.0

View File

@ -31,7 +31,12 @@ CDIR = RDIR if not run.get("shared_cutouts") else ""
LOGS = "logs/" + RDIR LOGS = "logs/" + RDIR
BENCHMARKS = "benchmarks/" + RDIR BENCHMARKS = "benchmarks/" + RDIR
RESOURCES = "resources/" + RDIR if not run.get("shared_resources") else "resources/" if not (shared_resources := run.get("shared_resources")):
RESOURCES = "resources/" + RDIR
elif isinstance(shared_resources, str):
RESOURCES = "resources/" + shared_resources + "/"
else:
RESOURCES = "resources/"
RESULTS = "results/" + RDIR RESULTS = "results/" + RDIR

View File

@ -8,14 +8,14 @@ tutorial: true
run: run:
name: "test-elec" # use this to keep track of runs with different settings name: "test-elec" # use this to keep track of runs with different settings
disable_progressbar: true disable_progressbar: true
shared_resources: true shared_resources: "test"
shared_cutouts: true shared_cutouts: true
scenario: scenario:
clusters: clusters:
- 5 - 5
opts: opts:
- Co2L-24H - Co2L-24h
countries: ['BE'] countries: ['BE']

View File

@ -7,7 +7,7 @@ tutorial: true
run: run:
name: "test-sector-myopic" name: "test-sector-myopic"
disable_progressbar: true disable_progressbar: true
shared_resources: true shared_resources: "test"
shared_cutouts: true shared_cutouts: true
foresight: myopic foresight: myopic
@ -18,7 +18,7 @@ scenario:
clusters: clusters:
- 5 - 5
sector_opts: sector_opts:
- 24H-T-H-B-I-A-dist1 - 24h-T-H-B-I-A-dist1
planning_horizons: planning_horizons:
- 2030 - 2030
- 2040 - 2040

View File

@ -7,7 +7,7 @@ tutorial: true
run: run:
name: "test-sector-overnight" name: "test-sector-overnight"
disable_progressbar: true disable_progressbar: true
shared_resources: true shared_resources: "test"
shared_cutouts: true shared_cutouts: true
@ -17,7 +17,7 @@ scenario:
clusters: clusters:
- 5 - 5
sector_opts: sector_opts:
- CO2L0-24H-T-H-B-I-A-dist1 - CO2L0-24h-T-H-B-I-A-dist1
planning_horizons: planning_horizons:
- 2030 - 2030

View File

@ -7,7 +7,7 @@ tutorial: true
run: run:
name: "test-sector-perfect" name: "test-sector-perfect"
disable_progressbar: true disable_progressbar: true
shared_resources: true shared_resources: "test"
shared_cutouts: true shared_cutouts: true
foresight: perfect foresight: perfect

View File

@ -66,6 +66,10 @@ Upcoming Release
* Various minor bugfixes to the perfect foresight workflow, though perfect foresight must still be considered experimental. * Various minor bugfixes to the perfect foresight workflow, though perfect foresight must still be considered experimental.
* It is now possible to determine the directory for shared resources by setting `shared_resources` to a string.
* A ``test.sh`` script was added to the repository to run the tests locally.
* Default settings for recycling rates and primary product shares of high-value * Default settings for recycling rates and primary product shares of high-value
chemicals have been set in accordance with the values used in `Neumann et al. chemicals have been set in accordance with the values used in `Neumann et al.
(2023) <https://doi.org/10.1016/j.joule.2023.06.016>`_ linearly interpolated (2023) <https://doi.org/10.1016/j.joule.2023.06.016>`_ linearly interpolated
@ -74,6 +78,7 @@ Upcoming Release
<https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf>`_. <https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf>`_.
PyPSA-Eur 0.9.0 (5th January 2024) PyPSA-Eur 0.9.0 (5th January 2024)
================================== ==================================

View File

@ -54,6 +54,7 @@ if config["foresight"] != "perfect":
rule plot_hydrogen_network: rule plot_hydrogen_network:
params: params:
plotting=config["plotting"], plotting=config["plotting"],
foresight=config["foresight"],
input: input:
network=RESULTS network=RESULTS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

View File

@ -327,7 +327,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
axis=1, axis=1,
) )
n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load) n.madd(
"Load", substation_lv_i, bus=substation_lv_i, p_set=load
) # carrier="electricity"
def update_transmission_costs(n, costs, length_factor=1.0): def update_transmission_costs(n, costs, length_factor=1.0):
@ -504,8 +506,8 @@ def attach_conventional_generators(
snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0 snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
).iloc[:, 0] ).iloc[:, 0]
bus_values = n.buses.country.map(values) bus_values = n.buses.country.map(values)
n.generators[attr].update( n.generators.update(
n.generators.loc[idx].bus.map(bus_values).dropna() {attr: n.generators.loc[idx].bus.map(bus_values).dropna()}
) )
else: else:
# Single value affecting all generators of technology k indiscriminately of country # Single value affecting all generators of technology k indiscriminately of country
@ -749,8 +751,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) ->
caps = caps.groupby(["bus"]).Capacity.sum() caps = caps.groupby(["bus"]).Capacity.sum()
caps = caps / gens_per_bus.reindex(caps.index, fill_value=1) caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)
n.generators.p_nom.update(gens.bus.map(caps).dropna()) n.generators.update({"p_nom": gens.bus.map(caps).dropna()})
n.generators.p_nom_min.update(gens.bus.map(caps).dropna()) n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()})
def estimate_renewable_capacities( def estimate_renewable_capacities(

View File

@ -48,7 +48,7 @@ def add_build_year_to_new_assets(n, baseyear):
"series" "series"
) & n.component_attrs[c.name].status.str.contains("Input") ) & n.component_attrs[c.name].status.str.contains("Input")
for attr in n.component_attrs[c.name].index[selection]: for attr in n.component_attrs[c.name].index[selection]:
c.pnl[attr].rename(columns=rename, inplace=True) c.pnl[attr] = c.pnl[attr].rename(columns=rename)
def add_existing_renewables(df_agg): def add_existing_renewables(df_agg):

View File

@ -138,7 +138,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
) )
buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"}) buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool) buses["under_construction"] = buses.under_construction.where(
lambda s: s.notnull(), False
).astype(bool)
# remove all buses outside of all countries including exclusive economic zones (offshore) # remove all buses outside of all countries including exclusive economic zones (offshore)
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"] europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
@ -525,9 +527,9 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
gb = buses.loc[substation_b].groupby( gb = buses.loc[substation_b].groupby(
["x", "y"], as_index=False, group_keys=False, sort=False ["x", "y"], as_index=False, group_keys=False, sort=False
) )
bus_map_low = gb.apply(prefer_voltage, "min") bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False) lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, "max") bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False) hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index) onshore_b = pd.Series(False, buses.index)

View File

@ -132,14 +132,14 @@ def disaggregate_nuts0(bio):
pop = build_nuts_population_data() pop = build_nuts_population_data()
# get population in nuts2 # get population in nuts2
pop_nuts2 = pop.loc[pop.index.str.len() == 4] pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy()
by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum() by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
pop_nuts2.loc[:, "fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country) pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)
# distribute nuts0 data to nuts2 by population # distribute nuts0 data to nuts2 by population
bio_nodal = bio.loc[pop_nuts2.ct] bio_nodal = bio.loc[pop_nuts2.ct]
bio_nodal.index = pop_nuts2.index bio_nodal.index = pop_nuts2.index
bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0) bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float)
# update inplace # update inplace
bio.update(bio_nodal) bio.update(bio_nodal)

View File

@ -114,12 +114,10 @@ def prepare_dataset(
df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity) df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
ratio = df.p_nom / df.p_nom_diameter ratio = df.p_nom / df.p_nom_diameter
not_nordstream = df.max_pressure_bar < 220 not_nordstream = df.max_pressure_bar < 220
df.p_nom.update( df["p_nom"] = df.p_nom_diameter.where(
df.p_nom_diameter.where( (df.p_nom <= 500)
(df.p_nom <= 500) | ((ratio > correction_threshold_p_nom) & not_nordstream)
| ((ratio > correction_threshold_p_nom) & not_nordstream) | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
| ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
)
) )
# lines which have way too discrepant line lengths # lines which have way too discrepant line lengths
@ -130,12 +128,10 @@ def prepare_dataset(
axis=1, axis=1,
) )
ratio = df.eval("length / length_haversine") ratio = df.eval("length / length_haversine")
df["length"].update( df["length"] = df.length_haversine.where(
df.length_haversine.where( (df["length"] < 20)
(df["length"] < 20) | (ratio > correction_threshold_length)
| (ratio > correction_threshold_length) | (ratio < 1 / correction_threshold_length)
| (ratio < 1 / correction_threshold_length)
)
) )
return df return df

View File

@ -98,7 +98,7 @@ def calculate_line_rating(n, cutout):
------- -------
xarray DataArray object with maximal power. xarray DataArray object with maximal power.
""" """
relevant_lines = n.lines[~n.lines["underground"]] relevant_lines = n.lines[~n.lines["underground"]].copy()
buses = relevant_lines[["bus0", "bus1"]].values buses = relevant_lines[["bus0", "bus1"]].values
x = n.buses.x x = n.buses.x
y = n.buses.y y = n.buses.y

View File

@ -83,7 +83,8 @@ if __name__ == "__main__":
# correct for imprecision of Iinv*I # correct for imprecision of Iinv*I
pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum() pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
pop_cells_ct *= pop_ct / pop_cells_ct.sum() if pop_cells_ct.sum() != 0:
pop_cells_ct *= pop_ct / pop_cells_ct.sum()
# The first low density grid cells to reach rural fraction are rural # The first low density grid cells to reach rural fraction are rural
asc_density_i = density_cells_ct.sort_values().index asc_density_i = density_cells_ct.sort_values().index

View File

@ -297,8 +297,8 @@ def prepare_building_stock_data():
errors="ignore", errors="ignore",
) )
u_values.subsector.replace(rename_sectors, inplace=True) u_values["subsector"] = u_values.subsector.replace(rename_sectors)
u_values.btype.replace(rename_sectors, inplace=True) u_values["btype"] = u_values.btype.replace(rename_sectors)
# for missing weighting of surfaces of building types assume MFH # for missing weighting of surfaces of building types assume MFH
u_values["assumed_subsector"] = u_values.subsector u_values["assumed_subsector"] = u_values.subsector
@ -306,8 +306,8 @@ def prepare_building_stock_data():
~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector" ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector"
] = "MFH" ] = "MFH"
u_values.country_code.replace({"UK": "GB"}, inplace=True) u_values["country_code"] = u_values.country_code.replace({"UK": "GB"})
u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True) u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"})
u_values = u_values[~u_values.bage.isna()] u_values = u_values[~u_values.bage.isna()]
u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True) u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True)

View File

@ -488,7 +488,9 @@ if __name__ == "__main__":
gens.efficiency, bins=[0, low, high, 1], labels=labels gens.efficiency, bins=[0, low, high, 1], labels=labels
).astype(str) ).astype(str)
carriers += [f"{c} {label} efficiency" for label in labels] carriers += [f"{c} {label} efficiency" for label in labels]
n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency") n.generators.update(
{"carrier": gens.carrier + " " + suffix + " efficiency"}
)
aggregate_carriers = carriers aggregate_carriers = carriers
if n_clusters == len(n.buses): if n_clusters == len(n.buses):

View File

@ -98,7 +98,7 @@ def plot_map(
logger.debug(f"{comp}, {costs}") logger.debug(f"{comp}, {costs}")
costs = costs.groupby(costs.columns, axis=1).sum() costs = costs.T.groupby(costs.columns).sum().T
costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True)

View File

@ -269,8 +269,8 @@ def set_line_nom_max(
hvdc = n.links.index[n.links.carrier == "DC"] hvdc = n.links.index[n.links.carrier == "DC"]
n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext
n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True) n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set)
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True) n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -418,7 +418,7 @@ def add_CCL_constraints(n, config):
Example Example
------- -------
scenario: scenario:
opts: [Co2L-CCL-24H] opts: [Co2L-CCL-24h]
electricity: electricity:
agg_p_nom_limits: data/agg_p_nom_minmax.csv agg_p_nom_limits: data/agg_p_nom_minmax.csv
""" """
@ -463,7 +463,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
Example Example
------- -------
scenario: scenario:
opts: [Co2L-EQ0.7-24H] opts: [Co2L-EQ0.7-24h]
Require each country or node to on average produce a minimal share Require each country or node to on average produce a minimal share
of its total electricity consumption itself. Example: EQ0.7c demands each country of its total electricity consumption itself. Example: EQ0.7c demands each country
@ -527,7 +527,7 @@ def add_BAU_constraints(n, config):
Example Example
------- -------
scenario: scenario:
opts: [Co2L-BAU-24H] opts: [Co2L-BAU-24h]
electricity: electricity:
BAU_mincapacities: BAU_mincapacities:
solar: 0 solar: 0
@ -564,7 +564,7 @@ def add_SAFE_constraints(n, config):
config.yaml requires to specify opts: config.yaml requires to specify opts:
scenario: scenario:
opts: [Co2L-SAFE-24H] opts: [Co2L-SAFE-24h]
electricity: electricity:
SAFE_reservemargin: 0.1 SAFE_reservemargin: 0.1
Which sets a reserve margin of 10% above the peak demand. Which sets a reserve margin of 10% above the peak demand.

8
test.sh Executable file
View File

@ -0,0 +1,8 @@
# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime