[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
Author: pre-commit-ci[bot]
Date: 2023-08-30 10:05:16 +00:00
parent 9ec7442c69
commit bdaa646ed6
10 changed files with 287 additions and 239 deletions
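
Note: the changes below are purely mechanical formatting fixes. For orientation, a minimal `.pre-commit-config.yaml` that would produce fixes of this kind (black for Python layout and string quotes, isort for import ordering, docformatter for docstring wrapping, snakefmt for the Snakemake rule files) might look like the following sketch; the hook selection and pinned revisions are assumptions for illustration, not the repository's actual configuration:

# Hypothetical hook set, consistent with the fixes in this diff;
# the repository's real .pre-commit-config.yaml may differ.
repos:
  - repo: https://github.com/psf/black
    rev: 23.7.0
    hooks:
      - id: black
  - repo: https://github.com/PyCQA/isort
    rev: 5.12.0
    hooks:
      - id: isort
  - repo: https://github.com/PyCQA/docformatter
    rev: v1.7.5
    hooks:
      - id: docformatter
  - repo: https://github.com/snakemake/snakefmt
    rev: v0.8.4
    hooks:
      - id: snakefmt

Running `pre-commit run --all-files` locally applies the same hooks that pre-commit.ci runs in CI.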


@@ -87,10 +87,10 @@ co2_budget:
   2050: 0.000
   # update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
-  1p5 : 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
-  1p6 : 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
-  1p7 : 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
-  2p0 : 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
+  1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
+  1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
+  1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
+  2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
 # docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity
 electricity:


@@ -7,7 +7,9 @@ localrules:
     copy_config,
     copy_conda_env,
 
+
+
 if config["foresight"] != "perfect":
     rule plot_network:
         params:
             foresight=config["foresight"],
@@ -34,7 +36,9 @@ if config["foresight"] != "perfect":
         script:
             "../scripts/plot_network.py"
 
+
+
 if config["foresight"] == "perfect":
     rule plot_network:
         params:
             foresight=config["foresight"],
@@ -55,15 +59,13 @@ if config["foresight"] == "perfect":
             mem_mb=10000,
         benchmark:
             BENCHMARKS
-            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark",
+            +"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
         conda:
             "../envs/environment.yaml"
         script:
             "../scripts/plot_network.py"
 
-
-
 rule copy_config:
     params:
         RDIR=RDIR,


@@ -190,7 +190,4 @@ rule make_summary_perfect:
         "../scripts/make_summary_perfect.py"
 
 
-
-
-
 ruleorder: add_existing_baseyear > add_brownfield


@@ -304,17 +304,18 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
             bus0 = vars(spatial)[carrier[generator]].nodes
             if "EU" not in vars(spatial)[carrier[generator]].locations:
                 bus0 = bus0.intersection(capacity.index + " gas")
 
             # check for missing bus
             missing_bus = pd.Index(bus0).difference(n.buses.index)
             if not missing_bus.empty:
                 logger.info(f"add buses {bus0}")
-                n.madd("Bus",
-                    bus0,
-                    carrier=generator,
-                    location=vars(spatial)[carrier[generator]].locations,
-                    unit="MWh_el"
-                )
+                n.madd(
+                    "Bus",
+                    bus0,
+                    carrier=generator,
+                    location=vars(spatial)[carrier[generator]].locations,
+                    unit="MWh_el",
+                )
 
         already_build = n.links.index.intersection(asset_i)
         new_build = asset_i.difference(n.links.index)
@@ -615,9 +616,9 @@ def add_heating_capacities_installed_before_baseyear(
                 if str(grouping_year) in index and n.links.p_nom[index] < threshold
             ],
         )
 
     # drop assets which are at the end of their lifetime
-    links_i = n.links[(n.links.build_year+n.links.lifetime<=baseyear)].index
+    links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
    n.mremove("Link", links_i)


@@ -95,16 +95,19 @@ def prepare_hotmaps_database(regions):
     gdf.rename(columns={"index_right": "bus"}, inplace=True)
     gdf["country"] = gdf.bus.str[:2]
 
     # the .sjoin can lead to duplicates if a geom is in two regions
     if gdf.index.duplicated().any():
         import pycountry
+
         # get all duplicated entries
         duplicated_i = gdf.index[gdf.index.duplicated()]
         # convert from raw data country name to iso-2-code
-        s = df.loc[duplicated_i, "Country"].apply(lambda x: pycountry.countries.lookup(x).alpha_2)
+        s = df.loc[duplicated_i, "Country"].apply(
+            lambda x: pycountry.countries.lookup(x).alpha_2
+        )
         # Get a boolean mask where gdf's country column matches s's values for the same index
-        mask = gdf['country'] == gdf.index.map(s)
+        mask = gdf["country"] == gdf.index.map(s)
         # Filter gdf using the mask
         gdf_filtered = gdf[mask]
         # concat not duplicated and filtered gdf
@@ -160,7 +163,8 @@ def build_nodal_distribution_key(hotmaps, regions, countries):
     return keys
 
 
-#%%
+# %%
+
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake


@@ -230,25 +230,33 @@ def calculate_energy(n, label, energy):
         names=energy.columns.names[:3] + ["year"],
     )
     energy = energy.reindex(cols, axis=1)
 
     for c in n.iterate_components(n.one_port_components | n.branch_components):
         if c.name in n.one_port_components:
             c_energies = (
                 c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
-                .groupby(level=0).sum()
+                .groupby(level=0)
+                .sum()
                 .multiply(c.df.sign)
                 .groupby(c.df.carrier, axis=1)
                 .sum()
             )
         else:
-            c_energies = pd.DataFrame(0.0, columns=c.df.carrier.unique(), index=n.investment_periods)
+            c_energies = pd.DataFrame(
+                0.0, columns=c.df.carrier.unique(), index=n.investment_periods
+            )
             for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
-                totals = c.pnl["p" + port].multiply(n.snapshot_weightings.generators, axis=0).groupby(level=0).sum()
+                totals = (
+                    c.pnl["p" + port]
+                    .multiply(n.snapshot_weightings.generators, axis=0)
+                    .groupby(level=0)
+                    .sum()
+                )
                 # remove values where bus is missing (bug in nomopyomo)
                 no_bus = c.df.index[c.df["bus" + port] == ""]
-                totals[no_bus] = float(n.component_attrs[c.name].loc[
-                    "p" + port, "default"
-                ])
+                totals[no_bus] = float(
+                    n.component_attrs[c.name].loc["p" + port, "default"]
+                )
                 c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
 
         c_energies = pd.concat([c_energies.T], keys=[c.list_name])
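
Note: the reformatted chain above converts per-snapshot power to energy by weighting each snapshot with its duration in hours (n.snapshot_weightings.generators) and then summing within each investment period, which is the first level of the multi-indexed snapshots targeted by groupby(level=0).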
@@ -703,11 +711,13 @@ if __name__ == "__main__":
     # Detect running outside of snakemake and mock snakemake for testing
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
 
         snakemake = mock_snakemake("make_summary_perfect")
 
     run = snakemake.config["run"]["name"]
-    if run!="": run += "/"
+    if run != "":
+        run += "/"
 
     networks_dict = {
         (clusters, lv, opts + sector_opts): "results/"
         + run


@@ -913,9 +913,11 @@ def plot_series(network, carrier="AC", name="test"):
     )
 
 
-def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Generator"],
-                     bus_size_factor=1.7e10):
+def plot_map_perfect(
+    network,
+    components=["Link", "Store", "StorageUnit", "Generator"],
+    bus_size_factor=1.7e10,
+):
     n = network.copy()
     assign_location(n)
     # Drop non-electric buses so they don't clutter the plot
@@ -926,41 +928,48 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
     costs = {}
     for comp in components:
         df_c = n.df(comp)
-        if df_c.empty: continue
+        if df_c.empty:
+            continue
         df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)
 
         attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"
 
         active = pd.concat(
-            [
-                n.get_active_assets(comp, inv_p).rename(inv_p)
-                for inv_p in investments
-            ],
+            [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
             axis=1,
         ).astype(int)
         capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
-        capital_cost_t = ((active.mul(capital_cost, axis=0))
-                          .groupby([n.df(comp).location,
-                                    n.df(comp).nice_group]).sum())
+        capital_cost_t = (
+            (active.mul(capital_cost, axis=0))
+            .groupby([n.df(comp).location, n.df(comp).nice_group])
+            .sum()
+        )
 
         capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")
 
         costs[comp] = capital_cost_t
 
-    costs = pd.concat(costs).groupby(level=[1,2]).sum()
-    costs.drop(costs[costs.sum(axis=1)==0].index, inplace=True)
+    costs = pd.concat(costs).groupby(level=[1, 2]).sum()
+    costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)
 
-    new_columns = (preferred_order.intersection(costs.index.levels[1])
-                   .append(costs.index.levels[1].difference(preferred_order)))
+    new_columns = preferred_order.intersection(costs.index.levels[1]).append(
+        costs.index.levels[1].difference(preferred_order)
+    )
     costs = costs.reindex(new_columns, level=1)
 
     for item in new_columns:
-        if item not in snakemake.config['plotting']['tech_colors']:
-            print("Warning!",item,"not in config/plotting/tech_colors, assign random color")
-            snakemake.config['plotting']['tech_colors'] = "pink"
+        if item not in snakemake.config["plotting"]["tech_colors"]:
+            print(
+                "Warning!",
+                item,
+                "not in config/plotting/tech_colors, assign random color",
+            )
+            snakemake.config["plotting"]["tech_colors"] = "pink"
 
-    n.links.drop(n.links.index[(n.links.carrier != "DC") & (
-        n.links.carrier != "B2B")], inplace=True)
+    n.links.drop(
+        n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
+        inplace=True,
+    )
 
     # drop non-bus
     to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
@@ -972,7 +981,7 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
     costs.index = pd.MultiIndex.from_tuples(costs.index.values)
 
     # PDF has minimum width, so set these to zero
-    line_lower_threshold = 500.
+    line_lower_threshold = 500.0
     line_upper_threshold = 1e4
     linewidth_factor = 2e3
     ac_color = "gray"
@@ -981,29 +990,29 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
     line_widths = n.lines.s_nom_opt
     link_widths = n.links.p_nom_opt
     linewidth_factor = 2e3
-    line_lower_threshold = 0.
+    line_lower_threshold = 0.0
     title = "Today's transmission"
 
-    line_widths[line_widths < line_lower_threshold] = 0.
-    link_widths[link_widths < line_lower_threshold] = 0.
+    line_widths[line_widths < line_lower_threshold] = 0.0
+    link_widths[link_widths < line_lower_threshold] = 0.0
     line_widths[line_widths > line_upper_threshold] = line_upper_threshold
     link_widths[link_widths > line_upper_threshold] = line_upper_threshold
 
     for year in costs.columns:
         fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
         fig.set_size_inches(7, 6)
         fig.suptitle(year)
 
         n.plot(
             bus_sizes=costs[year] / bus_size_factor,
-            bus_colors=snakemake.config['plotting']['tech_colors'],
+            bus_colors=snakemake.config["plotting"]["tech_colors"],
             line_colors=ac_color,
             link_colors=dc_color,
             line_widths=line_widths / linewidth_factor,
             link_widths=link_widths / linewidth_factor,
-            ax=ax, **map_opts
+            ax=ax,
+            **map_opts,
         )
 
         sizes = [20, 10, 5]
@@ -1051,15 +1060,12 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
             frameon=False,
         )
 
         fig.savefig(
-            snakemake.output[f"map_{year}"],
-            transparent=True,
-            bbox_inches="tight"
+            snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
         )
 
 
-#%%
+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -1083,11 +1089,13 @@ if __name__ == "__main__":
     if map_opts["boundaries"] is None:
         map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]
 
     if snakemake.params["foresight"] == "perfect":
-        plot_map_perfect(n,
-                         components=["Link", "Store", "StorageUnit", "Generator"],
-                         bus_size_factor=2e10)
+        plot_map_perfect(
+            n,
+            components=["Link", "Store", "StorageUnit", "Generator"],
+            bus_size_factor=2e10,
+        )
     else:
         plot_map(
             n,
@@ -1095,7 +1103,7 @@ if __name__ == "__main__":
             bus_size_factor=2e10,
             transmission=False,
         )
 
         plot_h2_map(n, regions)
         plot_ch4_map(n)
         plot_map_without(n)


@@ -276,8 +276,6 @@ def plot_balances():
         i for i in balances_df.index.levels[0] if i not in co2_carriers
     ]
 
-
-
     for k, v in balances.items():
         df = balances_df.loc[v]
         df = df.groupby(df.index.get_level_values(2)).sum()
@@ -321,7 +319,7 @@ def plot_balances():
         )
 
         new_columns = df.columns.sort_values()
 
         fig, ax = plt.subplots(figsize=(12, 8))
 
         df.loc[new_index, new_columns].T.plot(
@@ -357,7 +355,6 @@ def plot_balances():
         fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")
 
 
-
 def historical_emissions(countries):
     """
     Read historical emissions to add them to the carbon budget plot.
@@ -393,19 +390,20 @@ def historical_emissions(countries):
     e["other LULUCF"] = "4.H - Other LULUCF"
 
     pol = ["CO2"]  # ["All greenhouse gases - (CO2 equivalent)"]
 
     if "GB" in countries:
         countries.remove("GB")
         countries.append("UK")
 
-    year = df.index.levels[0][df.index.levels[0]>=1990]
+    year = df.index.levels[0][df.index.levels[0] >= 1990]
 
     missing = pd.Index(countries).difference(df.index.levels[2])
     if not missing.empty:
-        logger.warning(f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}")
+        logger.warning(
+            f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
+        )
     countries = pd.Index(df.index.levels[2]).intersection(countries)
 
     idx = pd.IndexSlice
     co2_totals = (
         df.loc[idx[year, e.values, countries, pol], "emissions"]
@@ -469,26 +467,28 @@ def plot_carbon_budget_distribution(input_eurostat):
     # historic emissions
     countries = snakemake.params.countries
-    e_1990 = co2_emissions_year(
-        countries, input_eurostat, opts, snakemake, year=1990
-    )
+    e_1990 = co2_emissions_year(countries, input_eurostat, opts, snakemake, year=1990)
     emissions = historical_emissions(countries)
     # add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
     emissions.loc[2019] = 2.971372
     emissions.loc[2020] = 2.691958
     emissions.loc[2021] = 2.869355
 
     if snakemake.config["foresight"] == "myopic":
         path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
-        co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[["cb"]]
+        co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
+            ["cb"]
+        ]
         co2_cap *= e_1990
     else:
-        supply_energy = pd.read_csv(snakemake.input.balances,
-                                    index_col=[0,1,2], header=[0,1,2, 3])
-        co2_cap = supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T/1e9
+        supply_energy = pd.read_csv(
+            snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
+        )
+        co2_cap = (
+            supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
+        )
         co2_cap.rename(index=lambda x: int(x), inplace=True)
 
     plt.figure(figsize=(10, 7))
     gs1 = gridspec.GridSpec(1, 1)
     ax1 = plt.subplot(gs1[0, 0])
@@ -511,13 +511,13 @@ def plot_carbon_budget_distribution(input_eurostat):
     )
 
     ax1.plot(
         [2030],
         [0.45 * emissions[1990]],
         marker="*",
         markersize=12,
         markerfacecolor="black",
         markeredgecolor="black",
     )
 
     ax1.plot(
         [2030],
@@ -538,28 +538,28 @@ def plot_carbon_budget_distribution(input_eurostat):
     )
 
     ax1.plot(
         [2050],
         [0.0 * emissions[1990]],
         marker="*",
         markersize=12,
         markerfacecolor="black",
         markeredgecolor="black",
         label="EU commited target",
     )
 
     for col in co2_cap.columns:
         ax1.plot(co2_cap[col], linewidth=3, label=col)
 
     ax1.legend(
         fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
     )
 
     plt.grid(axis="y")
     path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
     plt.savefig(path, bbox_inches="tight")
 
 
-#%%
+# %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -578,5 +578,7 @@ if __name__ == "__main__":
 
     for sector_opts in snakemake.params.sector_opts:
         opts = sector_opts.split("-")
-        if any(["cb" in o for o in opts]) or (snakemake.config["foresight"]=="perfect"):
+        if any(["cb" in o for o in opts]) or (
+            snakemake.config["foresight"] == "perfect"
+        ):
             plot_carbon_budget_distribution(snakemake.input.eurostat)


@@ -8,14 +8,16 @@ Concats pypsa networks of single investment periods to one network.
 """
 import logging
 import re
-import pandas as pd
+
 import numpy as np
+import pandas as pd
 import pypsa
 from _helpers import update_config_with_sector_opts
 from add_existing_baseyear import add_build_year_to_new_assets
 from pypsa.descriptors import expand_series
 from pypsa.io import import_components_from_dataframe
 from six import iterkeys
 
 logger = logging.getLogger(__name__)
@@ -38,8 +40,8 @@ def get_missing(df, n, c):
 def get_social_discount(t, r=0.01):
     """
-    Calculate for a given time t and social discount rate r [per unit]
-    the social discount.
+    Calculate for a given time t and social discount rate r [per unit] the
+    social discount.
     """
     return 1 / (1 + r) ** t
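
Note: this is the standard social discount factor d(t) = 1 / (1 + r)^t; with the default r = 0.01, a cost incurred 10 years after the base year is weighted by 1 / 1.01^10 ≈ 0.905.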
@@ -62,7 +64,7 @@ def get_investment_weighting(time_weighting, r=0.01):
 def add_year_to_constraints(n, baseyear):
     """
     Add investment period to global constraints and rename index.
 
     Parameters
     ----------
     n : pypsa.Network
@@ -77,25 +79,29 @@ def add_year_to_constraints(n, baseyear):
 def hvdc_transport_model(n):
     """
-    Convert AC lines to DC links for multi-decade optimisation with
-    line expansion. Losses of DC links are assumed to be 3% per 1000km
+    Convert AC lines to DC links for multi-decade optimisation with line
+    expansion.
+
+    Losses of DC links are assumed to be 3% per 1000km
     """
     logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")
 
-    n.madd("Link",
-           n.lines.index,
-           bus0=n.lines.bus0,
-           bus1=n.lines.bus1,
-           p_nom_extendable=True,
-           p_nom=n.lines.s_nom,
-           p_nom_min=n.lines.s_nom,
-           p_min_pu=-1,
-           efficiency=1 - 0.03 * n.lines.length / 1000,
-           marginal_cost=0,
-           carrier="DC",
-           length=n.lines.length,
-           capital_cost=n.lines.capital_cost)
+    n.madd(
+        "Link",
+        n.lines.index,
+        bus0=n.lines.bus0,
+        bus1=n.lines.bus1,
+        p_nom_extendable=True,
+        p_nom=n.lines.s_nom,
+        p_nom_min=n.lines.s_nom,
+        p_min_pu=-1,
+        efficiency=1 - 0.03 * n.lines.length / 1000,
+        marginal_cost=0,
+        carrier="DC",
+        length=n.lines.length,
+        capital_cost=n.lines.capital_cost,
+    )
 
     # Remove AC lines
     logger.info("Removing AC lines")
@@ -103,15 +109,16 @@ def hvdc_transport_model(n):
     n.mremove("Line", lines_rm)
 
     # Set efficiency of all DC links to include losses depending on length
-    n.links.loc[n.links.carrier == 'DC', 'efficiency'] = 1 - 0.03 * n.links.loc[
-        n.links.carrier == 'DC', 'length'] / 1000
+    n.links.loc[n.links.carrier == "DC", "efficiency"] = (
+        1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
+    )
 
 
 def adjust_electricity_grid(n, year, years):
     """
     Add carrier to lines. Replace AC lines with DC links in case of line
     expansion. Add lifetime to DC links in case of line expansion.
 
     Parameters
     ----------
     n : pypsa.Network
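
Note: the 3% per 1000 km loss assumption from the docstring enters as the link efficiency set above, efficiency = 1 - 0.03 * length / 1000, so a 500 km DC link, for example, receives an efficiency of 0.985.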
@@ -121,16 +128,15 @@ def adjust_electricity_grid(n, year, years):
         investment periods
     """
     n.lines["carrier"] = "AC"
-    links_i = n.links[n.links.carrier=="DC"].index
+    links_i = n.links[n.links.carrier == "DC"].index
     if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
         hvdc_transport_model(n)
-        links_i = n.links[n.links.carrier=="DC"].index
+        links_i = n.links[n.links.carrier == "DC"].index
         n.links.loc[links_i, "lifetime"] = 100
-        if year!= years[0]:
+        if year != years[0]:
             n.links.loc[links_i, "p_nom_min"] = 0
             n.links.loc[links_i, "p_nom"] = 0
 
 
-
 # --------------------------------------------------------------------
 def concat_networks(years):
@@ -245,7 +251,6 @@ def adjust_stores(n):
     e_initial_store = ["co2 stored"]
     co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
     n.stores.loc[co2_i, "e_initial_per_period"] = True
-
 
     return n
@@ -309,13 +314,13 @@ def set_carbon_constraints(n, opts):
             carrier_attribute="co2_emissions",
             sense="<=",
             constant=budget,
-            investment_period=n.investment_periods[-1]
+            investment_period=n.investment_periods[-1],
         )
 
         # drop other CO2 limits
-        drop_i = n.global_constraints[n.global_constraints.type=="co2_limit"].index
+        drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
         n.mremove("GlobalConstraint", drop_i)
 
         n.add(
             "GlobalConstraint",
             "carbon_neutral",
@@ -323,10 +328,9 @@ def set_carbon_constraints(n, opts):
             carrier_attribute="co2_emissions",
             sense="<=",
             constant=0,
-            investment_period=n.investment_periods[-1]
+            investment_period=n.investment_periods[-1],
         )
-
 
     # set minimum CO2 emission constraint to avoid too fast reduction
     if "co2min" in opts:
         emissions_1990 = 4.53693
@@ -348,25 +352,25 @@ def set_carbon_constraints(n, opts):
             investment_period=first_year,
             constant=co2min * 1e9 * time_weightings,
         )
 
     return n
 
 
 def adjust_lvlimit(n):
     """
-    Convert global constraints for single investment period to one uniform
-    if all attributes stay the same.
+    Convert global constraints for single investment period to one uniform if
+    all attributes stay the same.
     """
     c = "GlobalConstraint"
-    cols = ['carrier_attribute', 'sense', "constant", "type"]
+    cols = ["carrier_attribute", "sense", "constant", "type"]
     glc_type = "transmission_volume_expansion_limit"
-    if (n.df(c)[n.df(c).type==glc_type][cols].nunique()==1).all():
-        glc = n.df(c)[n.df(c).type==glc_type][cols].iloc[[0]]
+    if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
+        glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
         glc.index = pd.Index(["lv_limit"])
-        remove_i = n.df(c)[n.df(c).type==glc_type].index
+        remove_i = n.df(c)[n.df(c).type == glc_type].index
         n.mremove(c, remove_i)
         import_components_from_dataframe(n, glc, c)
 
     return n
@@ -374,16 +378,17 @@ def adjust_CO2_glc(n):
     c = "GlobalConstraint"
     glc_name = "CO2Limit"
     glc_type = "primary_energy"
-    mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type==glc_type)
+    mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
     n.df(c).loc[mask, "type"] = "co2_limit"
 
     return n
 
 
 def add_H2_boilers(n):
     """
-    Gas boilers can be retrofitted to run with H2. Add H2 boilers for heating
-    for all existing gas boilers.
+    Gas boilers can be retrofitted to run with H2.
+
+    Add H2 boilers for heating for all existing gas boilers.
     """
     c = "Link"
     logger.info("Add H2 boilers.")
@@ -394,8 +399,12 @@ def add_H2_boilers(n):
     # adjust bus 0
     df["bus0"] = df.bus1.map(n.buses.location) + " H2"
     # rename carrier and index
-    df["carrier"] = df.carrier.apply(lambda x: x.replace("gas boiler", "retrofitted H2 boiler"))
-    df.rename(index = lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True)
+    df["carrier"] = df.carrier.apply(
+        lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
+    )
+    df.rename(
+        index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
+    )
     # todo, costs for retrofitting
     df["capital_costs"] = 100
     # set existing capacity to zero
@@ -403,8 +412,8 @@ def add_H2_boilers(n):
     df["p_nom_extendable"] = True
     # add H2 boilers to network
     import_components_from_dataframe(n, df, c)
 
 
 def apply_time_segmentation_perfect(
     n, segments, solver_name="cbc", overwrite_time_dependent=True
 ):
@@ -438,8 +447,8 @@ def apply_time_segmentation_perfect(
         raw = pd.concat([raw, df], axis=1)
     raw = raw.dropna(axis=1)
     sn_weightings = {}
 
     for year in raw.index.levels[0]:
         logger.info(f"Find representative snapshots for {year}.")
         raw_t = raw.loc[year]
         # normalise all time-dependent data
@@ -455,7 +464,7 @@ def apply_time_segmentation_perfect(
             solver=solver_name,
         )
 
         segmented = agg.createTypicalPeriods()
         weightings = segmented.index.get_level_values("Segment Duration")
         offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
         timesteps = [raw_t.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
@@ -463,13 +472,14 @@ def apply_time_segmentation_perfect(
         sn_weightings[year] = pd.Series(
             weightings, index=snapshots, name="weightings", dtype="float64"
         )
+
     sn_weightings = pd.concat(sn_weightings)
     n.set_snapshots(sn_weightings.index)
     n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
 
     return n
 
 
 def set_temporal_aggregation_SEG(n, opts, solver_name):
     """
     Aggregate network temporally with tsam.
@@ -483,6 +493,8 @@ def set_temporal_aggregation_SEG(n, opts, solver_name):
             n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
             break
     return n
+
+
 # %%
 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -514,22 +526,22 @@ if __name__ == "__main__":
     # concat prenetworks of planning horizon to single network ------------
     n = concat_networks(years)
 
     # temporal aggregate
     opts = snakemake.wildcards.sector_opts.split("-")
     solver_name = snakemake.config["solving"]["solver"]["name"]
     n = set_temporal_aggregation_SEG(n, opts, solver_name)
 
     # adjust global constraints lv limit if the same for all years
     n = adjust_lvlimit(n)
 
     # adjust global constraints CO2 limit
     n = adjust_CO2_glc(n)
 
     # adjust stores to multi period investment
     n = adjust_stores(n)
 
     # set phase outs
     set_all_phase_outs(n)
 
     # add H2 boiler
     add_H2_boilers(n)


@@ -32,10 +32,9 @@ import re
 import numpy as np
 import pandas as pd
 import pypsa
-from pypsa.descriptors import get_activity_mask
 import xarray as xr
-
 from _helpers import configure_logging, update_config_with_sector_opts
+from pypsa.descriptors import get_activity_mask
 from vresutils.benchmark import memory_logger
 
 logger = logging.getLogger(__name__)
@@ -48,34 +47,40 @@ def add_land_use_constraint(n, planning_horizons, config):
         _add_land_use_constraint_m(n, planning_horizons, config)
     else:
         _add_land_use_constraint(n)
 
 
 def add_land_use_constraint_perfect(n):
-    """Add global constraints for tech capacity limit.
+    """
+    Add global constraints for tech capacity limit.
     """
     logger.info("Add land-use constraint for perfect foresight")
 
     def compress_series(s):
         def process_group(group):
             if group.nunique() == 1:
                 return pd.Series(group.iloc[0], index=[None])
             else:
                 return group
 
-        return s.groupby(level=[0,1]).apply(process_group)
+        return s.groupby(level=[0, 1]).apply(process_group)
 
     def new_index_name(t):
         # Convert all elements to string and filter out None values
         parts = [str(x) for x in t if x is not None]
         # Join with space, but use a dash for the last item if not None
-        return ' '.join(parts[:2]) + (f'-{parts[-1]}' if len(parts) > 2 else '')
+        return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")
 
     def check_p_min_p_max(p_nom_max):
         p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
         p_nom_min = p_nom_min.reindex(p_nom_max.index)
-        check = (p_nom_min.groupby(level=[0,1]).sum()>p_nom_max.groupby(level=[0,1]).min())
+        check = (
+            p_nom_min.groupby(level=[0, 1]).sum()
+            > p_nom_max.groupby(level=[0, 1]).min()
+        )
         if check.sum():
-            logger.warning(f"summed p_min_pu values at node larger than technical potential {check[check].index}")
+            logger.warning(
+                f"summed p_min_pu values at node larger than technical potential {check[check].index}"
+            )
 
     grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
     ext_i = n.generators.p_nom_extendable
@@ -85,8 +90,7 @@ def add_land_use_constraint_perfect(n):
     p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
     # carrier
     carriers = p_nom_max.index.get_level_values(0).unique()
-    gen_i = n.generators[(n.generators.carrier.isin(carriers)) &
-                         (ext_i)].index
+    gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
     n.generators.loc[gen_i, "p_nom_min"] = 0
     # check minimum capacities
     check_p_min_p_max(p_nom_max)
@@ -94,15 +98,17 @@ def add_land_use_constraint_perfect(n):
     # p_nom_max = compress_series(p_nom_max)
     # adjust name to fit syntax of nominal constraint per bus
     df = p_nom_max.reset_index()
-    df["name"] = df.apply(lambda row: f"nom_max_{row['carrier']}"
-                          + (f"_{row['build_year']}" if row['build_year'] is not None else ""),
-                          axis=1)
+    df["name"] = df.apply(
+        lambda row: f"nom_max_{row['carrier']}"
+        + (f"_{row['build_year']}" if row["build_year"] is not None else ""),
+        axis=1,
+    )
     for name in df.name.unique():
-        df_carrier = df[df.name==name]
+        df_carrier = df[df.name == name]
         bus = df_carrier.bus
         n.buses.loc[bus, name] = df_carrier.p_nom_max.values
 
     return n
@@ -187,14 +193,14 @@ def add_co2_sequestration_limit(n, config, limit=200):
             continue
         limit = float(o[o.find("seq") + 3 :]) * 1e6
         break
 
     if config["foresight"] == "perfect":
         periods = n.investment_periods
         names = pd.Index([f"co2_sequestration_limit-{period}" for period in periods])
     else:
         periods = [np.nan]
         names = pd.Index(["co2_sequestration_limit"])
 
     n.madd(
         "GlobalConstraint",
         names,
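
Note: the option parsing above strips the literal prefix "seq" and converts the remainder from Mt to t, so a wildcard option such as seq200 yields limit = 200 * 1e6 = 2e8 tCO2.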
@@ -224,11 +230,11 @@ def add_carbon_constraint(n, snapshots):
         time_valid = int(glc.loc["investment_period"])
 
         if not stores.empty:
             last = n.snapshot_weightings.reset_index().groupby("period").last()
             last_i = last.set_index([last.index, last.timestep]).index
             final_e = n.model["Store-e"].loc[last_i, stores.index]
             time_i = pd.IndexSlice[time_valid, :]
-            lhs = final_e.loc[time_i,:] - final_e.shift(snapshot=1).loc[time_i,:]
+            lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]
 
             n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
 
@@ -251,77 +257,83 @@ def add_carbon_budget_constraint(n, snapshots):
         weighting = n.investment_period_weightings.loc[time_valid, "years"]
 
         if not stores.empty:
             last = n.snapshot_weightings.reset_index().groupby("period").last()
             last_i = last.set_index([last.index, last.timestep]).index
             final_e = n.model["Store-e"].loc[last_i, stores.index]
             time_i = pd.IndexSlice[time_valid, :]
-            lhs = final_e.loc[time_i,:] * weighting
+            lhs = final_e.loc[time_i, :] * weighting
 
             n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
 
 
 def add_max_growth(n, config):
-    """Add maximum growth rates for different carriers
+    """
+    Add maximum growth rates for different carriers.
     """
     opts = config["sector"]["limit_max_growth"]
     # take maximum yearly difference between investment periods since historic growth is per year
     factor = n.investment_period_weightings.years.max() * opts["factor"]
 
     for carrier in opts["max_growth"].keys():
         max_per_period = opts["max_growth"][carrier] * factor
-        logger.info(f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW.")
+        logger.info(
+            f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
+        )
         n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3
 
     for carrier in opts["max_relative_growth"].keys():
         max_r_per_period = opts["max_relative_growth"][carrier]
-        logger.info(f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}.")
-        n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
+        logger.info(
+            f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
+        )
+        n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period
 
     return n
 
 
 def add_retrofit_gas_boiler_constraint(n, snapshots):
-    """Allow retrofitting of existing gas boilers to H2 boilers.
+    """
+    Allow retrofitting of existing gas boilers to H2 boilers.
     """
     c = "Link"
     logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
 
     # existing gas boilers
     mask = n.links.carrier.str.contains("gas boiler") & ~n.links.p_nom_extendable
     gas_i = n.links[mask].index
     mask = n.links.carrier.str.contains("retrofitted H2 boiler")
     h2_i = n.links[mask].index
 
     n.links.loc[gas_i, "p_nom_extendable"] = True
     p_nom = n.links.loc[gas_i, "p_nom"]
     n.links.loc[gas_i, "p_nom"] = 0
 
     # heat profile
-    cols = n.loads_t.p_set.columns[n.loads_t.p_set.columns.str.contains("heat")
-                                   & ~n.loads_t.p_set.columns.str.contains("industry")
-                                   & ~n.loads_t.p_set.columns.str.contains("agriculture")]
-    profile = n.loads_t.p_set[cols].div(n.loads_t.p_set[cols].groupby(level=0).max(), level=0)
+    cols = n.loads_t.p_set.columns[
+        n.loads_t.p_set.columns.str.contains("heat")
+        & ~n.loads_t.p_set.columns.str.contains("industry")
+        & ~n.loads_t.p_set.columns.str.contains("agriculture")
+    ]
+    profile = n.loads_t.p_set[cols].div(
+        n.loads_t.p_set[cols].groupby(level=0).max(), level=0
+    )
     # to deal if max value is zero
     profile.fillna(0, inplace=True)
     profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
     profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
     profile.columns = gas_i
 
     rhs = profile.mul(p_nom)
 
     dispatch = n.model["Link-p"]
     active = get_activity_mask(n, c, snapshots, gas_i)
     rhs = rhs[active]
     p_gas = dispatch.sel(Link=gas_i)
     p_h2 = dispatch.sel(Link=h2_i)
-    lhs = p_gas + p_h2
-    n.model.add_constraints(lhs == rhs, name="gas_retrofit")
+
+    lhs = p_gas + p_h2
+
+    n.model.add_constraints(lhs == rhs, name="gas_retrofit")
 
 
 def prepare_network(
     n,
     solve_opts=None,
@@ -381,7 +393,7 @@ def prepare_network(
 
     if foresight == "myopic":
         add_land_use_constraint(n, planning_horizons, config)
 
     if foresight == "perfect":
         n = add_land_use_constraint_perfect(n)
         if config["sector"]["limit_max_growth"]["enable"]:
@@ -756,7 +768,6 @@ def add_pipe_retrofit_constraint(n):
     n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")
 
 
-
 def extra_functionality(n, snapshots):
     """
     Collects supplementary constraints which will be passed to
@@ -791,8 +802,10 @@ def extra_functionality(n, snapshots):
 def solve_network(n, config, solving, opts="", **kwargs):
     set_of_options = solving["solver"]["options"]
     cf_solving = solving["options"]
-    kwargs["multi_investment_periods"] = True if config["foresight"] == "perfect" else False
+    kwargs["multi_investment_periods"] = (
+        True if config["foresight"] == "perfect" else False
+    )
     kwargs["solver_options"] = (
         solving["solver_options"][set_of_options] if set_of_options else {}
     )
@@ -838,7 +851,8 @@ def solve_network(n, config, solving, opts="", **kwargs):
     return n
 
 
-#%%
+# %%
+
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -868,7 +882,7 @@ if __name__ == "__main__":
     np.random.seed(solve_opts.get("seed", 123))
 
     n = pypsa.Network(snakemake.input.network)
 
     n = prepare_network(
         n,
         solve_opts,
@@ -877,9 +891,10 @@ if __name__ == "__main__":
         planning_horizons=snakemake.params.planning_horizons,
         co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
     )
 
-    with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
+    with memory_logger(
+        filename=getattr(snakemake.log, "memory", None), interval=30.0
+    ) as mem:
         n = solve_network(
             n,
             config=snakemake.config,
@@ -887,11 +902,8 @@ if __name__ == "__main__":
             opts=opts,
             log_fn=snakemake.log.solver,
         )
-
-
-
     logger.info("Maximum memory usage: {}".format(mem.mem_usage))
 
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])