[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
This commit is contained in:
parent 9ec7442c69
commit bdaa646ed6
@@ -87,10 +87,10 @@ co2_budget:
2050: 0.000

# update of IPCC 6th AR compared to the 1.5SR. (discussed here: https://twitter.com/JoeriRogelj/status/1424743828339167233)
1p5 : 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
1p6 : 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
1p7 : 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
2p0 : 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt
1p5: 34.2 # 25.7 # Budget in Gt CO2 for 1.5 for Europe, global 420 Gt, assuming per capita share
1p6: 43.259666 # 35 # Budget in Gt CO2 for 1.6 for Europe, global 580 Gt
1p7: 51.4 # 45 # Budget in Gt CO2 for 1.7 for Europe, global 800 Gt
2p0: 69.778 # 73.9 # Budget in Gt CO2 for 2 for Europe, global 1170 Gt

# docs in https://pypsa-eur.readthedocs.io/en/latest/configuration.html#electricity
electricity:
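The comments above state that the European budgets are derived from global IPCC budgets assuming a per-capita share. A minimal sketch that back-computes the implied European share from the numbers quoted above (illustrative only, not part of the commit):

# Illustrative back-calculation of the implied European share of the global budget,
# using the (European Gt CO2, global Gt CO2) pairs from the config comments above.
budgets = {"1p5": (34.2, 420), "1p6": (43.259666, 580), "1p7": (51.4, 800), "2p0": (69.778, 1170)}
for key, (europe, world) in budgets.items():
    print(key, f"implied European share of global budget: {europe / world:.1%}")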
@@ -7,7 +7,9 @@ localrules:
copy_config,
copy_conda_env,


if config["foresight"] != "perfect":

rule plot_network:
params:
foresight=config["foresight"],
@@ -34,7 +36,9 @@ if config["foresight"] != "perfect":
script:
"../scripts/plot_network.py"


if config["foresight"] == "perfect":

rule plot_network:
params:
foresight=config["foresight"],
@@ -55,15 +59,13 @@ if config["foresight"] == "perfect":
mem_mb=10000,
benchmark:
BENCHMARKS
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark",
+"postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years_benchmark"
conda:
"../envs/environment.yaml"
script:
"../scripts/plot_network.py"



rule copy_config:
params:
RDIR=RDIR,
@@ -190,7 +190,4 @@ rule make_summary_perfect:
"../scripts/make_summary_perfect.py"



ruleorder: add_existing_baseyear > add_brownfield
@@ -309,12 +309,13 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
missing_bus = pd.Index(bus0).difference(n.buses.index)
if not missing_bus.empty:
logger.info(f"add buses {bus0}")
n.madd("Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el"
)
n.madd(
"Bus",
bus0,
carrier=generator,
location=vars(spatial)[carrier[generator]].locations,
unit="MWh_el",
)

already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index)
@@ -617,7 +618,7 @@ def add_heating_capacities_installed_before_baseyear(
)

# drop assets which are at the end of their lifetime
links_i = n.links[(n.links.build_year+n.links.lifetime<=baseyear)].index
links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index
n.mremove("Link", links_i)
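As a small illustration of the lifetime filter above, assets whose build year plus lifetime does not reach the base year are removed. A toy sketch with made-up values (not part of the commit):

import pandas as pd

# Toy link table: an asset built in 1995 with a 25-year lifetime is retired by a 2025 base year.
links = pd.DataFrame({"build_year": [1995, 2015], "lifetime": [25.0, 25.0]}, index=["old boiler", "new boiler"])
baseyear = 2025

links_i = links[(links.build_year + links.lifetime <= baseyear)].index
print(list(links_i))  # ['old boiler']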
@@ -99,12 +99,15 @@ def prepare_hotmaps_database(regions):
# the .sjoin can lead to duplicates if a geom is in two regions
if gdf.index.duplicated().any():
import pycountry

# get all duplicated entries
duplicated_i = gdf.index[gdf.index.duplicated()]
# convert from raw data country name to iso-2-code
s = df.loc[duplicated_i, "Country"].apply(lambda x: pycountry.countries.lookup(x).alpha_2)
s = df.loc[duplicated_i, "Country"].apply(
lambda x: pycountry.countries.lookup(x).alpha_2
)
# Get a boolean mask where gdf's country column matches s's values for the same index
mask = gdf['country'] == gdf.index.map(s)
mask = gdf["country"] == gdf.index.map(s)
# Filter gdf using the mask
gdf_filtered = gdf[mask]
# concat not duplicated and filtered gdf
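For context, the pycountry lookup used above maps raw country names from the data to ISO alpha-2 codes. A minimal sketch (illustrative, not part of the commit):

import pycountry

# Map raw country names to their ISO alpha-2 codes, as done for the duplicated entries above.
print(pycountry.countries.lookup("Germany").alpha_2)  # -> "DE"
print(pycountry.countries.lookup("France").alpha_2)   # -> "FR"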
@@ -160,7 +163,8 @@ def build_nodal_distribution_key(hotmaps, regions, countries):

return keys

#%%

# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -235,20 +235,28 @@ def calculate_energy(n, label, energy):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0).sum()
.groupby(level=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier, axis=1)
.sum()
)
else:
c_energies = pd.DataFrame(0.0, columns=c.df.carrier.unique(), index=n.investment_periods)
c_energies = pd.DataFrame(
0.0, columns=c.df.carrier.unique(), index=n.investment_periods
)
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = c.pnl["p" + port].multiply(n.snapshot_weightings.generators, axis=0).groupby(level=0).sum()
totals = (
c.pnl["p" + port]
.multiply(n.snapshot_weightings.generators, axis=0)
.groupby(level=0)
.sum()
)
# remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus" + port] == ""]
totals[no_bus] = float(n.component_attrs[c.name].loc[
"p" + port, "default"
])
totals[no_bus] = float(
n.component_attrs[c.name].loc["p" + port, "default"]
)
c_energies -= totals.groupby(c.df.carrier, axis=1).sum()

c_energies = pd.concat([c_energies.T], keys=[c.list_name])
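The chained expression above computes time-weighted energy totals per investment period before grouping by carrier. A toy sketch of the weighting-and-aggregation step with made-up index and column names (illustrative only):

import pandas as pd

# Toy dispatch time series: MultiIndex (investment period, snapshot) x two assets.
idx = pd.MultiIndex.from_product([[2030, 2040], ["t0", "t1"]], names=["period", "snapshot"])
p = pd.DataFrame({"gen_a": [1.0, 2.0, 3.0, 4.0], "gen_b": [0.5, 0.5, 0.5, 0.5]}, index=idx)
weightings = pd.Series([3.0, 3.0, 3.0, 3.0], index=idx)  # hours represented by each snapshot

# Weight each snapshot by its duration and sum per investment period (level 0),
# mirroring `.multiply(..., axis=0).groupby(level=0).sum()` in the diff above.
energy = p.multiply(weightings, axis=0).groupby(level=0).sum()
print(energy)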
@@ -703,10 +711,12 @@ if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake

snakemake = mock_snakemake("make_summary_perfect")

run = snakemake.config["run"]["name"]
if run!="": run += "/"
if run != "":
run += "/"

networks_dict = {
(clusters, lv, opts + sector_opts): "results/"
@@ -913,9 +913,11 @@ def plot_series(network, carrier="AC", name="test"):
)


def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10):
def plot_map_perfect(
network,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=1.7e10,
):
n = network.copy()
assign_location(n)
# Drop non-electric buses so they don't clutter the plot
@@ -926,41 +928,48 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
costs = {}
for comp in components:
df_c = n.df(comp)
if df_c.empty: continue
if df_c.empty:
continue
df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp)

attr = "e_nom_opt" if comp == "Store" else "p_nom_opt"

active = pd.concat(
[
n.get_active_assets(comp, inv_p).rename(inv_p)
for inv_p in investments
],
[n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments],
axis=1,
).astype(int)
capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost
capital_cost_t = ((active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location,
n.df(comp).nice_group]).sum())
capital_cost_t = (
(active.mul(capital_cost, axis=0))
.groupby([n.df(comp).location, n.df(comp).nice_group])
.sum()
)

capital_cost_t.drop("load", level=1, inplace=True, errors="ignore")

costs[comp] = capital_cost_t

costs = pd.concat(costs).groupby(level=[1,2]).sum()
costs.drop(costs[costs.sum(axis=1)==0].index, inplace=True)
costs = pd.concat(costs).groupby(level=[1, 2]).sum()
costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True)

new_columns = (preferred_order.intersection(costs.index.levels[1])
.append(costs.index.levels[1].difference(preferred_order)))
new_columns = preferred_order.intersection(costs.index.levels[1]).append(
costs.index.levels[1].difference(preferred_order)
)
costs = costs.reindex(new_columns, level=1)

for item in new_columns:
if item not in snakemake.config['plotting']['tech_colors']:
print("Warning!",item,"not in config/plotting/tech_colors, assign random color")
snakemake.config['plotting']['tech_colors'] = "pink"
if item not in snakemake.config["plotting"]["tech_colors"]:
print(
"Warning!",
item,
"not in config/plotting/tech_colors, assign random color",
)
snakemake.config["plotting"]["tech_colors"] = "pink"

n.links.drop(n.links.index[(n.links.carrier != "DC") & (
n.links.carrier != "B2B")], inplace=True)
n.links.drop(
n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")],
inplace=True,
)

# drop non-bus
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
@@ -972,7 +981,7 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
costs.index = pd.MultiIndex.from_tuples(costs.index.values)

# PDF has minimum width, so set these to zero
line_lower_threshold = 500.
line_lower_threshold = 500.0
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
@@ -981,29 +990,29 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
line_widths = n.lines.s_nom_opt
link_widths = n.links.p_nom_opt
linewidth_factor = 2e3
line_lower_threshold = 0.
line_lower_threshold = 0.0
title = "Today's transmission"

line_widths[line_widths < line_lower_threshold] = 0.
link_widths[link_widths < line_lower_threshold] = 0.
line_widths[line_widths < line_lower_threshold] = 0.0
link_widths[link_widths < line_lower_threshold] = 0.0

line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold

for year in costs.columns:

fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig.suptitle(year)

n.plot(
bus_sizes=costs[year] / bus_size_factor,
bus_colors=snakemake.config['plotting']['tech_colors'],
bus_colors=snakemake.config["plotting"]["tech_colors"],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, **map_opts
ax=ax,
**map_opts,
)

sizes = [20, 10, 5]
@@ -1051,15 +1060,12 @@ def plot_map_perfect(network, components=["Link", "Store", "StorageUnit", "Gener
frameon=False,
)


fig.savefig(
snakemake.output[f"map_{year}"],
transparent=True,
bbox_inches="tight"
snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight"
)


#%%
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -1085,9 +1091,11 @@ if __name__ == "__main__":
map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1]

if snakemake.params["foresight"] == "perfect":
plot_map_perfect(n,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=2e10)
plot_map_perfect(
n,
components=["Link", "Store", "StorageUnit", "Generator"],
bus_size_factor=2e10,
)
else:
plot_map(
n,
@@ -276,8 +276,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers
]


for k, v in balances.items():
df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()
@@ -357,7 +355,6 @@ def plot_balances():
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches="tight")


def historical_emissions(countries):
"""
Read historical emissions to add them to the carbon budget plot.
@@ -393,17 +390,18 @@ def historical_emissions(countries):

e["other LULUCF"] = "4.H - Other LULUCF"


pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in countries:
countries.remove("GB")
countries.append("UK")

year = df.index.levels[0][df.index.levels[0]>=1990]
year = df.index.levels[0][df.index.levels[0] >= 1990]

missing = pd.Index(countries).difference(df.index.levels[2])
if not missing.empty:
logger.warning(f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}")
logger.warning(
f"The following countries are missing and not considered when plotting historic CO2 emissions: {missing}"
)
countries = pd.Index(df.index.levels[2]).intersection(countries)

idx = pd.IndexSlice
@@ -469,24 +467,26 @@ def plot_carbon_budget_distribution(input_eurostat):

# historic emissions
countries = snakemake.params.countries
e_1990 = co2_emissions_year(
countries, input_eurostat, opts, snakemake, year=1990
)
e_1990 = co2_emissions_year(countries, input_eurostat, opts, snakemake, year=1990)
emissions = historical_emissions(countries)
# add other years https://sdi.eea.europa.eu/data/0569441f-2853-4664-a7cd-db969ef54de0
emissions.loc[2019] = 2.971372
emissions.loc[2020] = 2.691958
emissions.loc[2021] = 2.869355

if snakemake.config["foresight"] == "myopic":
path_cb = "results/" + snakemake.params.RDIR + "/csvs/"
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[["cb"]]
co2_cap = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)[
["cb"]
]
co2_cap *= e_1990
else:
supply_energy = pd.read_csv(snakemake.input.balances,
index_col=[0,1,2], header=[0,1,2, 3])
co2_cap = supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T/1e9
supply_energy = pd.read_csv(
snakemake.input.balances, index_col=[0, 1, 2], header=[0, 1, 2, 3]
)
co2_cap = (
supply_energy.loc["co2"].droplevel(0).drop("co2").sum().unstack().T / 1e9
)
co2_cap.rename(index=lambda x: int(x), inplace=True)

plt.figure(figsize=(10, 7))
@@ -511,13 +511,13 @@ def plot_carbon_budget_distribution(input_eurostat):
)

ax1.plot(
[2030],
[0.45 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
)
[2030],
[0.45 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
)

ax1.plot(
[2030],
@@ -538,19 +538,18 @@ def plot_carbon_budget_distribution(input_eurostat):
)

ax1.plot(
[2050],
[0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
label="EU commited target",
)
[2050],
[0.0 * emissions[1990]],
marker="*",
markersize=12,
markerfacecolor="black",
markeredgecolor="black",
label="EU commited target",
)

for col in co2_cap.columns:
ax1.plot(co2_cap[col], linewidth=3, label=col)


ax1.legend(
fancybox=True, fontsize=18, loc=(0.01, 0.01), facecolor="white", frameon=True
)
@@ -559,7 +558,8 @@ def plot_carbon_budget_distribution(input_eurostat):
path = snakemake.output.balances.split("balances")[0] + "carbon_budget.pdf"
plt.savefig(path, bbox_inches="tight")

#%%

# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -578,5 +578,7 @@ if __name__ == "__main__":

for sector_opts in snakemake.params.sector_opts:
opts = sector_opts.split("-")
if any(["cb" in o for o in opts]) or (snakemake.config["foresight"]=="perfect"):
if any(["cb" in o for o in opts]) or (
snakemake.config["foresight"] == "perfect"
):
plot_carbon_budget_distribution(snakemake.input.eurostat)
@@ -8,14 +8,16 @@ Concats pypsa networks of single investment periods to one network.

import logging
import re
import pandas as pd

import numpy as np
import pandas as pd
import pypsa
from _helpers import update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
from six import iterkeys

logger = logging.getLogger(__name__)
@@ -38,8 +40,8 @@ def get_missing(df, n, c):

def get_social_discount(t, r=0.01):
"""
Calculate for a given time t and social discount rate r [per unit]
the social discount.
Calculate for a given time t and social discount rate r [per unit] the
social discount.
"""
return 1 / (1 + r) ** t
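For orientation, the factor returned by get_social_discount is 1/(1+r)^t; a quick illustrative check with the default rate of 1% (not part of the commit):

# Social discount factor for t years at the default rate r = 0.01.
for t in (0, 10, 30):
    print(t, round(1 / (1 + 0.01) ** t, 4))  # 1.0, 0.9053, 0.7419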
@@ -77,25 +79,29 @@ def add_year_to_constraints(n, baseyear):

def hvdc_transport_model(n):
"""
Convert AC lines to DC links for multi-decade optimisation with
line expansion. Losses of DC links are assumed to be 3% per 1000km
Convert AC lines to DC links for multi-decade optimisation with line
expansion.

Losses of DC links are assumed to be 3% per 1000km
"""

logger.info("Convert AC lines to DC links to perform multi-decade optimisation.")

n.madd("Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost)
n.madd(
"Link",
n.lines.index,
bus0=n.lines.bus0,
bus1=n.lines.bus1,
p_nom_extendable=True,
p_nom=n.lines.s_nom,
p_nom_min=n.lines.s_nom,
p_min_pu=-1,
efficiency=1 - 0.03 * n.lines.length / 1000,
marginal_cost=0,
carrier="DC",
length=n.lines.length,
capital_cost=n.lines.capital_cost,
)

# Remove AC lines
logger.info("Removing AC lines")
@@ -103,8 +109,9 @@ def hvdc_transport_model(n):
n.mremove("Line", lines_rm)

# Set efficiency of all DC links to include losses depending on length
n.links.loc[n.links.carrier == 'DC', 'efficiency'] = 1 - 0.03 * n.links.loc[
n.links.carrier == 'DC', 'length'] / 1000
n.links.loc[n.links.carrier == "DC", "efficiency"] = (
1 - 0.03 * n.links.loc[n.links.carrier == "DC", "length"] / 1000
)


def adjust_electricity_grid(n, year, years):
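The assumption of 3% losses per 1000 km translates into the link efficiencies set above; a small illustrative calculation (not part of the commit):

# Efficiency of a DC link under the 3% per 1000 km loss assumption.
for length_km in (250, 500, 1000):
    print(length_km, 1 - 0.03 * length_km / 1000)  # 0.9925, 0.985, 0.97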
@@ -121,17 +128,16 @@ def adjust_electricity_grid(n, year, years):
investment periods
"""
n.lines["carrier"] = "AC"
links_i = n.links[n.links.carrier=="DC"].index
links_i = n.links[n.links.carrier == "DC"].index
if n.lines.s_nom_extendable.any() or n.links.loc[links_i, "p_nom_extendable"].any():
hvdc_transport_model(n)
links_i = n.links[n.links.carrier=="DC"].index
links_i = n.links[n.links.carrier == "DC"].index
n.links.loc[links_i, "lifetime"] = 100
if year!= years[0]:
if year != years[0]:
n.links.loc[links_i, "p_nom_min"] = 0
n.links.loc[links_i, "p_nom"] = 0


# --------------------------------------------------------------------
def concat_networks(years):
"""
@@ -246,7 +252,6 @@ def adjust_stores(n):
co2_i = n.stores[n.stores.carrier.isin(e_initial_store)].index
n.stores.loc[co2_i, "e_initial_per_period"] = True


return n
@@ -309,11 +314,11 @@ def set_carbon_constraints(n, opts):
carrier_attribute="co2_emissions",
sense="<=",
constant=budget,
investment_period=n.investment_periods[-1]
investment_period=n.investment_periods[-1],
)

# drop other CO2 limits
drop_i = n.global_constraints[n.global_constraints.type=="co2_limit"].index
drop_i = n.global_constraints[n.global_constraints.type == "co2_limit"].index
n.mremove("GlobalConstraint", drop_i)

n.add(
@@ -323,10 +328,9 @@ def set_carbon_constraints(n, opts):
carrier_attribute="co2_emissions",
sense="<=",
constant=0,
investment_period=n.investment_periods[-1]
investment_period=n.investment_periods[-1],
)


# set minimum CO2 emission constraint to avoid too fast reduction
if "co2min" in opts:
emissions_1990 = 4.53693
@@ -354,16 +358,16 @@ def set_carbon_constraints(n, opts):

def adjust_lvlimit(n):
"""
Convert global constraints for single investment period to one uniform
if all attributes stay the same.
Convert global constraints for single investment period to one uniform if
all attributes stay the same.
"""
c = "GlobalConstraint"
cols = ['carrier_attribute', 'sense', "constant", "type"]
cols = ["carrier_attribute", "sense", "constant", "type"]
glc_type = "transmission_volume_expansion_limit"
if (n.df(c)[n.df(c).type==glc_type][cols].nunique()==1).all():
glc = n.df(c)[n.df(c).type==glc_type][cols].iloc[[0]]
if (n.df(c)[n.df(c).type == glc_type][cols].nunique() == 1).all():
glc = n.df(c)[n.df(c).type == glc_type][cols].iloc[[0]]
glc.index = pd.Index(["lv_limit"])
remove_i = n.df(c)[n.df(c).type==glc_type].index
remove_i = n.df(c)[n.df(c).type == glc_type].index
n.mremove(c, remove_i)
import_components_from_dataframe(n, glc, c)
@@ -374,7 +378,7 @@ def adjust_CO2_glc(n):
c = "GlobalConstraint"
glc_name = "CO2Limit"
glc_type = "primary_energy"
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type==glc_type)
mask = (n.df(c).index.str.contains(glc_name)) & (n.df(c).type == glc_type)
n.df(c).loc[mask, "type"] = "co2_limit"

return n
@@ -382,8 +386,9 @@ def adjust_CO2_glc(n):

def add_H2_boilers(n):
"""
Gas boilers can be retrofitted to run with H2. Add H2 boilers for heating
for all existing gas boilers.
Gas boilers can be retrofitted to run with H2.

Add H2 boilers for heating for all existing gas boilers.
"""
c = "Link"
logger.info("Add H2 boilers.")
@@ -394,8 +399,12 @@ def add_H2_boilers(n):
# adjust bus 0
df["bus0"] = df.bus1.map(n.buses.location) + " H2"
# rename carrier and index
df["carrier"] = df.carrier.apply(lambda x: x.replace("gas boiler", "retrofitted H2 boiler"))
df.rename(index = lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True)
df["carrier"] = df.carrier.apply(
lambda x: x.replace("gas boiler", "retrofitted H2 boiler")
)
df.rename(
index=lambda x: x.replace("gas boiler", "retrofitted H2 boiler"), inplace=True
)
# todo, costs for retrofitting
df["capital_costs"] = 100
# set existing capacity to zero
@@ -470,6 +479,7 @@ def apply_time_segmentation_perfect(

return n


def set_temporal_aggregation_SEG(n, opts, solver_name):
"""
Aggregate network temporally with tsam.
@@ -483,6 +493,8 @@ def set_temporal_aggregation_SEG(n, opts, solver_name):
n = apply_time_segmentation_perfect(n, segments, solver_name=solver_name)
break
return n


# %%
if __name__ == "__main__":
if "snakemake" not in globals():
@@ -32,10 +32,9 @@ import re
import numpy as np
import pandas as pd
import pypsa
from pypsa.descriptors import get_activity_mask
import xarray as xr
from _helpers import configure_logging, update_config_with_sector_opts

from pypsa.descriptors import get_activity_mask
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)
@@ -51,31 +50,37 @@ def add_land_use_constraint(n, planning_horizons, config):


def add_land_use_constraint_perfect(n):
"""Add global constraints for tech capacity limit.
"""
"""
Add global constraints for tech capacity limit.
"""
logger.info("Add land-use constraint for perfect foresight")
def compress_series(s):

def compress_series(s):
def process_group(group):
if group.nunique() == 1:
return pd.Series(group.iloc[0], index=[None])
else:
return group

return s.groupby(level=[0,1]).apply(process_group)
return s.groupby(level=[0, 1]).apply(process_group)

def new_index_name(t):
# Convert all elements to string and filter out None values
parts = [str(x) for x in t if x is not None]
# Join with space, but use a dash for the last item if not None
return ' '.join(parts[:2]) + (f'-{parts[-1]}' if len(parts) > 2 else '')
return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")

def check_p_min_p_max(p_nom_max):
p_nom_min = n.generators[ext_i].groupby(grouper).sum().p_nom_min
p_nom_min = p_nom_min.reindex(p_nom_max.index)
check = (p_nom_min.groupby(level=[0,1]).sum()>p_nom_max.groupby(level=[0,1]).min())
check = (
p_nom_min.groupby(level=[0, 1]).sum()
> p_nom_max.groupby(level=[0, 1]).min()
)
if check.sum():
logger.warning(f"summed p_min_pu values at node larger than technical potential {check[check].index}")
logger.warning(
f"summed p_min_pu values at node larger than technical potential {check[check].index}"
)

grouper = [n.generators.carrier, n.generators.bus, n.generators.build_year]
ext_i = n.generators.p_nom_extendable
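To illustrate the helper above, new_index_name joins the first two tuple elements with a space and appends a third element (e.g. a build year) with a dash. A quick sketch with made-up tuple values (illustrative only, not part of the commit):

def new_index_name(t):
    # Same logic as in the diff above: drop None entries, join the first two
    # parts with a space, and append a remaining part with a dash.
    parts = [str(x) for x in t if x is not None]
    return " ".join(parts[:2]) + (f"-{parts[-1]}" if len(parts) > 2 else "")

print(new_index_name(("solar", "DE0 0", 2030)))   # -> "solar DE0 0-2030"
print(new_index_name(("onwind", "FR0 0", None)))  # -> "onwind FR0 0"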
@@ -85,8 +90,7 @@ def add_land_use_constraint_perfect(n):
p_nom_max = p_nom_max[~p_nom_max.isin([np.inf, np.nan])]
# carrier
carriers = p_nom_max.index.get_level_values(0).unique()
gen_i = n.generators[(n.generators.carrier.isin(carriers)) &
(ext_i)].index
gen_i = n.generators[(n.generators.carrier.isin(carriers)) & (ext_i)].index
n.generators.loc[gen_i, "p_nom_min"] = 0
# check minimum capacities
check_p_min_p_max(p_nom_max)
@@ -94,12 +98,14 @@ def add_land_use_constraint_perfect(n):
# p_nom_max = compress_series(p_nom_max)
# adjust name to fit syntax of nominal constraint per bus
df = p_nom_max.reset_index()
df["name"] = df.apply(lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row['build_year'] is not None else ""),
axis=1)
df["name"] = df.apply(
lambda row: f"nom_max_{row['carrier']}"
+ (f"_{row['build_year']}" if row["build_year"] is not None else ""),
axis=1,
)

for name in df.name.unique():
df_carrier = df[df.name==name]
df_carrier = df[df.name == name]
bus = df_carrier.bus
n.buses.loc[bus, name] = df_carrier.p_nom_max.values
@@ -227,7 +233,7 @@ def add_carbon_constraint(n, snapshots):
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i,:] - final_e.shift(snapshot=1).loc[time_i,:]
lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]

n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
@@ -254,13 +260,14 @@ def add_carbon_budget_constraint(n, snapshots):
last_i = last.set_index([last.index, last.timestep]).index
final_e = n.model["Store-e"].loc[last_i, stores.index]
time_i = pd.IndexSlice[time_valid, :]
lhs = final_e.loc[time_i,:] * weighting
lhs = final_e.loc[time_i, :] * weighting

n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")


def add_max_growth(n, config):
"""Add maximum growth rates for different carriers
"""
Add maximum growth rates for different carriers.
"""

opts = config["sector"]["limit_max_growth"]
@@ -268,19 +275,24 @@ def add_max_growth(n, config):
factor = n.investment_period_weightings.years.max() * opts["factor"]
for carrier in opts["max_growth"].keys():
max_per_period = opts["max_growth"][carrier] * factor
logger.info(f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW.")
logger.info(
f"set maximum growth rate per investment period of {carrier} to {max_per_period} GW."
)
n.carriers.loc[carrier, "max_growth"] = max_per_period * 1e3

for carrier in opts["max_relative_growth"].keys():
max_r_per_period = opts["max_relative_growth"][carrier]
logger.info(f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}.")
logger.info(
f"set maximum relative growth per investment period of {carrier} to {max_r_per_period}."
)
n.carriers.loc[carrier, "max_relative_growth"] = max_r_per_period

return n


def add_retrofit_gas_boiler_constraint(n, snapshots):
"""Allow retrofitting of existing gas boilers to H2 boilers.
"""
Allow retrofitting of existing gas boilers to H2 boilers.
"""
c = "Link"
logger.info("Add constraint for retrofitting gas boilers to H2 boilers.")
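As a quick illustration of the scaling in add_max_growth above, the per-period cap is the annual growth limit times the weighting factor, then converted from GW to MW when written to n.carriers. The numbers below are made up, illustrative only:

# Illustrative values: 10-year investment periods, config factor 1, 30 GW/a growth limit.
years_per_period = 10
config_factor = 1
annual_limit_gw = 30

factor = years_per_period * config_factor
max_per_period_gw = annual_limit_gw * factor   # 300 GW per investment period
max_growth_mw = max_per_period_gw * 1e3        # 300000.0 MW, as stored in n.carriers
print(max_per_period_gw, max_growth_mw)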
@@ -290,23 +302,25 @@ def add_retrofit_gas_boiler_constraint(n, snapshots):
mask = n.links.carrier.str.contains("retrofitted H2 boiler")
h2_i = n.links[mask].index

n.links.loc[gas_i, "p_nom_extendable"] = True
p_nom = n.links.loc[gas_i, "p_nom"]
n.links.loc[gas_i, "p_nom"] = 0

# heat profile
cols = n.loads_t.p_set.columns[n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")]
profile = n.loads_t.p_set[cols].div(n.loads_t.p_set[cols].groupby(level=0).max(), level=0)
cols = n.loads_t.p_set.columns[
n.loads_t.p_set.columns.str.contains("heat")
& ~n.loads_t.p_set.columns.str.contains("industry")
& ~n.loads_t.p_set.columns.str.contains("agriculture")
]
profile = n.loads_t.p_set[cols].div(
n.loads_t.p_set[cols].groupby(level=0).max(), level=0
)
# to deal if max value is zero
profile.fillna(0, inplace=True)
profile.rename(columns=n.loads.bus.to_dict(), inplace=True)
profile = profile.reindex(columns=n.links.loc[gas_i, "bus1"])
profile.columns = gas_i

rhs = profile.mul(p_nom)

dispatch = n.model["Link-p"]
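For context, the profile above normalises each heat demand series by its per-investment-period maximum so that the retrofit constraint scales the original boiler capacity with demand. A toy sketch of the same normalisation on a single series, with made-up values (illustrative only):

import pandas as pd

# Toy heat demand indexed by (investment period, snapshot).
idx = pd.MultiIndex.from_product([[2030, 2040], ["t0", "t1"]], names=["period", "snapshot"])
p_set = pd.Series([50.0, 100.0, 60.0, 120.0], index=idx)

# Normalise by the per-period maximum, mirroring the `.div(..., level=0)` call above.
profile = p_set.div(p_set.groupby(level=0).max(), level=0)
print(profile)  # 0.5 and 1.0 within each investment period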
@@ -320,8 +334,6 @@ def add_retrofit_gas_boiler_constraint(n, snapshots):
n.model.add_constraints(lhs == rhs, name="gas_retrofit")


def prepare_network(
n,
solve_opts=None,
@@ -756,7 +768,6 @@ def add_pipe_retrofit_constraint(n):
n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")


def extra_functionality(n, snapshots):
"""
Collects supplementary constraints which will be passed to
@@ -792,7 +803,9 @@ def solve_network(n, config, solving, opts="", **kwargs):
set_of_options = solving["solver"]["options"]
cf_solving = solving["options"]

kwargs["multi_investment_periods"] = True if config["foresight"] == "perfect" else False
kwargs["multi_investment_periods"] = (
True if config["foresight"] == "perfect" else False
)
kwargs["solver_options"] = (
solving["solver_options"][set_of_options] if set_of_options else {}
)
@@ -838,7 +851,8 @@ def solve_network(n, config, solving, opts="", **kwargs):

return n

#%%

# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -878,8 +892,9 @@ if __name__ == "__main__":
co2_sequestration_potential=snakemake.params["co2_sequestration_potential"],
)

with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
with memory_logger(
filename=getattr(snakemake.log, "memory", None), interval=30.0
) as mem:
n = solve_network(
n,
config=snakemake.config,
@@ -890,8 +905,5 @@ if __name__ == "__main__":

logger.info("Maximum memory usage: {}".format(mem.mem_usage))


n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])