Merge pull request #750 from PyPSA/sourcery/master

Sourcery refactored master branch

commit 7cb4742c75
@@ -303,10 +303,7 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):


 def parse(l):
-    if len(l) == 1:
-        return yaml.safe_load(l[0])
-    else:
-        return {l.pop(0): parse(l)}
+    return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}


 def update_config_with_sector_opts(config, sector_opts):
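The hunk above collapses the if/else pair of returns in `parse` into a single conditional expression. A minimal runnable sketch of the recursion, with a hypothetical input list:

    import yaml

    def parse(l):
        # base case: a single element is YAML-parsed; otherwise the first
        # element becomes a key and the remainder is parsed recursively
        return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}

    # hypothetical option path split into its parts
    assert parse(["co2_budget", "2030", "0.45"]) == {"co2_budget": {"2030": 0.45}}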
@@ -41,12 +41,9 @@ def add_brownfield(n, n_p, year):
         # remove assets if their optimized nominal capacity is lower than a threshold
         # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
         chp_heat = c.df.index[
-            (
-                c.df[attr + "_nom_extendable"]
-                & c.df.index.str.contains("urban central")
-                & c.df.index.str.contains("CHP")
-                & c.df.index.str.contains("heat")
-            )
+            (c.df[f"{attr}_nom_extendable"] & c.df.index.str.contains("urban central"))
+            & c.df.index.str.contains("CHP")
+            & c.df.index.str.contains("heat")
         ]

         threshold = snakemake.params.threshold_capacity
@@ -60,21 +57,20 @@ def add_brownfield(n, n_p, year):
             )
             n_p.mremove(
                 c.name,
-                chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat],
+                chp_heat[c.df.loc[chp_heat, f"{attr}_nom_opt"] < threshold_chp_heat],
             )

         n_p.mremove(
             c.name,
             c.df.index[
-                c.df[attr + "_nom_extendable"]
-                & ~c.df.index.isin(chp_heat)
-                & (c.df[attr + "_nom_opt"] < threshold)
+                (c.df[f"{attr}_nom_extendable"] & ~c.df.index.isin(chp_heat))
+                & (c.df[f"{attr}_nom_opt"] < threshold)
             ],
         )

         # copy over assets but fix their capacity
-        c.df[attr + "_nom"] = c.df[attr + "_nom_opt"]
-        c.df[attr + "_nom_extendable"] = False
+        c.df[f"{attr}_nom"] = c.df[f"{attr}_nom_opt"]
+        c.df[f"{attr}_nom_extendable"] = False

         n.import_components_from_dataframe(c.df, c.name)
@@ -293,24 +293,23 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
         l = opsd_load[cntry]
         if len(group) == 1:
             return pd.DataFrame({group.index[0]: l})
-        else:
-            nuts3_cntry = nuts3.loc[nuts3.country == cntry]
-            transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
-            gdp_n = pd.Series(
-                transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
-            )
-            pop_n = pd.Series(
-                transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
-            )
+        nuts3_cntry = nuts3.loc[nuts3.country == cntry]
+        transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
+        gdp_n = pd.Series(
+            transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
+        )
+        pop_n = pd.Series(
+            transfer.dot(nuts3_cntry["pop"].fillna(1.0).values), index=group.index
+        )

-            # relative factors 0.6 and 0.4 have been determined from a linear
-            # regression on the country to continent load data
-            factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
-            return pd.DataFrame(
-                factors.values * l.values[:, np.newaxis],
-                index=l.index,
-                columns=factors.index,
-            )
+        # relative factors 0.6 and 0.4 have been determined from a linear
+        # regression on the country to continent load data
+        factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
+        return pd.DataFrame(
+            factors.values * l.values[:, np.newaxis],
+            index=l.index,
+            columns=factors.index,
+        )

     load = pd.concat(
         [
@@ -435,7 +434,7 @@ def attach_conventional_generators(
     ppl = (
         ppl.query("carrier in @carriers")
         .join(costs, on="carrier", rsuffix="_r")
-        .rename(index=lambda s: "C" + str(s))
+        .rename(index=lambda s: f"C{str(s)}")
     )
     ppl["efficiency"] = ppl.efficiency.fillna(ppl.efficiency_r)
@@ -512,7 +511,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **params):
     ppl = (
         ppl.query('carrier == "hydro"')
         .reset_index(drop=True)
-        .rename(index=lambda s: str(s) + " hydro")
+        .rename(index=lambda s: f"{str(s)} hydro")
     )
     ror = ppl.query('technology == "Run-Of-River"')
     phs = ppl.query('technology == "Pumped Storage"')
@@ -609,16 +608,13 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **params):
         )
         if not missing_countries.empty:
             logger.warning(
-                "Assuming max_hours=6 for hydro reservoirs in the countries: {}".format(
-                    ", ".join(missing_countries)
-                )
+                f'Assuming max_hours=6 for hydro reservoirs in the countries: {", ".join(missing_countries)}'
             )
         hydro_max_hours = hydro.max_hours.where(
             hydro.max_hours > 0, hydro.country.map(max_hours_country)
         ).fillna(6)

-        flatten_dispatch = params.get("flatten_dispatch", False)
-        if flatten_dispatch:
+        if flatten_dispatch := params.get("flatten_dispatch", False):
             buffer = params.get("flatten_dispatch_buffer", 0.2)
             average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
             p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
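The `flatten_dispatch` change above is the assignment-expression pattern (walrus operator, Python >= 3.8) that Sourcery applies throughout this PR: assign and test in one statement, with the name still bound inside the block. A minimal sketch with a hypothetical params dict:

    params = {"flatten_dispatch": True}  # hypothetical stand-in for the rule's params

    # before: assign, then test
    flatten_dispatch = params.get("flatten_dispatch", False)
    if flatten_dispatch:
        print("flattening hydro dispatch")

    # after: one statement; flatten_dispatch remains usable in the body
    if flatten_dispatch := params.get("flatten_dispatch", False):
        print("flattening hydro dispatch")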
@@ -45,7 +45,7 @@ def add_build_year_to_new_assets(n, baseyear):

         # add -baseyear to name
         rename = pd.Series(c.df.index, c.df.index)
-        rename[assets] += "-" + str(baseyear)
+        rename[assets] += f"-{str(baseyear)}"
         c.df.rename(index=rename, inplace=True)

         # rename time-dependent
@@ -252,7 +252,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
         if "m" in snakemake.wildcards.clusters:
             for ind in new_capacity.index:
                 # existing capacities are split evenly among regions in every country
-                inv_ind = [i for i in inv_busmap[ind]]
+                inv_ind = list(inv_busmap[ind])

                 # for offshore the splitting only includes coastal regions
                 inv_ind = [
@@ -545,13 +545,17 @@ def add_heating_capacities_installed_before_baseyear(
                 bus0=nodes[name],
                 bus1=nodes[name] + " " + name + " heat",
                 carrier=name + " resistive heater",
-                efficiency=costs.at[name_type + " resistive heater", "efficiency"],
-                capital_cost=costs.at[name_type + " resistive heater", "efficiency"]
-                * costs.at[name_type + " resistive heater", "fixed"],
-                p_nom=0.5
-                * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
-                * ratio
-                / costs.at[name_type + " resistive heater", "efficiency"],
+                efficiency=costs.at[f"{name_type} resistive heater", "efficiency"],
+                capital_cost=(
+                    costs.at[f"{name_type} resistive heater", "efficiency"]
+                    * costs.at[f"{name_type} resistive heater", "fixed"]
+                ),
+                p_nom=(
+                    0.5
+                    * nodal_df[f"{heat_type} resistive heater"][nodes[name]]
+                    * ratio
+                    / costs.at[f"{name_type} resistive heater", "efficiency"]
+                ),
                 build_year=int(grouping_year),
                 lifetime=costs.at[costs_name, "lifetime"],
             )
@@ -564,16 +568,20 @@ def add_heating_capacities_installed_before_baseyear(
                 bus1=nodes[name] + " " + name + " heat",
                 bus2="co2 atmosphere",
                 carrier=name + " gas boiler",
-                efficiency=costs.at[name_type + " gas boiler", "efficiency"],
+                efficiency=costs.at[f"{name_type} gas boiler", "efficiency"],
                 efficiency2=costs.at["gas", "CO2 intensity"],
-                capital_cost=costs.at[name_type + " gas boiler", "efficiency"]
-                * costs.at[name_type + " gas boiler", "fixed"],
-                p_nom=0.5
-                * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
-                * ratio
-                / costs.at[name_type + " gas boiler", "efficiency"],
+                capital_cost=(
+                    costs.at[f"{name_type} gas boiler", "efficiency"]
+                    * costs.at[f"{name_type} gas boiler", "fixed"]
+                ),
+                p_nom=(
+                    0.5
+                    * nodal_df[f"{heat_type} gas boiler"][nodes[name]]
+                    * ratio
+                    / costs.at[f"{name_type} gas boiler", "efficiency"]
+                ),
                 build_year=int(grouping_year),
-                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
+                lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
             )

             n.madd(
@@ -593,7 +601,7 @@ def add_heating_capacities_installed_before_baseyear(
                 * ratio
                 / costs.at["decentral oil boiler", "efficiency"],
                 build_year=int(grouping_year),
-                lifetime=costs.at[name_type + " gas boiler", "lifetime"],
+                lifetime=costs.at[f"{name_type} gas boiler", "lifetime"],
             )

             # delete links with p_nom=nan corresponding to extra nodes in country
@@ -151,9 +151,7 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
         buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
     )
     logger.info(
-        "Removing buses with voltages {}".format(
-            pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
-        )
+        f'Removing buses with voltages {pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])}'
     )

     return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
@@ -460,11 +458,7 @@ def _remove_unconnected_components(network):
     components_to_remove = component_sizes.iloc[1:]

     logger.info(
-        "Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
-            len(components_to_remove),
-            components_to_remove.max(),
-            components_to_remove.sum(),
-        )
+        f"Removing {len(components_to_remove)} unconnected network components with less than {components_to_remove.max()} buses. In total {components_to_remove.sum()} buses."
     )

     return network[component == component_sizes.index[0]]
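Both logging hunks above move from positional `str.format` to f-strings, which keep each expression next to its placeholder. The two forms render identically; the series below is hypothetical:

    import pandas as pd

    components_to_remove = pd.Series([4, 2, 1])  # hypothetical component sizes

    old = "Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
        len(components_to_remove), components_to_remove.max(), components_to_remove.sum()
    )
    new = f"Removing {len(components_to_remove)} unconnected network components with less than {components_to_remove.max()} buses. In total {components_to_remove.sum()} buses."
    assert old == new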
@@ -172,8 +172,6 @@ def build_swiss(year):


 def idees_per_country(ct, year, base_dir):
-    ct_totals = {}
-
     ct_idees = idees_rename.get(ct, ct)
     fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
     fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
@@ -183,11 +181,11 @@ def idees_per_country(ct, year, base_dir):

     df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year]

-    ct_totals["total residential space"] = df["Space heating"]
-
     rows = ["Advanced electric heating", "Conventional electric heating"]
-    ct_totals["electricity residential space"] = df[rows].sum()
+    ct_totals = {
+        "total residential space": df["Space heating"],
+        "electricity residential space": df[rows].sum(),
+    }
     ct_totals["total residential water"] = df.at["Water heating"]

     assert df.index[23] == "Electricity"
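The two hunks above replace the empty-dict seed `ct_totals = {}` plus piecemeal assignments with a dict literal holding the first entries; later keys are still added by assignment. The same pattern with hypothetical numbers:

    # before
    ct_totals = {}
    ct_totals["total residential space"] = 100.0       # hypothetical values
    ct_totals["electricity residential space"] = 40.0

    # after: the first entries move into the literal
    ct_totals = {
        "total residential space": 100.0,
        "electricity residential space": 40.0,
    }
    ct_totals["total residential water"] = 25.0        # later keys unchanged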
@@ -29,25 +29,25 @@ def diameter_to_capacity(pipe_diameter_mm):
     Based on p.15 of
     https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
     """
-    # slopes definitions
-    m0 = (1500 - 0) / (500 - 0)
     m1 = (5000 - 1500) / (600 - 500)
     m2 = (11250 - 5000) / (900 - 600)
-    m3 = (21700 - 11250) / (1200 - 900)
-
-    # intercept
-    a0 = 0
     a1 = -16000
     a2 = -7500
-    a3 = -20100

     if pipe_diameter_mm < 500:
+        # slopes definitions
+        m0 = (1500 - 0) / (500 - 0)
+        # intercept
+        a0 = 0
         return a0 + m0 * pipe_diameter_mm
     elif pipe_diameter_mm < 600:
         return a1 + m1 * pipe_diameter_mm
     elif pipe_diameter_mm < 900:
         return a2 + m2 * pipe_diameter_mm
     else:
+        m3 = (21700 - 11250) / (1200 - 900)
+
+        a3 = -20100
+
         return a3 + m3 * pipe_diameter_mm
@@ -167,9 +167,7 @@ def industrial_energy_demand(countries, year):
     with mp.Pool(processes=nprocesses) as pool:
         demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))

-    demand = pd.concat(demand_l, keys=countries)
-
-    return demand
+    return pd.concat(demand_l, keys=countries)


 if __name__ == "__main__":
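`industrial_energy_demand` above shows the inline-immediately-returned-variable pattern: a name assigned and returned on the next line is dropped in favour of returning the expression directly. A self-contained sketch with hypothetical frames:

    import pandas as pd

    def concat_demand(demand_l, countries):
        # before: demand = pd.concat(demand_l, keys=countries); return demand
        return pd.concat(demand_l, keys=countries)

    frames = [pd.DataFrame({"elec": [1.0]}), pd.DataFrame({"elec": [2.0]})]
    print(concat_demand(frames, ["DE", "FR"]))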
@@ -83,8 +83,7 @@ def calculate_resistance(T, R_ref, T_ref=293, alpha=0.00403):
     -------
     Resistance of at given temperature.
     """
-    R = R_ref * (1 + alpha * (T - T_ref))
-    return R
+    return R_ref * (1 + alpha * (T - T_ref))


 def calculate_line_rating(n, cutout):
@@ -125,13 +124,12 @@ def calculate_line_rating(n, cutout):
     R = calculate_resistance(T=353, R_ref=R)
     Imax = cutout.line_rating(shapes, R, D=0.0218, Ts=353, epsilon=0.8, alpha=0.8)
     line_factor = relevant_lines.eval("v_nom * n_bundle * num_parallel") / 1e3  # in mW
-    da = xr.DataArray(
+    return xr.DataArray(
         data=np.sqrt(3) * Imax * line_factor.values.reshape(-1, 1),
         attrs=dict(
             description="Maximal possible power in MW for given line considering line rating"
         ),
     )
-    return da


 if __name__ == "__main__":
@@ -146,8 +146,7 @@ if __name__ == "__main__":
         ppl, snakemake.input.custom_powerplants, custom_ppl_query
     )

-    countries_wo_ppl = set(countries) - set(ppl.Country.unique())
-    if countries_wo_ppl:
+    if countries_wo_ppl := set(countries) - set(ppl.Country.unique()):
         logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")

     substations = n.buses.query("substation_lv")
@@ -609,12 +609,11 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
             / x.A_C_Ref
             if x.name[3] != "Window"
             else (
-                window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions)
-                * x.A_element
+                (window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element)
                 / x.A_C_Ref
-                if x.value > window_limit(float(l), window_assumptions)
-                else 0
-            ),
+            )
+            if x.value > window_limit(float(l), window_assumptions)
+            else 0,
             axis=1,
         )
@@ -739,12 +738,12 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
     # (1) by transmission
     # calculate new U values of building elements due to additional insulation
     for l in l_strength:
-        u_values["new_U_{}".format(l)] = calculate_new_u(
+        u_values[f"new_U_{l}"] = calculate_new_u(
             u_values, l, l_weight, window_assumptions
         )
     # surface area of building components [m^2]
     area_element = (
-        data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]]
+        data_tabula[[f"A_{e}" for e in u_values.index.levels[3]]]
         .rename(columns=lambda x: x[2:])
         .stack()
         .unstack(-2)
@@ -756,7 +755,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):

     # heat transfer H_tr_e [W/m^2K] through building element
     # U_e * A_e / A_C_Ref
-    columns = ["value"] + ["new_U_{}".format(l) for l in l_strength]
+    columns = ["value"] + [f"new_U_{l}" for l in l_strength]
     heat_transfer = pd.concat(
         [u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1
     )
@@ -875,10 +874,7 @@ def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
     alpha = alpha_H_0 + (tau / tau_H_0)
     # heat balance ratio
     gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
-    # gain utilisation factor
-    nu = (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))
-
-    return nu
+    return (1 - gamma**alpha) / (1 - gamma ** (alpha + 1))


 def calculate_space_heat_savings(
@@ -66,11 +66,7 @@ def salt_cavern_potential_by_region(caverns, regions):
         "capacity_per_area * share * area_caverns / 1000"
     )  # TWh

-    caverns_regions = (
-        overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")
-    )
-
-    return caverns_regions
+    return overlay.groupby(["name", "storage_type"]).e_nom.sum().unstack("storage_type")


 if __name__ == "__main__":
@@ -119,7 +119,7 @@ def countries(naturalearth, country_list):
     fieldnames = (
         df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
     )
-    df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
+    df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[:2]

     df = df.loc[
         df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
@@ -81,14 +81,12 @@ def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
         - pop_weighted_energy_totals["electricity rail"]
     )

-    transport = (
+    return (
         (transport_shape.multiply(energy_totals_transport) * 1e6 * nyears)
         .divide(efficiency_gain * ice_correction)
         .multiply(1 + dd_EV)
     )

-    return transport
-

 def transport_degree_factor(
     temperature,
@@ -132,14 +130,12 @@ def bev_availability_profile(fn, snapshots, nodes, options):
         traffic.mean() - traffic.min()
     )

-    avail_profile = generate_periodic_profiles(
+    return generate_periodic_profiles(
         dt_index=snapshots,
         nodes=nodes,
         weekly_profile=avail.values,
     )

-    return avail_profile
-

 def bev_dsm_profile(snapshots, nodes, options):
     dsm_week = np.zeros((24 * 7,))
@@ -148,14 +144,12 @@ def bev_dsm_profile(snapshots, nodes, options):
         "bev_dsm_restriction_value"
     ]

-    dsm_profile = generate_periodic_profiles(
+    return generate_periodic_profiles(
         dt_index=snapshots,
         nodes=nodes,
         weekly_profile=dsm_week,
     )

-    return dsm_profile
-

 if __name__ == "__main__":
     if "snakemake" not in globals():
@@ -322,9 +322,9 @@ def busmap_for_n_clusters(
             neighbor_bus = n.lines.query(
                 "bus0 == @disconnected_bus or bus1 == @disconnected_bus"
             ).iloc[0][["bus0", "bus1"]]
-            new_country = list(
-                set(n.buses.loc[neighbor_bus].country) - set([country])
-            )[0]
+            new_country = list(set(n.buses.loc[neighbor_bus].country) - {country})[
+                0
+            ]

             logger.info(
                 f"overwriting country `{country}` of bus `{disconnected_bus}` "
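The hunk above also swaps `set([country])` for the set literal `{country}`, avoiding the throwaway list. With hypothetical values:

    countries = ["DE", "FR", "DE"]  # hypothetical bus countries
    country = "DE"

    assert set(countries) - set([country]) == set(countries) - {country} == {"FR"}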
@@ -33,10 +33,7 @@ def assign_locations(n):
         ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
         for i in ifind.unique():
             names = ifind.index[ifind == i]
-            if i == -1:
-                c.df.loc[names, "location"] = ""
-            else:
-                c.df.loc[names, "location"] = names.str[:i]
+            c.df.loc[names, "location"] = "" if i == -1 else names.str[:i]


 def calculate_nodal_cfs(n, label, nodal_cfs):
@@ -397,7 +394,7 @@ def calculate_supply_energy(n, label, supply_energy):

     for c in n.iterate_components(n.branch_components):
         for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
-            items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
+            items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]

             if len(items) == 0:
                 continue
@@ -493,7 +490,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
         "H2": ["Sabatier", "H2 Fuel Cell"],
     }

-    for carrier in link_loads:
+    for carrier, value in link_loads.items():
         if carrier == "electricity":
             suffix = ""
         elif carrier[:5] == "space":
@@ -515,15 +512,15 @@ def calculate_weighted_prices(n, label, weighted_prices):
         else:
             load = n.loads_t.p_set[buses]

-        for tech in link_loads[carrier]:
+        for tech in value:
             names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

-            if names.empty:
-                continue
-
-            load += (
-                n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
-            )
+            if not names.empty:
+                load += (
+                    n.links_t.p0[names]
+                    .groupby(n.links.loc[names, "bus0"], axis=1)
+                    .sum()
+                )

         # Add H2 Store when charging
         # if carrier == "H2":
@@ -650,11 +647,7 @@ def make_summaries(networks_dict):
         networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"]
     )

-    df = {}
-
-    for output in outputs:
-        df[output] = pd.DataFrame(columns=columns, dtype=float)
+    df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs}

     for label, filename in networks_dict.items():
         logger.info(f"Make summary for scenario {label}, using {filename}")
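`make_summaries` above turns an empty dict plus a fill loop into a dict comprehension. A runnable sketch with hypothetical output names:

    import pandas as pd

    outputs = ["costs", "capacities"]  # hypothetical summary table names
    columns = pd.MultiIndex.from_tuples(
        [("37", "v1.0", "Co2L", "2030")],
        names=["cluster", "ll", "opt", "planning_horizon"],
    )

    # loop-and-assign becomes a single comprehension
    df = {output: pd.DataFrame(columns=columns, dtype=float) for output in outputs}
    assert list(df) == outputs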
@@ -382,7 +382,7 @@ def calculate_supply_energy(n, label, supply_energy):

     for c in n.iterate_components(n.branch_components):
         for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
-            items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
+            items = c.df.index[c.df[f"bus{str(end)}"].map(bus_map).fillna(False)]

             if len(items) == 0:
                 continue
@@ -483,7 +483,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
         "H2": ["Sabatier", "H2 Fuel Cell"],
     }

-    for carrier in link_loads:
+    for carrier, value in link_loads.items():
         if carrier == "electricity":
             suffix = ""
         elif carrier[:5] == "space":
@@ -496,12 +496,12 @@ def calculate_weighted_prices(n, label, weighted_prices):
         if buses.empty:
             continue

-        if carrier in ["H2", "gas"]:
-            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
-        else:
-            load = n.loads_t.p_set.reindex(buses, axis=1)
-
-        for tech in link_loads[carrier]:
+        load = (
+            pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
+            if carrier in ["H2", "gas"]
+            else n.loads_t.p_set.reindex(buses, axis=1)
+        )
+        for tech in value:
             names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

             if names.empty:
@@ -145,12 +145,12 @@ def plot_map(
     ac_color = "rosybrown"
     dc_color = "darkseagreen"

+    title = "added grid"
+
     if snakemake.wildcards["ll"] == "v1.0":
         # should be zero
         line_widths = n.lines.s_nom_opt - n.lines.s_nom
         link_widths = n.links.p_nom_opt - n.links.p_nom
-        title = "added grid"
-
         if transmission:
             line_widths = n.lines.s_nom_opt
             link_widths = n.links.p_nom_opt
@@ -160,8 +160,6 @@ def plot_map(
     else:
         line_widths = n.lines.s_nom_opt - n.lines.s_nom_min
         link_widths = n.links.p_nom_opt - n.links.p_nom_min
-        title = "added grid"
-
         if transmission:
             line_widths = n.lines.s_nom_opt
             link_widths = n.links.p_nom_opt
@@ -262,12 +260,7 @@ def group_pipes(df, drop_direction=False):
         lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
         axis=1,
     )
-    # group pipe lines connecting the same buses and rename them for plotting
-    pipe_capacity = df.groupby(level=0).agg(
-        {"p_nom_opt": sum, "bus0": "first", "bus1": "first"}
-    )
-
-    return pipe_capacity
+    return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})


 def plot_h2_map(network, regions):
@@ -766,11 +759,13 @@ def plot_series(network, carrier="AC", name="test"):
         supply = pd.concat(
             (
                 supply,
-                (-1)
-                * c.pnl["p" + str(i)]
-                .loc[:, c.df.index[c.df["bus" + str(i)].isin(buses)]]
-                .groupby(c.df.carrier, axis=1)
-                .sum(),
+                (
+                    -1
+                    * c.pnl[f"p{str(i)}"]
+                    .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]]
+                    .groupby(c.df.carrier, axis=1)
+                    .sum()
+                ),
             ),
             axis=1,
         )
@@ -297,11 +297,7 @@ def plot_balances():
             df.abs().max(axis=1) < snakemake.params.plotting["energy_threshold"] / 10
         ]

-        if v[0] in co2_carriers:
-            units = "MtCO2/a"
-        else:
-            units = "TWh/a"
+        units = "MtCO2/a" if v[0] in co2_carriers else "TWh/a"

         logger.debug(
             f"Dropping technology energy balance smaller than {snakemake.params['plotting']['energy_threshold']/10} {units}"
         )
@@ -587,7 +583,5 @@ if __name__ == "__main__":

     for sector_opts in snakemake.params.sector_opts:
         opts = sector_opts.split("-")
-        if any(["cb" in o for o in opts]) or (
-            snakemake.config["foresight"] == "perfect"
-        ):
+        if any("cb" in o for o in opts) or snakemake.config["foresight"] == "perfect":
             plot_carbon_budget_distribution(snakemake.input.eurostat)
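`any([...])` with a list comprehension builds the whole list first; passing a generator expression, as above, lets `any` short-circuit at the first match. Hypothetical options:

    opts = ["Co2L", "cb40ex0"]  # hypothetical wildcard options

    # same truth value, no intermediate list
    assert any(["cb" in o for o in opts]) == any("cb" in o for o in opts)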
@@ -84,13 +84,9 @@ def cross_border_time_series(countries, data):
         df_neg.plot.area(
             ax=ax[axis], stacked=True, linewidth=0.0, color=color, ylim=[-1, 1]
         )
-        if (axis % 2) == 0:
-            title = "Historic"
-        else:
-            title = "Optimized"
-
+        title = "Historic" if (axis % 2) == 0 else "Optimized"
         ax[axis].set_title(
-            title + " Import / Export for " + cc.convert(country, to="name_short")
+            f"{title} Import / Export for " + cc.convert(country, to="name_short")
         )

         # Custom legend elements
@@ -137,16 +133,12 @@ def cross_border_bar(countries, data):
         df_country = sort_one_country(country, df)
         df_neg, df_pos = df_country.clip(upper=0), df_country.clip(lower=0)

-        if (order % 2) == 0:
-            title = "Historic"
-        else:
-            title = "Optimized"
-
+        title = "Historic" if (order % 2) == 0 else "Optimized"
         df_positive_new = pd.DataFrame(data=df_pos.sum()).T.rename(
-            {0: title + " " + cc.convert(country, to="name_short")}
+            {0: f"{title} " + cc.convert(country, to="name_short")}
         )
         df_negative_new = pd.DataFrame(data=df_neg.sum()).T.rename(
-            {0: title + " " + cc.convert(country, to="name_short")}
+            {0: f"{title} " + cc.convert(country, to="name_short")}
         )

         df_positive = pd.concat([df_positive_new, df_positive])
@@ -56,7 +56,7 @@ def get_investment_weighting(time_weighting, r=0.01):
     end = time_weighting.cumsum()
     start = time_weighting.cumsum().shift().fillna(0)
     return pd.concat([start, end], axis=1).apply(
-        lambda x: sum([get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))]),
+        lambda x: sum(get_social_discount(t, r) for t in range(int(x[0]), int(x[1]))),
         axis=1,
     )
@@ -306,7 +306,7 @@ def set_carbon_constraints(n, opts):
         if m is not None:
             budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
             if budget != None:
-                logger.info("add carbon budget of {}".format(budget))
+                logger.info(f"add carbon budget of {budget}")
                 n.add(
                     "GlobalConstraint",
                     "Budget",
@@ -340,9 +340,7 @@ def set_carbon_constraints(n, opts):
             first_year = n.snapshots.levels[0][0]
             time_weightings = n.investment_period_weightings.loc[first_year, "years"]
             co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
-            logger.info(
-                "add minimum emissions for {} of {} t CO2/a".format(first_year, co2min)
-            )
+            logger.info(f"add minimum emissions for {first_year} of {co2min} t CO2/a")
             n.add(
                 "GlobalConstraint",
                 f"Co2Min-{first_year}",
@@ -519,9 +517,7 @@ if __name__ == "__main__":
             social_discountrate = float(o.replace("sdr", "")) / 100

     logger.info(
-        "Concat networks of investment period {} with social discount rate of {}%".format(
-            years, social_discountrate * 100
-        )
+        f"Concat networks of investment period {years} with social discount rate of {social_discountrate * 100}%"
     )

     # concat prenetworks of planning horizon to single network ------------
@@ -184,10 +184,7 @@ def get(item, investment_year=None):
     """
     Check whether item depends on investment year.
     """
-    if isinstance(item, dict):
-        return item[investment_year]
-    else:
-        return item
+    return item[investment_year] if isinstance(item, dict) else item


 def co2_emissions_year(
@@ -413,11 +410,9 @@ def update_wind_solar_costs(n, costs):
             # e.g. clusters == 37m means that VRE generators are left
             # at clustering of simplified network, but that they are
             # connected to 37-node network
-            if snakemake.wildcards.clusters[-1:] == "m":
-                genmap = busmap_s
-            else:
-                genmap = clustermaps
+            genmap = (
+                busmap_s if snakemake.wildcards.clusters[-1:] == "m" else clustermaps
+            )

             connection_cost = (connection_cost * weight).groupby(
                 genmap
             ).sum() / weight.groupby(genmap).sum()
@@ -505,8 +500,7 @@ def remove_non_electric_buses(n):
     """
     Remove buses from pypsa-eur with carriers which are not AC buses.
     """
-    to_drop = list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique())
-    if to_drop:
+    if to_drop := list(n.buses.query("carrier not in ['AC', 'DC']").carrier.unique()):
         logger.info(f"Drop buses from PyPSA-Eur with carrier: {to_drop}")
         n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])]
@@ -1232,11 +1226,9 @@ def add_storage_and_grids(n, costs):

         # apply k_edge_augmentation weighted by length of complement edges
         k_edge = options.get("gas_network_connectivity_upgrade", 3)
-        augmentation = list(
+        if augmentation := list(
             k_edge_augmentation(G, k_edge, avail=complement_edges.values)
-        )
-
-        if augmentation:
+        ):
             new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"])
             new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1)
@@ -152,22 +152,20 @@ def _prepare_connection_costs_per_link(n, costs, renewable_carriers, length_factor):
     if n.links.empty:
         return {}

-    connection_costs_per_link = {}
-
-    for tech in renewable_carriers:
-        if tech.startswith("offwind"):
-            connection_costs_per_link[tech] = (
-                n.links.length
-                * length_factor
-                * (
-                    n.links.underwater_fraction
-                    * costs.at[tech + "-connection-submarine", "capital_cost"]
-                    + (1.0 - n.links.underwater_fraction)
-                    * costs.at[tech + "-connection-underground", "capital_cost"]
-                )
-            )
-
-    return connection_costs_per_link
+    return {
+        tech: (
+            n.links.length
+            * length_factor
+            * (
+                n.links.underwater_fraction
+                * costs.at[tech + "-connection-submarine", "capital_cost"]
+                + (1.0 - n.links.underwater_fraction)
+                * costs.at[tech + "-connection-underground", "capital_cost"]
+            )
+        )
+        for tech in renewable_carriers
+        if tech.startswith("offwind")
+    }


 def _compute_connection_costs_to_bus(
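The hunk above is the dict-comprehension-with-guard variant: the `for`/`if`/assign loop that filled `connection_costs_per_link` becomes a comprehension whose `if` clause keeps only offshore carriers. A sketch with a placeholder cost value:

    renewable_carriers = ["onwind", "offwind-ac", "offwind-dc", "solar"]  # hypothetical

    connection_costs_per_link = {
        tech: 42.0  # placeholder for the length/underwater-fraction cost term
        for tech in renewable_carriers
        if tech.startswith("offwind")
    }
    assert set(connection_costs_per_link) == {"offwind-ac", "offwind-dc"}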
@@ -153,12 +153,7 @@ def _add_land_use_constraint_m(n, planning_horizons, config):
     for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
         existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
         ind = list(
-            set(
-                [
-                    i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
-                    for i in existing.index
-                ]
-            )
+            {i.split(sep=" ")[0] + " " + i.split(sep=" ")[1] for i in existing.index}
         )

         previous_years = [
@@ -217,7 +212,6 @@ def add_carbon_constraint(n, snapshots):
     if glcs.empty:
         return
     for name, glc in glcs.iterrows():
-        rhs = glc.constant
         carattr = glc.carrier_attribute
         emissions = n.carriers.query(f"{carattr} != 0")[carattr]
@@ -227,14 +221,15 @@ def add_carbon_constraint(n, snapshots):
         # stores
         n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
         stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
-        time_valid = int(glc.loc["investment_period"])
         if not stores.empty:
             last = n.snapshot_weightings.reset_index().groupby("period").last()
             last_i = last.set_index([last.index, last.timestep]).index
             final_e = n.model["Store-e"].loc[last_i, stores.index]
+            time_valid = int(glc.loc["investment_period"])
             time_i = pd.IndexSlice[time_valid, :]
             lhs = final_e.loc[time_i, :] - final_e.shift(snapshot=1).loc[time_i, :]

+            rhs = glc.constant
             n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
@@ -243,7 +238,6 @@ def add_carbon_budget_constraint(n, snapshots):
     if glcs.empty:
         return
     for name, glc in glcs.iterrows():
-        rhs = glc.constant
         carattr = glc.carrier_attribute
         emissions = n.carriers.query(f"{carattr} != 0")[carattr]
@@ -253,15 +247,16 @@ def add_carbon_budget_constraint(n, snapshots):
         # stores
         n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
         stores = n.stores.query("carrier in @emissions.index and not e_cyclic")
-        time_valid = int(glc.loc["investment_period"])
-        weighting = n.investment_period_weightings.loc[time_valid, "years"]
         if not stores.empty:
             last = n.snapshot_weightings.reset_index().groupby("period").last()
             last_i = last.set_index([last.index, last.timestep]).index
             final_e = n.model["Store-e"].loc[last_i, stores.index]
+            time_valid = int(glc.loc["investment_period"])
             time_i = pd.IndexSlice[time_valid, :]
+            weighting = n.investment_period_weightings.loc[time_valid, "years"]
             lhs = final_e.loc[time_i, :] * weighting

+            rhs = glc.constant
             n.model.add_constraints(lhs <= rhs, name=f"GlobalConstraint-{name}")
@@ -350,8 +345,7 @@ def prepare_network(
     ):
         df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

-    load_shedding = solve_opts.get("load_shedding")
-    if load_shedding:
+    if load_shedding := solve_opts.get("load_shedding"):
         # intersect between macroeconomic and surveybased willingness to pay
         # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
         # TODO: retrieve color and nice name from config
@@ -803,9 +797,7 @@ def solve_network(n, config, solving, opts="", **kwargs):
     set_of_options = solving["solver"]["options"]
     cf_solving = solving["options"]

-    kwargs["multi_investment_periods"] = (
-        True if config["foresight"] == "perfect" else False
-    )
+    kwargs["multi_investment_periods"] = config["foresight"] == "perfect"
     kwargs["solver_options"] = (
         solving["solver_options"][set_of_options] if set_of_options else {}
     )
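The hunk above reduces `True if cond else False` to the condition itself, since the comparison already yields a bool:

    config = {"foresight": "perfect"}  # hypothetical config

    multi_investment_periods = config["foresight"] == "perfect"
    assert multi_investment_periods is True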
@@ -903,7 +895,7 @@ if __name__ == "__main__":
             log_fn=snakemake.log.solver,
         )

-    logger.info("Maximum memory usage: {}".format(mem.mem_usage))
+    logger.info(f"Maximum memory usage: {mem.mem_usage}")

     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
@@ -7,6 +7,7 @@ Solves linear optimal dispatch in hourly resolution using the capacities of
 previous capacity expansion in rule :mod:`solve_network`.
 """

+
 import logging

 import numpy as np
@@ -35,7 +36,7 @@ if __name__ == "__main__":
     configure_logging(snakemake)
     update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

-    opts = (snakemake.wildcards.opts + "-" + snakemake.wildcards.sector_opts).split("-")
+    opts = f"{snakemake.wildcards.opts}-{snakemake.wildcards.sector_opts}".split("-")
     opts = [o for o in opts if o != ""]
     solve_opts = snakemake.params.options