[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
This commit is contained in:
pre-commit-ci[bot] 2023-04-06 13:57:26 +00:00
parent a72a620a60
commit 5f554ab28f
7 changed files with 188 additions and 173 deletions


@@ -65,6 +65,7 @@ rule solve_sector_networks:
**config["scenario"]
),
rule solve_sector_networks_perfect:
input:
expand(
@@ -73,6 +74,7 @@ rule solve_sector_networks_perfect:
**config["scenario"]
),
rule plot_networks:
input:
expand(


@@ -172,10 +172,7 @@ rule make_summary_perfect:
log:
LOGS + "make_summary_perfect.log",
benchmark:
(
BENCHMARKS
+ "make_summary_perfect"
)
(BENCHMARKS + "make_summary_perfect")
conda:
"../envs/environment.yaml"
script:


@@ -4,39 +4,33 @@
# SPDX-License-Identifier: MIT
"""
Create summary CSV files for all scenario runs with perfect foresight
including costs, capacities, capacity factors, curtailment, energy balances,
prices and other metrics.
Create summary CSV files for all scenario runs with perfect foresight including
costs, capacities, capacity factors, curtailment, energy balances, prices and
other metrics.
"""
from six import iteritems
import pandas as pd
import numpy as np
import pandas as pd
import pypsa
from pypsa.descriptors import (
nominal_attrs,
get_active_assets,
)
from _helpers import override_component_attrs
from make_summary import (
assign_carriers,
assign_locations,
calculate_cfs,
calculate_nodal_cfs,
calculate_nodal_costs,
)
from prepare_sector_network import prepare_costs
from make_summary import (assign_carriers, assign_locations,
calculate_cfs, calculate_nodal_cfs,
calculate_nodal_costs)
from pypsa.descriptors import get_active_assets, nominal_attrs
from six import iteritems
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def calculate_costs(n, label, costs):
def calculate_costs(n, label, costs):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
@@ -65,9 +59,7 @@ def calculate_costs(n, label, costs):
n.investment_period_weightings["objective"]
/ n.investment_period_weightings["years"]
)
capital_costs_grouped = (
capital_costs.groupby(c.df.carrier).sum().mul(discount)
)
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum().mul(discount)
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
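The discount applied in this hunk is each investment period's objective weighting divided by its length in years. A minimal, self-contained sketch of the same pandas pattern, with invented periods and numbers standing in for n.investment_period_weightings and the component's capital costs:

import pandas as pd

# toy stand-in for n.investment_period_weightings (values are assumptions)
weightings = pd.DataFrame(
    {"objective": [4.7, 4.3], "years": [5, 5]}, index=[2030, 2035]
)
discount = weightings["objective"] / weightings["years"]

# toy capital costs: one row per asset, one column per investment period
capital_costs = pd.DataFrame(
    [[100.0, 100.0], [50.0, 55.0]],
    index=["wind-1", "solar-1"],
    columns=[2030, 2035],
)
carrier = pd.Series({"wind-1": "wind", "solar-1": "solar"})

# same pattern as above: group rows by carrier, then discount each period
capital_costs_grouped = capital_costs.groupby(carrier).sum().mul(discount)
print(capital_costs_grouped)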
@@ -176,7 +168,6 @@ def calculate_nodal_capacities(n, label, nodal_capacities):
def calculate_capacities(n, label, capacities):
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
[
@@ -202,9 +193,7 @@ def calculate_capacities(n, label, capacities):
caps = c.df[opt_name.get(c.name, "p") + "_nom_opt"]
caps = active.mul(caps, axis=0)
capacities_grouped = (
caps.groupby(c.df.carrier)
.sum()
.drop("load", errors="ignore")
caps.groupby(c.df.carrier).sum().drop("load", errors="ignore")
)
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
@@ -218,7 +207,6 @@ def calculate_capacities(n, label, capacities):
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
@@ -233,9 +221,7 @@ def calculate_curtailment(n, label, curtailment):
def calculate_energy(n, label, energy):
for c in n.iterate_components(n.one_port_components | n.branch_components):
if c.name in n.one_port_components:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings, axis=0)
@@ -265,7 +251,10 @@ def calculate_energy(n, label, energy):
def calculate_supply(n, label, supply):
"""calculate the max dispatch of each component at the buses aggregated by carrier"""
"""
Calculate the max dispatch of each component at the buses aggregated by
carrier.
"""
bus_carriers = n.buses.carrier.unique()
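To make the docstring concrete: "max dispatch aggregated by carrier" reduces to a max over the time axis followed by a carrier groupby. A toy pandas-only sketch with invented generators and numbers, not the actual PyPSA accessors:

import pandas as pd

gens = pd.DataFrame(
    {"bus": ["DE0", "DE0", "FR0"], "carrier": ["wind", "solar", "wind"]},
    index=["g1", "g2", "g3"],
)
dispatch = pd.DataFrame({"g1": [1.0, 3.0], "g2": [2.0, 1.0], "g3": [4.0, 2.0]})

# max over snapshots per asset, then aggregated by technology carrier
print(dispatch.max().groupby(gens.carrier).sum())  # solar 2.0, wind 7.0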
@@ -274,7 +263,6 @@ def calculate_supply(n, label, supply):
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
@@ -294,9 +282,7 @@ def calculate_supply(n, label, supply):
supply.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
@@ -317,7 +303,10 @@ def calculate_supply(n, label, supply):
def calculate_supply_energy(n, label, supply_energy):
"""calculate the total energy supply/consuption of each component at the buses aggregated by carrier"""
"""
Calculate the total energy supply/consuption of each component at the buses
aggregated by carrier.
"""
investments = n.investment_periods
cols = pd.MultiIndex.from_product(
@@ -338,7 +327,6 @@ def calculate_supply_energy(n, label, supply_energy):
bus_map.at[""] = False
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
@@ -350,9 +338,11 @@ def calculate_supply_energy(n, label, supply_energy):
weightings = n.snapshot_weightings.stores
if i in ["oil", "co2", "H2"]:
if c.name=="Load":
c.df.loc[items, "carrier"] = [load.split("-202")[0] for load in items]
if i=="oil" and c.name=="Generator":
if c.name == "Load":
c.df.loc[items, "carrier"] = [
load.split("-202")[0] for load in items
]
if i == "oil" and c.name == "Generator":
c.df.loc[items, "carrier"] = "imported oil"
s = (
c.pnl.p[items]
@@ -373,9 +363,7 @@ def calculate_supply_energy(n, label, supply_energy):
supply_energy.loc[s.index, label] = s.values
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map).fillna(False)]
if len(items) == 0:
@@ -405,7 +393,6 @@ def calculate_supply_energy(n, label, supply_energy):
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
pd.Index(
[
@@ -438,15 +425,10 @@ def calculate_metrics(n, label, metrics):
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = (
n.buses_t.marginal_price.mean()
.groupby(n.buses.carrier)
.mean()
)
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
return prices
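The warning above deserves a two-line illustration: when load and price are anti-correlated, the plain time average and a load-weighted average diverge sharply. Numbers are invented for the example:

import pandas as pd

price = pd.Series([10.0, 100.0])  # marginal price in two snapshots
load = pd.Series([90.0, 10.0])    # load in the same snapshots

time_averaged = price.mean()                       # 55.0
load_weighted = (price * load).sum() / load.sum()  # 19.0
print(time_averaged, load_weighted)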
@@ -484,7 +466,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
}
for carrier in link_loads:
if carrier == "electricity":
suffix = ""
elif carrier[:5] == "space":
@@ -503,7 +484,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
load = n.loads_t.p_set.reindex(buses, axis=1)
for tech in link_loads[carrier]:
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
@@ -585,7 +565,6 @@ def calculate_market_values(n, label, market_values):
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(
price_statistics.index.union(
pd.Index(["zero_hours", "mean", "standard_deviation"])
@@ -604,9 +583,7 @@ def calculate_price_statistics(n, label, price_statistics):
df.shape[0] * df.shape[1]
)
price_statistics.at["mean", label] = (
n.buses_t.marginal_price[buses].mean().mean()
)
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].mean().mean()
price_statistics.at["standard_deviation", label] = (
n.buses_t.marginal_price[buses].droplevel(0).unstack().std()
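The three statistics collected here reduce to elementary pandas operations. A self-contained sketch with an invented price matrix (snapshots as rows, buses as columns):

import pandas as pd

prices = pd.DataFrame({"DE0": [0.0, 20.0, 40.0], "FR0": [10.0, 0.0, 50.0]})

zero_hours = (prices == 0).sum().sum() / (prices.shape[0] * prices.shape[1])
mean = prices.mean().mean()
standard_deviation = prices.unstack().std()
print(zero_hours, mean, standard_deviation)  # 0.33..., 20.0, ~20.98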
@@ -616,7 +593,6 @@ def calculate_price_statistics(n, label, price_statistics):
def calculate_co2_emissions(n, label, df):
carattr = "co2_emissions"
emissions = n.carriers.query(f"{carattr} != 0")[carattr]
@@ -642,11 +618,7 @@ def calculate_co2_emissions(n, label, df):
emitted = n.generators_t.p[gens.index].mul(em_pu)
emitted_grouped = (
emitted.groupby(level=0)
.sum()
.groupby(n.generators.carrier, axis=1)
.sum()
.T
emitted.groupby(level=0).sum().groupby(n.generators.carrier, axis=1).sum().T
)
df = df.reindex(emitted_grouped.index.union(df.index))
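The chained groupby in this hunk aggregates twice: over snapshots within each investment period (row level 0) and over generators sharing a carrier (columns). A toy reproduction; since groupby(..., axis=1) is deprecated in recent pandas, this sketch uses the equivalent transpose form:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [[2030, 2035], [0, 1]], names=["period", "snapshot"]
)
emitted = pd.DataFrame(
    {"coal-1": [1.0, 1.0, 0.5, 0.5], "gas-1": [0.2, 0.2, 0.1, 0.1]}, index=idx
)
carrier = pd.Series({"coal-1": "coal", "gas-1": "gas"})

per_period = emitted.groupby(level=0).sum()         # sum snapshots per period
by_carrier = per_period.T.groupby(carrier).sum().T  # sum columns per carrier
print(by_carrier)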
@@ -681,7 +653,6 @@ outputs = [
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["cluster", "lv", "opt"]
)
@@ -694,9 +665,7 @@ def make_summaries(networks_dict):
for label, filename in iteritems(networks_dict):
print(label, filename)
try:
n = pypsa.Network(
filename, override_component_attrs=overrides
)
n = pypsa.Network(filename, override_component_attrs=overrides)
except OSError:
print(label, " not solved yet.")
continue
@@ -715,33 +684,32 @@ def make_summaries(networks_dict):
def to_csv(df):
for key in df:
df[key] = df[key].apply(lambda x: pd.to_numeric(x))
df[key].to_csv(snakemake.output[key])
#%%
# %%
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('make_summary_perfect')
snakemake = mock_snakemake("make_summary_perfect")
networks_dict = {
(clusters, lv, opts+sector_opts) :
"results/" + snakemake.config['run']["name"] + f'postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc' \
for simpl in snakemake.config['scenario']['simpl'] \
for clusters in snakemake.config['scenario']['clusters'] \
for opts in snakemake.config['scenario']['opts'] \
for sector_opts in snakemake.config['scenario']['sector_opts'] \
for lv in snakemake.config['scenario']['ll'] \
(clusters, lv, opts + sector_opts): "results/"
+ snakemake.config["run"]["name"]
+ f"postnetworks/elec_s{simpl}_{clusters}_l{lv}_{opts}_{sector_opts}_brownfield_all_years.nc"
for simpl in snakemake.config["scenario"]["simpl"]
for clusters in snakemake.config["scenario"]["clusters"]
for opts in snakemake.config["scenario"]["opts"]
for sector_opts in snakemake.config["scenario"]["sector_opts"]
for lv in snakemake.config["scenario"]["ll"]
}
print(networks_dict)
nyears = 1
costs_db = prepare_costs(
snakemake.input.costs,


@@ -44,7 +44,7 @@ def rename_techs(label):
"land transport fuel cell",
"land transport oil",
"H2 for industry",
"shipping oil"
"shipping oil",
]
rename_if_contains_dict = {
@@ -277,7 +277,6 @@ def plot_balances():
i for i in balances_df.index.levels[0] if i not in co2_carriers
]
for k, v in balances.items():
df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()
@@ -462,18 +461,21 @@ def plot_carbon_budget_distribution(input_eurostat, input_eea):
plt.rcParams["xtick.labelsize"] = 20
plt.rcParams["ytick.labelsize"] = 20
path_cb = "results/" + snakemake.params.RDIR + "csvs/"
countries = snakemake.config["countries"]
emissions_scope = snakemake.config["energy"]["emissions"]
# this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
report_year = snakemake.config["energy"]["eurostat_report_year"]
e_1990 = co2_emissions_year(
countries, input_eurostat, input_eea, opts, emissions_scope,
report_year, year=1990
countries,
input_eurostat,
input_eea,
opts,
emissions_scope,
report_year,
year=1990,
)
CO2_CAP = pd.read_csv(path_cb + "carbon_budget_distribution.csv", index_col=0)
plt.figure(figsize=(10, 7))
@@ -483,7 +485,6 @@ def plot_carbon_budget_distribution(input_eurostat, input_eea):
ax1.set_ylim([0, 5])
ax1.set_xlim([1990, snakemake.config["scenario"]["planning_horizons"][-1] + 1])
ax1.plot(e_1990 * CO2_CAP[o], linewidth=3, color="dodgerblue", label=None)
emissions = historical_emissions(countries)
@@ -560,7 +561,8 @@ def plot_carbon_budget_distribution(input_eurostat, input_eea):
path_cb_plot = "results/" + snakemake.params.RDIR + "/graphs/"
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)
#%%
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
@@ -581,5 +583,6 @@ if __name__ == "__main__":
opts = sector_opts.split("-")
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution(snakemake.input.eurostat,
snakemake.input.co2)
plot_carbon_budget_distribution(
snakemake.input.eurostat, snakemake.input.co2
)


@@ -7,43 +7,50 @@
Concatenates PyPSA networks of single investment periods into one network.
"""
import pypsa
import pandas as pd
from _helpers import override_component_attrs, update_config_with_sector_opts
from pypsa.io import import_components_from_dataframe
from add_existing_baseyear import add_build_year_to_new_assets
from six import iterkeys
from pypsa.descriptors import expand_series
import re
import logging
import re
import pandas as pd
import pypsa
from _helpers import override_component_attrs, update_config_with_sector_opts
from add_existing_baseyear import add_build_year_to_new_assets
from pypsa.descriptors import expand_series
from pypsa.io import import_components_from_dataframe
from six import iterkeys
logger = logging.getLogger(__name__)
# helper functions ---------------------------------------------------
def get_missing(df, n, c):
"""Get in network n missing assets of df for component c.
"""
Get in network n missing assets of df for component c.
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
Input:
df: pandas DataFrame, static values of pypsa components
n : pypsa Network to which new assets should be added
c : string, pypsa component.list_name (e.g. "generators")
Return:
pd.DataFrame with static values of missing assets
"""
df_final = getattr(n, c)
missing_i = df.index.difference(df_final.index)
return df.loc[missing_i]
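The body above is a plain index difference. A hypothetical usage with two made-up static frames shows what would be added:

import pandas as pd

existing = pd.DataFrame({"p_nom": [10.0]}, index=["gen-2020"])  # already in n
incoming = pd.DataFrame(
    {"p_nom": [10.0, 20.0]}, index=["gen-2020", "gen-2030"]
)  # next period's assets

missing_i = incoming.index.difference(existing.index)
print(incoming.loc[missing_i])  # only gen-2030 would be added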
def get_social_discount(t, r=0.01):
"""Calculate for a given time t the social discount."""
"""
Calculate for a given time t the social discount.
"""
return 1 / (1 + r) ** t
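A quick worked example of the formula: with the default rate r = 0.01, a cost falling t = 10 years into the horizon is weighted by roughly 0.905:

def get_social_discount(t, r=0.01):
    # discount factor 1 / (1 + r) ** t, as defined above
    return 1 / (1 + r) ** t

print(round(get_social_discount(10), 4))  # 0.9053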
def get_investment_weighting(time_weighting, r=0.01):
"""Define cost weighting.
"""
Define cost weighting.
Returns cost weightings depending on the time_weighting (pd.Series)
and the social discount rate r
Returns cost weightings depending on the time_weighting
(pd.Series) and the social discount rate r
"""
end = time_weighting.cumsum()
start = time_weighting.cumsum().shift().fillna(0)
@@ -52,6 +59,7 @@ def get_investment_weighting(time_weighting, r=0.01):
axis=1,
)
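Reading the cumsum/shift pair above: each investment period spans the years [start, end) counted from the horizon start, and its objective weighting is the sum of the yearly social discount factors over that span. A runnable sketch with two assumed five-year periods:

import pandas as pd

def get_social_discount(t, r=0.01):
    return 1 / (1 + r) ** t

time_weighting = pd.Series([5, 5], index=[2030, 2035])  # assumed lengths
end = time_weighting.cumsum()
start = end.shift().fillna(0)
objective = pd.concat([start, end], axis=1).apply(
    lambda x: sum(get_social_discount(t) for t in range(int(x.iloc[0]), int(x.iloc[1]))),
    axis=1,
)
print(objective)  # ~4.902 for years 0-4, ~4.664 for years 5-9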
def add_year_to_constraints(n, baseyear):
"""
Parameters
@@ -62,28 +70,27 @@ def add_year_to_constraints(n, baseyear):
"""
for c in n.iterate_components(["GlobalConstraint"]):
c.df["investment_period"] = baseyear
c.df.rename(index=lambda x: x + "-" + str(baseyear), inplace=True)
# --------------------------------------------------------------------
def concat_networks(years):
"""Concat given pypsa networks and adds build_year.
"""
Concat given pypsa networks and adds build_year.
Return:
n : pypsa.Network for the whole planning horizon
"""
Return:
n : pypsa.Network for the whole planning horizon
"""
# input paths of sector coupling networks
network_paths = [snakemake.input.brownfield_network] + [
snakemake.input[f"network_{year}"] for year in years[1:]]
snakemake.input[f"network_{year}"] for year in years[1:]
]
# final concatenated network
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(override_component_attrs=overrides)
# iterate over single year networks and concat to perfect foresight network
for i, network_path in enumerate(network_paths):
year = years[i]
@@ -102,7 +109,6 @@ def concat_networks(years):
for component in network.iterate_components(
["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
):
df_year = component.df.copy()
missing = get_missing(df_year, n, component.list_name)
@@ -117,14 +123,14 @@ def concat_networks(years):
pnl = getattr(n, component.list_name + "_t")
for k in iterkeys(component.pnl):
pnl_year = component.pnl[k].copy().reindex(snapshots, level=1)
if pnl_year.empty and ~(component.name=="Load" and k=="p_set"): continue
if pnl_year.empty and not (component.name == "Load" and k == "p_set"):
continue
if component.name == "Load":
static_load = network.loads.loc[network.loads.p_set != 0]
static_load_t = expand_series(
static_load.p_set, network_sns
).T
pnl_year = pd.concat([pnl_year.reindex(network_sns),
static_load_t], axis=1)
static_load_t = expand_series(static_load.p_set, network_sns).T
pnl_year = pd.concat(
[pnl_year.reindex(network_sns), static_load_t], axis=1
)
columns = (pnl[k].columns.union(pnl_year.columns)).unique()
pnl[k] = pnl[k].reindex(columns=columns)
pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year
@@ -134,8 +140,7 @@ def concat_networks(years):
cols = pnl_year.columns.difference(pnl[k].columns)
pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)
n.snapshot_weightings.loc[year,:] = network.snapshot_weightings.values
n.snapshot_weightings.loc[year, :] = network.snapshot_weightings.values
# (3) global constraints
for component in network.iterate_components(["GlobalConstraint"]):
@@ -148,18 +153,22 @@ def concat_networks(years):
time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
n.investment_period_weightings["years"] = time_w
# set objective weightings
objective_w = get_investment_weighting(n.investment_period_weightings["years"],
social_discountrate)
objective_w = get_investment_weighting(
n.investment_period_weightings["years"], social_discountrate
)
n.investment_period_weightings["objective"] = objective_w
# all former static loads are now time-dependent -> set static = 0
n.loads["p_set"] = 0
n.loads_t.p_set.fillna(0,inplace=True)
n.loads_t.p_set.fillna(0, inplace=True)
return n
def adjust_stores(n):
"""Make sure that stores still behave cyclic over one year and not whole
modelling horizon."""
"""
Make sure that stores still behave cyclic over one year and not whole
modelling horizon.
"""
# cyclic constraint
cyclic_i = n.stores[n.stores.e_cyclic].index
n.stores.loc[cyclic_i, "e_cyclic_per_period"] = True
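A minimal sketch of the flag flip on an invented stores table; the real code operates on n.stores, and clearing the horizon-wide e_cyclic flag is an assumption of this sketch:

import pandas as pd

stores = pd.DataFrame({"e_cyclic": [True, False]}, index=["H2 Store", "battery"])
stores["e_cyclic_per_period"] = False

cyclic_i = stores[stores.e_cyclic].index
stores.loc[cyclic_i, "e_cyclic_per_period"] = True  # cyclic within each period
stores.loc[cyclic_i, "e_cyclic"] = False  # assumed: no horizon-wide cycling
print(stores)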
@@ -177,42 +186,50 @@ def adjust_stores(n):
return n
def set_phase_out(n, carrier, ct, phase_out_year):
"""Set planned phase outs for given carrier,country (ct) and planned year
of phase out (phase_out_year)."""
df = n.links[(n.links.carrier.isin(carrier))& (n.links.bus1.str[:2]==ct)]
"""
Set planned phase outs for given carrier,country (ct) and planned year of
phase out (phase_out_year).
"""
df = n.links[(n.links.carrier.isin(carrier)) & (n.links.bus1.str[:2] == ct)]
# assets which are going to be phased out before end of their lifetime
assets_i = df[df[["build_year", "lifetime"]].sum(axis=1) > phase_out_year].index
build_year = n.links.loc[assets_i, "build_year"]
# adjust lifetime
n.links.loc[assets_i, "lifetime"] = (phase_out_year - build_year).astype(float)
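A worked example of the lifetime clipping, with an invented asset: a link built in 2010 with a 40-year technical lifetime would run to 2050, but a 2022 phase-out clips it to 12 years:

build_year, phase_out_year = 2010, 2022  # assumed asset data
lifetime = float(phase_out_year - build_year)
print(lifetime)  # 12.0, so the asset is no longer active from 2022 on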
def set_all_phase_outs(n):
# TODO move this to a csv or to the config
planned= [(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2038),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025)]
planned = [
(["nuclear"], "DE", 2022),
(["nuclear"], "BE", 2025),
(["nuclear"], "ES", 2027),
(["coal", "lignite"], "DE", 2038),
(["coal", "lignite"], "ES", 2027),
(["coal", "lignite"], "FR", 2022),
(["coal", "lignite"], "GB", 2024),
(["coal", "lignite"], "IT", 2025),
]
for carrier, ct, phase_out_year in planned:
set_phase_out(n, carrier,ct, phase_out_year)
set_phase_out(n, carrier, ct, phase_out_year)
# remove assets which are already phased out
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1)<years[0]].index
remove_i = n.links[n.links[["build_year", "lifetime"]].sum(axis=1) < years[0]].index
n.mremove("Link", remove_i)
def set_carbon_constraints(n, opts):
"""Add global constraints for carbon emissions."""
"""
Add global constraints for carbon emissions.
"""
budget = None
for o in opts:
# other budgets
m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
if m is not None:
budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
if budget!=None:
if budget is not None:
logger.info("add carbon budget of {}".format(budget))
n.add(
"GlobalConstraint",
@@ -238,12 +255,14 @@ def set_carbon_constraints(n, opts):
if "co2min" in opts:
emissions_1990 = 4.53693
emissions_2019 = 3.344096
target_2030 = 0.45*emissions_1990
annual_reduction = (emissions_2019-target_2030)/11
target_2030 = 0.45 * emissions_1990
annual_reduction = (emissions_2019 - target_2030) / 11
first_year = n.snapshots.levels[0][0]
time_weightings = n.investment_period_weightings.loc[first_year, "years"]
co2min = emissions_2019-((first_year-2019)*annual_reduction)
logger.info("add minimum emissions for {} of {} t CO2/a".format(first_year, co2min))
co2min = emissions_2019 - ((first_year - 2019) * annual_reduction)
logger.info(
"add minimum emissions for {} of {} t CO2/a".format(first_year, co2min)
)
n.add(
"GlobalConstraint",
"Co2min",
@@ -251,34 +270,40 @@ def set_carbon_constraints(n, opts):
carrier_attribute="co2_emissions",
sense=">=",
investment_period=first_year,
constant=co2min*1e9*time_weightings,
constant=co2min * 1e9 * time_weightings,
)
return n
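The co2min branch above interpolates linearly from 2019 emissions toward a 2030 target of 45 % of the 1990 level. A worked check with the script's constants and an assumed first investment period of 2025:

emissions_1990 = 4.53693   # Gt CO2, from the script
emissions_2019 = 3.344096  # Gt CO2, from the script

target_2030 = 0.45 * emissions_1990                     # ~2.042 Gt
annual_reduction = (emissions_2019 - target_2030) / 11  # ~0.118 Gt/a

first_year = 2025  # assumption for this example
co2min = emissions_2019 - (first_year - 2019) * annual_reduction
print(round(co2min, 3))  # 2.634 Gt CO2/a as the emission floor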
#%%
# %%
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
'prepare_perfect_foresight',
simpl='',
"prepare_perfect_foresight",
simpl="",
opts="",
clusters="37",
ll=1.0,
sector_opts='cb40ex0-2190H-T-H-B-solar+p3-dist1',
sector_opts="cb40ex0-2190H-T-H-B-solar+p3-dist1",
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
# parameters -----------------------------------------------------------
years = snakemake.config["scenario"]["planning_horizons"]
opts = snakemake.wildcards.sector_opts.split('-')
opts = snakemake.wildcards.sector_opts.split("-")
social_discountrate = snakemake.config["costs"]["social_discountrate"]
for o in opts:
if "sdr" in o:
social_discountrate = float(o.replace("sdr",""))/100
social_discountrate = float(o.replace("sdr", "")) / 100
logger.info("Concat networks of investment period {} with social discount rate of {}%"
.format(years, social_discountrate*100))
logger.info(
"Concat networks of investment period {} with social discount rate of {}%".format(
years, social_discountrate * 100
)
)
# concat prenetworks of planning horizon to single network ------------
n = concat_networks(years)
@@ -289,7 +314,7 @@ if __name__ == "__main__":
n = adjust_stores(n)
# set carbon constraints
opts = snakemake.wildcards.sector_opts.split('-')
opts = snakemake.wildcards.sector_opts.split("-")
n = set_carbon_constraints(n, opts)
# export network


@@ -246,12 +246,24 @@ def build_carbon_budget(o, input_eurostat, input_eea, fn, emissions_scope, repor
countries = snakemake.config["countries"]
e_1990 = co2_emissions_year(
countries, input_eurostat, input_eea, opts, emissions_scope, report_year, year=1990
countries,
input_eurostat,
input_eea,
opts,
emissions_scope,
report_year,
year=1990,
)
# emissions at the beginning of the path (last year available 2018)
e_0 = co2_emissions_year(
countries, input_eurostat, input_eea, opts, emissions_scope, report_year, year=2018
countries,
input_eurostat,
input_eea,
opts,
emissions_scope,
report_year,
year=2018,
)
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
@@ -1130,7 +1142,9 @@ def add_storage_and_grids(n, costs):
e_cyclic=True,
carrier="H2 Store",
capital_cost=h2_capital_cost,
lifetime=costs.at["hydrogen storage tank type 1 including compressor", "lifetime"],
lifetime=costs.at[
"hydrogen storage tank type 1 including compressor", "lifetime"
],
)
if options["gas_network"] or options["H2_retrofit"]:
@@ -3287,7 +3301,7 @@ if __name__ == "__main__":
spatial = define_spatial(pop_layout.index, options)
if snakemake.config["foresight"] in ['myopic', 'perfect']:
if snakemake.config["foresight"] in ["myopic", "perfect"]:
add_lifetime_wind_solar(n, costs)
conventional = snakemake.config["existing_capacities"]["conventional_carriers"]
@@ -3369,7 +3383,12 @@ if __name__ == "__main__":
emissions_scope = snakemake.config["energy"]["emissions"]
report_year = snakemake.config["energy"]["eurostat_report_year"]
build_carbon_budget(
o, snakemake.input.eurostat, snakemake.input.co2, fn, emissions_scope, report_year
o,
snakemake.input.eurostat,
snakemake.input.co2,
fn,
emissions_scope,
report_year,
)
co2_cap = pd.read_csv(fn, index_col=0).squeeze()
limit = co2_cap.loc[investment_year]
@@ -3402,7 +3421,7 @@ if __name__ == "__main__":
if options["electricity_grid_connection"]:
add_electricity_grid_connection(n, costs)
first_year_multi = (snakemake.config["foresight"] in ['myopic', 'perfect']) and (
first_year_multi = (snakemake.config["foresight"] in ["myopic", "perfect"]) and (
snakemake.config["scenario"]["planning_horizons"][0] == investment_year
)


@@ -156,9 +156,9 @@ def prepare_network(n, solve_opts=None, config=None):
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
# TODO: retrieve color and nice name from config
n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding")
buses_i = n.buses.index # query("carrier == 'AC'").index
#if not np.isscalar(load_shedding):
# TODO: do not scale via sign attribute (use Eur/MWh instead of Eur/kWh)
buses_i = n.buses.index # query("carrier == 'AC'").index
# if not np.isscalar(load_shedding):
# TODO: do not scale via sign attribute (use Eur/MWh instead of Eur/kWh)
load_shedding = 1e2 # Eur/kWh
n.madd(
@@ -629,14 +629,15 @@ def solve_network(n, config, opts="", **kwargs):
return n
#%%
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"solve_sector_network_perfect",
#configfiles="config.yaml",
# configfiles="config.yaml",
simpl="",
opts="",
clusters="37",