split solve network rules, take summary scripts from Sec

Fabian Neumann 2023-03-06 12:10:23 +01:00
parent 97bbafdd70
commit 43f226b2a3
6 changed files with 372 additions and 1390 deletions

Snakefile

@@ -568,7 +568,7 @@ rule solve_network:
input:
"resources/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath(
"logs/"
@@ -595,11 +595,11 @@ rule solve_network:
rule solve_operations_network:
input:
unprepared="resources/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec.nc",
optimized="results/networks/"
optimized="results/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+ "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
"results/" + RDIR + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
log:
solver=normpath(
"logs/"
@@ -1206,7 +1206,7 @@ rule plot_summary:
if config["foresight"] == "overnight":
- rule solve_network:
+ rule solve_sector_network:
input:
overrides="data/override_component_attrs",
network="results/" + RDIR + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@@ -1221,8 +1221,8 @@ if config["foresight"] == "overnight":
memory=RDIR + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
threads: config['solving']['solver'].get('threads', 4)
resources: mem_mb=config['solving']['mem']
- benchmark: RDIR + "benchmarks/solve_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
- script: "scripts/solve_network.py"
+ benchmark: RDIR + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ script: "scripts/solve_sector_network.py"
if config["foresight"] == "myopic":
@@ -1277,7 +1277,7 @@ if config["foresight"] == "myopic":
ruleorder: add_existing_baseyear > add_brownfield
- rule solve_network_myopic:
+ rule solve_sector_network_myopic:
input:
overrides="data/override_component_attrs",
network="results/" + RDIR + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@@ -1291,5 +1291,5 @@ if config["foresight"] == "myopic":
memory=RDIR + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
threads: 4
resources: mem_mb=config['solving']['mem']
- benchmark: RDIR + "benchmarks/solve_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
- script: "scripts/solve_network.py"
+ benchmark: RDIR + "benchmarks/solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+ script: "scripts/solve_sector_network.py"

scripts/make_summary.py

@@ -1,124 +1,4 @@
# -*- coding: utf-8 -*-
<<<<<<< HEAD
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Creates summaries of aggregated energy and costs as ``.csv`` files.
Relevant Settings
-----------------
.. code:: yaml
costs:
year:
version:
fill_values:
marginal_cost:
capital_cost:
electricity:
max_hours:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`costs_cf`, :ref:`electricity_cf`
Inputs
------
Outputs
-------
Description
-----------
The following rule can be used to summarize the results in separate .csv files:
.. code:: bash
snakemake results/summaries/elec_s_all_lall_Co2L-3H_all
# wildcard parts, in order: clusters ("all"), line volume or cost cap ("lall"),
# options ("Co2L-3H"), countries ("all")
The line volume/cost cap field can be set to one of the following:
* ``lv1.25`` for a particular line volume extension by 25%
* ``lc1.25`` for a line cost extension by 25%
* ``lall`` for all evaluated caps
* ``lvall`` for all line volume caps
* ``lcall`` for all line cost caps
Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.
"""
import logging
import os
import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def _add_indexed_rows(df, raw_index):
new_index = df.index.union(pd.MultiIndex.from_product(raw_index))
if isinstance(new_index, pd.Index):
new_index = pd.MultiIndex.from_tuples(new_index)
return df.reindex(new_index)
def assign_carriers(n):
if "carrier" not in n.loads:
n.loads["carrier"] = "electricity"
for carrier in ["transport", "heat", "urban heat"]:
n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier
n.storage_units["carrier"].replace(
{"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True
)
if "carrier" not in n.lines:
n.lines["carrier"] = "AC"
n.lines["carrier"].replace({"AC": "lines"}, inplace=True)
if n.links.empty:
n.links["carrier"] = pd.Series(dtype=str)
n.links["carrier"].replace({"DC": "lines"}, inplace=True)
if (
"EU gas store" in n.stores.index
and n.stores.loc["EU gas Store", "carrier"] == ""
):
n.stores.loc["EU gas Store", "carrier"] = "gas Store"
def calculate_costs(n, label, costs):
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple(
[[c.list_name], ["capital"], list(capital_costs_grouped.index)]
)
costs = _add_indexed_rows(costs, raw_index)
costs.loc[idx[raw_index], label] = capital_costs_grouped.values
=======
import logging
logger = logging.getLogger(__name__)
@@ -229,7 +109,6 @@ def calculate_nodal_costs(n, label, nodal_costs):
)
nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
nodal_costs.loc[index, label] = capital_costs.values
>>>>>>> pypsa-eur-sec/master
if c.name == "Link":
p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
@@ -242,8 +121,6 @@ def calculate_nodal_costs(n, label, nodal_costs):
else:
p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
<<<<<<< HEAD
=======
# correct sequestration cost
if c.name == "Store":
items = c.df.index[
@@ -294,24 +171,10 @@ def calculate_costs(n, label, costs):
]
c.df.loc[items, "marginal_cost"] = -20.0
>>>>>>> pypsa-eur-sec/master
marginal_costs = p * c.df.marginal_cost
marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()
<<<<<<< HEAD
costs = costs.reindex(
costs.index.union(
pd.MultiIndex.from_product(
[[c.list_name], ["marginal"], marginal_costs_grouped.index]
)
)
)
costs.loc[
idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label
] = marginal_costs_grouped.values
=======
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
@@ -323,13 +186,10 @@ def calculate_costs(n, label, costs):
# costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum()
# costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum()
# costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum()
>>>>>>> pypsa-eur-sec/master
return costs
<<<<<<< HEAD
=======
def calculate_cumulative_cost():
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
@@ -399,7 +259,6 @@ def calculate_capacities(n, label, capacities):
return capacities
>>>>>>> pypsa-eur-sec/master
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
@@ -416,11 +275,7 @@ def calculate_curtailment(n, label, curtailment):
def calculate_energy(n, label, energy):
for c in n.iterate_components(n.one_port_components | n.branch_components):
<<<<<<< HEAD
if c.name in {"Generator", "Load", "ShuntImpedance"}:
=======
if c.name in n.one_port_components:
>>>>>>> pypsa-eur-sec/master
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.sum()
@@ -428,27 +283,6 @@ def calculate_energy(n, label, energy):
.groupby(c.df.carrier)
.sum()
)
<<<<<<< HEAD
elif c.name in {"StorageUnit", "Store"}:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier)
.sum()
)
else:
c_energies = (
(
-c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum()
- c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
)
.groupby(c.df.carrier)
.sum()
)
energy = include_in_summary(energy, [c.list_name], label, c_energies)
=======
else:
c_energies = pd.Series(0.0, c.df.carrier.unique())
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
@@ -469,65 +303,10 @@ def calculate_energy(n, label, energy):
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index, label] = c_energies
>>>>>>> pypsa-eur-sec/master
return energy
<<<<<<< HEAD
def include_in_summary(summary, multiindexprefix, label, item):
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([multiindexprefix, list(item.index)])
summary = _add_indexed_rows(summary, raw_index)
summary.loc[idx[raw_index], label] = item.values
return summary
def calculate_capacity(n, label, capacity):
for c in n.iterate_components(n.one_port_components):
if "p_nom_opt" in c.df.columns:
c_capacities = (
abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
)
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
elif "e_nom_opt" in c.df.columns:
c_capacities = (
abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
)
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.passive_branch_components):
c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.controllable_branch_components):
c_capacities = c.df.p_nom_opt.groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
return capacity
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses where the loads
are attached.
"""
load_types = n.buses.carrier.unique()
for i in load_types:
buses = n.buses.query("carrier == @i").index
bus_map = pd.Series(False, index=n.buses.index)
bus_map.loc[buses] = True
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)]
if len(items) == 0 or c.pnl.p.empty:
=======
def calculate_supply(n, label, supply):
"""
Calculate the max dispatch of each component at the buses aggregated by
@@ -544,7 +323,6 @@ def calculate_supply(n, label, supply):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
>>>>>>> pypsa-eur-sec/master
continue
s = (
@@ -554,20 +332,6 @@ def calculate_supply(n, label, supply):
.groupby(c.df.loc[items, "carrier"])
.sum()
)
<<<<<<< HEAD
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([[i], [c.list_name], list(s.index)])
supply = _add_indexed_rows(supply, raw_index)
supply.loc[idx[raw_index], label] = s.values
for c in n.iterate_components(n.branch_components):
for end in ["0", "1"]:
items = c.df.index[c.df["bus" + end].map(bus_map)]
if len(items) == 0 or c.pnl["p" + end].empty:
=======
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
@@ -579,53 +343,24 @@ def calculate_supply(n, label, supply):
items = c.df.index[c.df["bus" + end].map(bus_map).fillna(False)]
if len(items) == 0:
>>>>>>> pypsa-eur-sec/master
continue
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
<<<<<<< HEAD
supply = supply.reindex(
supply.index.union(
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
)
)
supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values
=======
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
>>>>>>> pypsa-eur-sec/master
return supply
def calculate_supply_energy(n, label, supply_energy):
"""
<<<<<<< HEAD
Calculate the total dispatch of each component at the buses where the loads
are attached.
"""
load_types = n.buses.carrier.unique()
for i in load_types:
buses = n.buses.query("carrier == @i").index
bus_map = pd.Series(False, index=n.buses.index)
bus_map.loc[buses] = True
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)]
if len(items) == 0 or c.pnl.p.empty:
=======
Calculate the total energy supply/consumption of each component at the buses
aggregated by carrier.
"""
@@ -640,46 +375,16 @@ def calculate_supply_energy(n, label, supply_energy):
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
>>>>>>> pypsa-eur-sec/master
continue
s = (
c.pnl.p[items]
<<<<<<< HEAD
=======
.multiply(n.snapshot_weightings.generators, axis=0)
>>>>>>> pypsa-eur-sec/master
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
<<<<<<< HEAD
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([[i], [c.list_name], list(s.index)])
supply_energy = _add_indexed_rows(supply_energy, raw_index)
supply_energy.loc[idx[raw_index], label] = s.values
for c in n.iterate_components(n.branch_components):
for end in ["0", "1"]:
items = c.df.index[c.df["bus" + end].map(bus_map)]
if len(items) == 0 or c.pnl["p" + end].empty:
continue
s = (-1) * c.pnl["p" + end][items].sum().groupby(
c.df.loc[items, "carrier"]
).sum()
supply_energy = supply_energy.reindex(
supply_energy.index.union(
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
)
)
supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values
=======
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
@@ -705,28 +410,11 @@ def calculate_supply_energy(n, label, supply_energy):
)
supply_energy.loc[s.index, label] = s
>>>>>>> pypsa-eur-sec/master
return supply_energy
def calculate_metrics(n, label, metrics):
<<<<<<< HEAD
metrics = metrics.reindex(
metrics.index.union(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
)
)
)
=======
metrics_list = [
"line_volume",
"line_volume_limit",
@@ -737,7 +425,6 @@ def calculate_metrics(n, label, metrics):
]
metrics = metrics.reindex(pd.Index(metrics_list).union(metrics.index))
>>>>>>> pypsa-eur-sec/master
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
@@ -749,11 +436,6 @@ def calculate_metrics(n, label, metrics):
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
<<<<<<< HEAD
if hasattr(n, "line_volume_limit_dual"):
=======
>>>>>>> pypsa-eur-sec/master
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
@@ -763,31 +445,16 @@ def calculate_metrics(n, label, metrics):
def calculate_prices(n, label, prices):
<<<<<<< HEAD
bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace(
"", "electricity"
)
prices = prices.reindex(prices.index.union(bus_type.value_counts().index))
logger.warning("Prices are time-averaged, not load-weighted")
prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()
=======
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
# WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
>>>>>>> pypsa-eur-sec/master
return prices
def calculate_weighted_prices(n, label, weighted_prices):
<<<<<<< HEAD
logger.warning("Weighted prices don't include storage units as loads")
=======
# Warning: doesn't include storage units as loads
>>>>>>> pypsa-eur-sec/master
weighted_prices = weighted_prices.reindex(
pd.Index(
@@ -847,22 +514,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
continue
load += (
<<<<<<< HEAD
n.links_t.p0[names]
.groupby(n.links.loc[names, "bus0"], axis=1)
.sum(axis=1)
)
# Add H2 Store when charging
if carrier == "H2":
stores = (
n.stores_t.p[buses + " Store"]
.groupby(n.stores.loc[buses + " Store", "bus"], axis=1)
.sum(axis=1)
)
stores[stores > 0.0] = 0.0
load += -stores
=======
n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()
)
@@ -871,80 +522,18 @@ def calculate_weighted_prices(n, label, weighted_prices):
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store", "bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
>>>>>>> pypsa-eur-sec/master
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
<<<<<<< HEAD
if carrier[:5] == "space":
print(load * n.buses_t.marginal_price[buses])
=======
# still have no idea what this is for, only for debug reasons.
if carrier[:5] == "space":
logger.debug(load * n.buses_t.marginal_price[buses])
>>>>>>> pypsa-eur-sec/master
return weighted_prices
<<<<<<< HEAD
outputs = [
"costs",
"curtailment",
"energy",
"capacity",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"metrics",
]
def make_summaries(networks_dict, paths, config, country="all"):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"]
)
dfs = {}
for output in outputs:
dfs[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in networks_dict.items():
print(label, filename)
if not os.path.exists(filename):
print("does not exist!!")
continue
try:
n = pypsa.Network(filename)
except OSError:
logger.warning("Skipping {filename}".format(filename=filename))
continue
if country != "all":
n = n[n.buses.country == country]
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(paths[0], config["costs"], config["electricity"], Nyears)
update_transmission_costs(n, costs)
assign_carriers(n)
for output in outputs:
dfs[output] = globals()["calculate_" + output](n, label, dfs[output])
return dfs
def to_csv(dfs, dir):
os.makedirs(dir, exist_ok=True)
for key, df in dfs.items():
df.to_csv(os.path.join(dir, f"{key}.csv"))
=======
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
@@ -1076,61 +665,10 @@ def make_summaries(networks_dict):
def to_csv(df):
for key in df:
df[key].to_csv(snakemake.output[key])
>>>>>>> pypsa-eur-sec/master
if __name__ == "__main__":
if "snakemake" not in globals():
<<<<<<< HEAD
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"make_summary",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-24H",
country="all",
)
network_dir = os.path.join(
"..", "results", "networks", snakemake.config["run"]["name"]
)
else:
network_dir = os.path.join(
"results", "networks", snakemake.config["run"]["name"]
)
configure_logging(snakemake)
config = snakemake.config
wildcards = snakemake.wildcards
def expand_from_wildcard(key, config):
w = getattr(wildcards, key)
return config["scenario"][key] if w == "all" else [w]
if wildcards.ll.endswith("all"):
ll = config["scenario"]["ll"]
if len(wildcards.ll) == 4:
ll = [l for l in ll if l[0] == wildcards.ll[0]]
else:
ll = [wildcards.ll]
networks_dict = {
(simpl, clusters, l, opts): os.path.join(
network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc"
)
for simpl in expand_from_wildcard("simpl", config)
for clusters in expand_from_wildcard("clusters", config)
for l in ll
for opts in expand_from_wildcard("opts", config)
}
dfs = make_summaries(
networks_dict, snakemake.input, config, country=wildcards.country
)
to_csv(dfs, snakemake.output[0])
=======
from helper import mock_snakemake
snakemake = mock_snakemake("make_summary")
@@ -1175,4 +713,3 @@ if __name__ == "__main__":
+ snakemake.config["run"]
+ "/csvs/cumulative_cost.csv"
)
>>>>>>> pypsa-eur-sec/master

scripts/plot_network.py Executable file → Normal file

@@ -1,188 +1,4 @@
# -*- coding: utf-8 -*-
<<<<<<< HEAD
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Plots map with pie charts and cost box bar charts.
Relevant Settings
-----------------
Inputs
------
Outputs
-------
Description
-----------
"""
import logging
import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _helpers import (
aggregate_costs,
aggregate_p,
configure_logging,
load_network_for_plots,
)
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse
to_rgba = mpl.colors.colorConverter.to_rgba
logger = logging.getLogger(__name__)
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure()
def axes2pt():
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (
72.0 / fig.dpi
)
ellipses = []
if not dont_resize_actively:
def update_width_height(event):
dist = axes2pt()
for e, radius in ellipses:
e.width, e.height = 2.0 * radius * dist
fig.canvas.mpl_connect("resize_event", update_width_height)
ax.callbacks.connect("xlim_changed", update_width_height)
ax.callbacks.connect("ylim_changed", update_width_height)
def legend_circle_handler(
legend, orig_handle, xdescent, ydescent, width, height, fontsize
):
w, h = 2.0 * orig_handle.get_radius() * axes2pt()
e = Ellipse(
xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent),
width=w,
height=w,
)
ellipses.append((e, orig_handle.get_radius()))
return e
return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
def make_legend_circles_for(sizes, scale=1.0, **kw):
return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes]
def set_plot_style():
plt.style.use(
[
"classic",
"seaborn-white",
{
"axes.grid": False,
"grid.linestyle": "--",
"grid.color": "0.6",
"hatch.color": "white",
"patch.linewidth": 0.5,
"font.size": 12,
"legend.fontsize": "medium",
"lines.linewidth": 1.5,
"pdf.fonttype": 42,
},
]
)
def plot_map(n, opts, ax=None, attribute="p_nom"):
if ax is None:
ax = plt.gca()
## DATA
line_colors = {
"cur": "purple",
"exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True),
}
tech_colors = opts["tech_colors"]
if attribute == "p_nom":
# bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum()
bus_sizes = pd.concat(
(
n.generators.query('carrier != "load"')
.groupby(["bus", "carrier"])
.p_nom_opt.sum(),
n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(),
)
)
line_widths_exp = n.lines.s_nom_opt
line_widths_cur = n.lines.s_nom_min
link_widths_exp = n.links.p_nom_opt
link_widths_cur = n.links.p_nom_min
else:
raise "plotting of {} has not been implemented yet".format(attribute)
line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
## FORMAT
linewidth_factor = opts["map"][attribute]["linewidth_factor"]
bus_size_factor = opts["map"][attribute]["bus_size_factor"]
## PLOT
n.plot(
line_widths=line_widths_exp / linewidth_factor,
link_widths=link_widths_exp / linewidth_factor,
line_colors=line_colors["exp"],
link_colors=line_colors["exp"],
bus_sizes=bus_sizes / bus_size_factor,
bus_colors=tech_colors,
boundaries=map_boundaries,
color_geomap=True,
geomap=True,
ax=ax,
)
n.plot(
line_widths=line_widths_cur / linewidth_factor,
link_widths=link_widths_cur / linewidth_factor,
line_colors=line_colors_with_alpha,
link_colors=link_colors_with_alpha,
bus_sizes=0,
boundaries=map_boundaries,
color_geomap=True,
geomap=True,
ax=ax,
)
ax.set_aspect("equal")
ax.axis("off")
# Rasterize basemap
# TODO : Check if this also works with cartopy
for c in ax.collections[:2]:
c.set_rasterized(True)
# LEGEND
handles = []
labels = []
for s in (10, 1):
handles.append(
plt.Line2D(
[0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("{} GW".format(s))
=======
import logging
logger = logging.getLogger(__name__)
@@ -913,180 +729,10 @@ def plot_map_without(network):
plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor)
)
labels.append(f"{s} GW")
>>>>>>> pypsa-eur-sec/master
l1_1 = ax.legend(
handles,
labels,
loc="upper left",
<<<<<<< HEAD
bbox_to_anchor=(0.24, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title="Transmission Exp./Exist. ",
)
ax.add_artist(l1_1)
handles = []
labels = []
for s in (10, 5):
handles.append(
plt.Line2D(
[0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("/")
l1_2 = ax.legend(
handles,
labels,
loc="upper left",
bbox_to_anchor=(0.26, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=0.5,
title=" ",
)
ax.add_artist(l1_2)
handles = make_legend_circles_for(
[10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w"
)
labels = ["{} GW".format(s) for s in (10, 5, 3)]
l2 = ax.legend(
handles,
labels,
loc="upper left",
bbox_to_anchor=(0.01, 1.01),
frameon=False,
labelspacing=1.0,
title="Generation",
handler_map=make_handler_map_to_scale_circles_as_in(ax),
)
ax.add_artist(l2)
techs = (bus_sizes.index.levels[1]).intersection(
pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"])
)
handles = []
labels = []
for t in techs:
handles.append(
plt.Line2D(
[0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0
)
)
labels.append(opts["nice_names"].get(t, t))
l3 = ax.legend(
handles,
labels,
loc="upper center",
bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05),
handletextpad=0.0,
columnspacing=0.5,
ncol=4,
title="Technology",
)
return fig
def plot_total_energy_pie(n, opts, ax=None):
if ax is None:
ax = plt.gca()
ax.set_title("Energy per technology", fontdict=dict(fontsize="medium"))
e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0]
patches, texts, autotexts = ax.pie(
e_primary,
startangle=90,
labels=e_primary.rename(opts["nice_names"]).index,
autopct="%.0f%%",
shadow=False,
colors=[opts["tech_colors"][tech] for tech in e_primary.index],
)
for t1, t2, i in zip(texts, autotexts, e_primary.index):
if e_primary.at[i] < 0.04 * e_primary.sum():
t1.remove()
t2.remove()
def plot_total_cost_bar(n, opts, ax=None):
if ax is None:
ax = plt.gca()
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts["tech_colors"]
def split_costs(n):
costs = aggregate_costs(n).reset_index(level=0, drop=True)
costs_ex = aggregate_costs(n, existing_only=True).reset_index(
level=0, drop=True
)
return (
costs["capital"].add(costs["marginal"], fill_value=0.0),
costs_ex["capital"],
costs["capital"] - costs_ex["capital"],
costs["marginal"],
)
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(
dict(a=costs.drop("load", errors="ignore")),
index=[
"AC-AC",
"AC line",
"onwind",
"offwind-ac",
"offwind-dc",
"solar",
"OCGT",
"CCGT",
"battery",
"H2",
],
).dropna()
bottom = np.array([0.0, 0.0])
texts = []
for i, ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind]) / total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1)
bottom_sub = bottom
bottom = bottom + data
if ind in opts["conv_techs"] + ["AC line"]:
for c in [costs_cap_ex, costs_marg]:
if ind in c:
data_sub = np.asarray([c.loc[ind]]) / total_load
ax.bar(
[0.5],
data_sub,
linewidth=0,
bottom=bottom_sub,
color=tech_colors[ind],
width=0.7,
zorder=-1,
alpha=0.8,
)
bottom_sub += data_sub
if abs(data[-1]) < 5:
continue
text = ax.text(
1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind)
)
texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get("costs_max", 80)])
ax.set_xlim([0, 1])
ax.set_xticklabels([])
ax.grid(True, axis="y", color="k", linestyle="dotted")
=======
bbox_to_anchor=(0.05, 1.01),
frameon=False,
labelspacing=0.8,
@@ -1257,68 +903,15 @@ def plot_series(network, carrier="AC", name="test"):
),
transparent=True,
)
>>>>>>> pypsa-eur-sec/master
if __name__ == "__main__":
if "snakemake" not in globals():
<<<<<<< HEAD
from _helpers import mock_snakemake
=======
from helper import mock_snakemake
>>>>>>> pypsa-eur-sec/master
snakemake = mock_snakemake(
"plot_network",
simpl="",
<<<<<<< HEAD
clusters="5",
ll="copt",
opts="Co2L-24H",
attr="p_nom",
ext="pdf",
)
configure_logging(snakemake)
set_plot_style()
config, wildcards = snakemake.config, snakemake.wildcards
map_figsize = config["plotting"]["map"]["figsize"]
map_boundaries = config["plotting"]["map"]["boundaries"]
n = load_network_for_plots(
snakemake.input.network, snakemake.input.tech_costs, config
)
scenario_opts = wildcards.opts.split("-")
fig, ax = plt.subplots(
figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}
)
plot_map(n, config["plotting"], ax=ax, attribute=wildcards.attr)
fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight")
ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
plot_total_energy_pie(n, config["plotting"], ax=ax1)
ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45])
plot_total_cost_bar(n, config["plotting"], ax=ax2)
ll = wildcards.ll
ll_type = ll[0]
ll_factor = ll[1:]
lbl = dict(c="line cost", v="line volume")[ll_type]
amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal"
fig.suptitle(
"Expansion to {amount} {label} at {clusters} clusters".format(
amount=amnt, label=lbl, clusters=wildcards.clusters
)
)
fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight")
=======
clusters="181",
lv="opt",
opts="",
@@ -1348,4 +941,3 @@ if __name__ == "__main__":
# plot_series(n, carrier="AC", name=suffix)
# plot_series(n, carrier="heat", name=suffix)
>>>>>>> pypsa-eur-sec/master

scripts/plot_summary.py

@@ -1,56 +1,4 @@
# -*- coding: utf-8 -*-
<<<<<<< HEAD
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Plots energy and cost summaries for solved networks.
Relevant Settings
-----------------
Inputs
------
Outputs
-------
Description
-----------
"""
import logging
import os
import matplotlib.pyplot as plt
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
def rename_techs(label):
if "H2" in label:
label = "hydrogen storage"
elif label == "solar":
label = "solar PV"
elif label == "offwind-ac":
label = "offshore wind ac"
elif label == "offwind-dc":
label = "offshore wind dc"
elif label == "onwind":
label = "onshore wind"
elif label == "ror":
label = "hydroelectricity"
elif label == "hydro":
label = "hydroelectricity"
elif label == "PHS":
label = "hydroelectricity"
elif "battery" in label:
label = "battery storage"
=======
import logging
logger = logging.getLogger(__name__)
@@ -130,7 +78,6 @@ def rename_techs(label):
for old, new in rename.items():
if old == label:
label = new
>>>>>>> pypsa-eur-sec/master
return label
@@ -141,16 +88,6 @@ preferred_order = pd.Index(
"hydro reservoir",
"run of river",
"pumped hydro storage",
<<<<<<< HEAD
"onshore wind",
"offshore wind ac",
"offshore wind dc",
"solar PV",
"solar thermal",
"OCGT",
"hydrogen storage",
"battery storage",
=======
"solid biomass",
"biogas",
"onshore wind",
@@ -182,21 +119,14 @@ preferred_order = pd.Index(
"battery storage",
"hot water storage",
"CO2 sequestration",
>>>>>>> pypsa-eur-sec/master
]
)
<<<<<<< HEAD
def plot_costs(infn, config, fn=None):
## For now ignore the simpl header
cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3])
=======
def plot_costs():
cost_df = pd.read_csv(
snakemake.input.costs, index_col=list(range(3)), header=list(range(n_header))
)
>>>>>>> pypsa-eur-sec/master
df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()
@@ -205,19 +135,6 @@ def plot_costs():
df = df.groupby(df.index.map(rename_techs)).sum()
<<<<<<< HEAD
to_drop = df.index[df.max(axis=1) < config["plotting"]["costs_threshold"]]
print("dropping")
print(df.loc[to_drop])
df = df.drop(to_drop)
print(df.sum())
new_index = (preferred_order.intersection(df.index)).append(
=======
to_drop = df.index[df.max(axis=1) < snakemake.config["plotting"]["costs_threshold"]]
logger.info(
@@ -230,28 +147,18 @@ def plot_costs():
logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")
new_index = preferred_order.intersection(df.index).append(
>>>>>>> pypsa-eur-sec/master
df.index.difference(preferred_order)
)
new_columns = df.sum().sort_values().index
<<<<<<< HEAD
fig, ax = plt.subplots()
fig.set_size_inches((12, 8))
=======
fig, ax = plt.subplots(figsize=(12, 8))
>>>>>>> pypsa-eur-sec/master
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
<<<<<<< HEAD
color=[config["plotting"]["tech_colors"][i] for i in new_index],
=======
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
>>>>>>> pypsa-eur-sec/master
)
handles, labels = ax.get_legend_handles_labels()
@@ -259,30 +166,12 @@ def plot_costs():
handles.reverse()
labels.reverse()
<<<<<<< HEAD
ax.set_ylim([0, config["plotting"]["costs_max"]])
=======
ax.set_ylim([0, snakemake.config["plotting"]["costs_max"]])
>>>>>>> pypsa-eur-sec/master
ax.set_ylabel("System Cost [EUR billion per year]")
ax.set_xlabel("")
<<<<<<< HEAD
ax.grid(axis="y")
ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout()
if fn is not None:
fig.savefig(fn, transparent=True)
def plot_energy(infn, config, fn=None):
energy_df = pd.read_csv(infn, index_col=list(range(2)), header=[1, 2, 3])
=======
ax.grid(axis="x")
ax.legend(
@@ -296,7 +185,6 @@ def plot_energy():
energy_df = pd.read_csv(
snakemake.input.energy, index_col=list(range(2)), header=list(range(n_header))
)
>>>>>>> pypsa-eur-sec/master
df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
@@ -305,19 +193,6 @@ def plot_energy():
df = df.groupby(df.index.map(rename_techs)).sum()
<<<<<<< HEAD
to_drop = df.index[df.abs().max(axis=1) < config["plotting"]["energy_threshold"]]
print("dropping")
print(df.loc[to_drop])
df = df.drop(to_drop)
print(df.sum())
new_index = (preferred_order.intersection(df.index)).append(
=======
to_drop = df.index[
df.abs().max(axis=1) < snakemake.config["plotting"]["energy_threshold"]
]
@@ -332,30 +207,20 @@ def plot_energy():
logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
new_index = preferred_order.intersection(df.index).append(
>>>>>>> pypsa-eur-sec/master
df.index.difference(preferred_order)
)
new_columns = df.columns.sort_values()
<<<<<<< HEAD
fig, ax = plt.subplots()
fig.set_size_inches((12, 8))
=======
fig, ax = plt.subplots(figsize=(12, 8))
logger.debug(df.loc[new_index, new_columns])
>>>>>>> pypsa-eur-sec/master
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
<<<<<<< HEAD
color=[config["plotting"]["tech_colors"][i] for i in new_index],
=======
color=[snakemake.config["plotting"]["tech_colors"][i] for i in new_index],
>>>>>>> pypsa-eur-sec/master
)
handles, labels = ax.get_legend_handles_labels()
@@ -363,31 +228,17 @@ def plot_energy():
handles.reverse()
labels.reverse()
<<<<<<< HEAD
ax.set_ylim([config["plotting"]["energy_min"], config["plotting"]["energy_max"]])
=======
ax.set_ylim(
[
snakemake.config["plotting"]["energy_min"],
snakemake.config["plotting"]["energy_max"],
]
)
>>>>>>> pypsa-eur-sec/master
ax.set_ylabel("Energy [TWh/a]")
ax.set_xlabel("")
<<<<<<< HEAD
ax.grid(axis="y")
ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout()
if fn is not None:
fig.savefig(fn, transparent=True)
=======
ax.grid(axis="x")
ax.legend(
@@ -684,39 +535,10 @@ def plot_carbon_budget_distribution(input_eurostat):
snakemake.config["results_dir"] + snakemake.config["run"] + "/graphs/"
)
plt.savefig(path_cb_plot + "carbon_budget_plot.pdf", dpi=300)
>>>>>>> pypsa-eur-sec/master
if __name__ == "__main__":
if "snakemake" not in globals():
<<<<<<< HEAD
from _helpers import mock_snakemake
snakemake = mock_snakemake(
"plot_summary",
summary="energy",
simpl="",
clusters=5,
ll="copt",
opts="Co2L-24H",
attr="",
ext="png",
country="all",
)
configure_logging(snakemake)
config = snakemake.config
summary = snakemake.wildcards.summary
try:
func = globals()[f"plot_{summary}"]
except KeyError:
raise RuntimeError(f"plotting function for {summary} has not been defined")
func(
os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0]
)
=======
from helper import mock_snakemake
snakemake = mock_snakemake("plot_summary")
@@ -736,4 +558,3 @@ if __name__ == "__main__":
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution(snakemake.input.eurostat)
>>>>>>> pypsa-eur-sec/master

scripts/solve_network.py

@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
<<<<<<< HEAD
# SPDX-FileCopyrightText: : 2017-2023 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@@ -123,166 +122,19 @@ def prepare_network(n, solve_opts):
carrier="load",
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=load_shedding,
=======
"""
Solve network.
"""
import logging
import numpy as np
import pypsa
from helper import override_component_attrs, update_config_with_sector_opts
from vresutils.benchmark import memory_logger
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
def add_land_use_constraint(n):
if "m" in snakemake.wildcards.clusters:
_add_land_use_constraint_m(n)
else:
_add_land_use_constraint(n)
def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
existing = (
n.generators.loc[ext_i, "p_nom"]
.groupby(n.generators.bus.map(n.buses.location))
.sum()
)
existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
n.generators.loc[existing.index, "p_nom_max"] -= existing
# check if existing capacities are larger than technical potential
existing_large = n.generators[
n.generators["p_nom_min"] > n.generators["p_nom_max"]
].index
if len(existing_large):
logger.warning(
f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities"
)
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
existing_large, "p_nom_min"
]
n.generators.p_nom_max.clip(lower=0, inplace=True)
def _add_land_use_constraint_m(n):
# if the generator clustering is coarser than the network clustering, land-use accounting is done at the generator clusters
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
grouping_years = snakemake.config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(
set(
[
i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
for i in existing.index
]
)
)
previous_years = [
str(y)
for y in planning_horizons + grouping_years
if y < int(snakemake.wildcards.planning_horizons)
]
for p_year in previous_years:
ind2 = [
i for i in ind if i + " " + carrier + "-" + p_year in existing.index
]
sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2]
sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2]
n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[
sel_p_year
].rename(lambda x: x[:-4] + current_horizon)
n.generators.p_nom_max.clip(lower=0, inplace=True)
def add_co2_sequestration_limit(n, limit=200):
"""
Add a global constraint on the amount of CO2 (in Mt) that can be sequestered.
"""
n.carriers.loc["co2 stored", "co2_absorptions"] = -1
n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0)
limit = limit * 1e6
for o in opts:
if not "seq" in o:
continue
limit = float(o[o.find("seq") + 3 :]) * 1e6
break
n.add(
"GlobalConstraint",
"co2_sequestration_limit",
sense="<=",
constant=limit,
type="primary_energy",
carrier_attribute="co2_absorptions",
)
def prepare_network(n, solve_opts=None, config=None):
if "clip_p_max_pu" in solve_opts:
for df in (
n.generators_t.p_max_pu,
n.generators_t.p_min_pu,
n.storage_units_t.inflow,
):
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
if solve_opts.get("load_shedding"):
# intersection between macroeconomic and survey-based willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
n.add("Carrier", "Load")
n.madd(
"Generator",
n.buses.index,
" load",
bus=n.buses.index,
carrier="load",
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=1e2, # Eur/kWh
>>>>>>> pypsa-eur-sec/master
p_nom=1e9, # kW
)
if solve_opts.get("noisy_costs"):
<<<<<<< HEAD
for t in n.iterate_components(n.one_port_components):
# if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if "marginal_cost" in t.df:
=======
for t in n.iterate_components():
# if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if "marginal_cost" in t.df:
np.random.seed(174)
>>>>>>> pypsa-eur-sec/master
t.df["marginal_cost"] += 1e-2 + 2e-3 * (
np.random.random(len(t.df)) - 0.5
)
for t in n.iterate_components(["Line", "Link"]):
<<<<<<< HEAD
=======
np.random.seed(123)
>>>>>>> pypsa-eur-sec/master
t.df["capital_cost"] += (
1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
) * t.df["length"]
@@ -292,7 +144,6 @@ def prepare_network(n, solve_opts=None, config=None):
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760.0 / nhours
<<<<<<< HEAD
return n
@@ -519,116 +370,6 @@ def extra_functionality(n, snapshots):
if "EQ" in o:
add_EQ_constraints(n, o)
add_battery_constraints(n)
=======
if snakemake.config["foresight"] == "myopic":
add_land_use_constraint(n)
if n.stores.carrier.eq("co2 stored").any():
limit = config["sector"].get("co2_sequestration_potential", 200)
add_co2_sequestration_limit(n, limit=limit)
return n
def add_battery_constraints(n):
"""
Add constraint ensuring that charger = discharger:
1 * charger_size - efficiency * discharger_size = 0
"""
discharger_bool = n.links.index.str.contains("battery discharger")
charger_bool = n.links.index.str.contains("battery charger")
dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
chargers_ext = n.links[charger_bool].query("p_nom_extendable").index
eff = n.links.efficiency[dischargers_ext].values
lhs = (
n.model["Link-p_nom"].loc[chargers_ext]
- n.model["Link-p_nom"].loc[dischargers_ext] * eff
)
n.model.add_constraints(lhs == 0, name="Link-charger_ratio")
def add_chp_constraints(n):
electric = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("electric")
)
heat = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("heat")
)
electric_ext = n.links[electric].query("p_nom_extendable").index
heat_ext = n.links[heat].query("p_nom_extendable").index
electric_fix = n.links[electric].query("~p_nom_extendable").index
heat_fix = n.links[heat].query("~p_nom_extendable").index
p = n.model["Link-p"] # dimension: [time, link]
# output ratio between heat and electricity and top_iso_fuel_line for extendable
if not electric_ext.empty:
p_nom = n.model["Link-p_nom"]
lhs = (
p_nom.loc[electric_ext]
* (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
- p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
)
n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")
rename = {"Link-ext": "Link"}
lhs = (
p.loc[:, electric_ext]
+ p.loc[:, heat_ext]
- p_nom.rename(rename).loc[electric_ext]
)
n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")
# top_iso_fuel_line for fixed
if not electric_fix.empty:
lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
rhs = n.links.p_nom[electric_fix]
n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")
# back-pressure
if not electric.empty:
lhs = (
p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
- p.loc[:, electric] * n.links.efficiency[electric]
)
n.model.add_constraints(lhs <= rhs, name="chplink-backpressure")
def add_pipe_retrofit_constraint(n):
"""
Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.
"""
gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index
h2_retrofitted_i = n.links.query(
"carrier == 'H2 pipeline retrofitted' and p_nom_extendable"
).index
if h2_retrofitted_i.empty or gas_pipes_i.empty:
return
p_nom = n.model["Link-p_nom"]
CH4_per_H2 = 1 / n.config["sector"]["H2_retrofit_capacity_per_CH4"]
lhs = p_nom.loc[gas_pipes_i] + CH4_per_H2 * p_nom.loc[h2_retrofitted_i]
rhs = n.links.p_nom[gas_pipes_i].rename_axis("Link-ext")
n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")
def extra_functionality(n, snapshots):
add_battery_constraints(n)
add_pipe_retrofit_constraint(n)
>>>>>>> pypsa-eur-sec/master
def solve_network(n, config, opts="", **kwargs):
@@ -652,7 +393,6 @@ def solve_network(n, config, opts="", **kwargs):
logger.info("No expandable lines found. Skipping iterative solving.")
if skip_iterations:
<<<<<<< HEAD
network_lopf(
n, solver_name=solver_name, solver_options=solver_options, **kwargs
)
@@ -682,65 +422,10 @@ if __name__ == "__main__":
if tmpdir is not None:
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.opts.split("-")
=======
status, condition = n.optimize(
solver_name=solver_name,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
else:
status, condition = n.optimize.optimize_transmission_expansion_iteratively(
solver_name=solver_name,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
if status != "ok":
logger.warning(
f"Solving status '{status}' with termination condition '{condition}'"
)
return n
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
"solve_network_myopic",
simpl="",
opts="",
clusters="45",
lv=1.0,
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2020",
)
logging.basicConfig(
filename=snakemake.log.python, level=snakemake.config["logging_level"]
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
from pathlib import Path
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.sector_opts.split("-")
>>>>>>> pypsa-eur-sec/master
solve_opts = snakemake.config["solving"]["options"]
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
<<<<<<< HEAD
n = pypsa.Network(snakemake.input[0])
n = prepare_network(n, solve_opts)
n = solve_network(
@@ -751,21 +436,6 @@ if __name__ == "__main__":
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver,
)
=======
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
n = prepare_network(n, solve_opts, config=snakemake.config)
n = solve_network(
n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
)
if "lv_limit" in n.global_constraints.index:
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
n.line_volume_limit_dual = n.global_constraints.at["lv_limit", "mu"]
>>>>>>> pypsa-eur-sec/master
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

scripts/solve_sector_network.py

@@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
"""
Solve network.
"""
import logging
import numpy as np
import pypsa
from helper import override_component_attrs, update_config_with_sector_opts
from vresutils.benchmark import memory_logger
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
def add_land_use_constraint(n):
if "m" in snakemake.wildcards.clusters:
_add_land_use_constraint_m(n)
else:
_add_land_use_constraint(n)
def _add_land_use_constraint(n):
# warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
existing = (
n.generators.loc[ext_i, "p_nom"]
.groupby(n.generators.bus.map(n.buses.location))
.sum()
)
existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
n.generators.loc[existing.index, "p_nom_max"] -= existing
# check if existing capacities are larger than technical potential
existing_large = n.generators[
n.generators["p_nom_min"] > n.generators["p_nom_max"]
].index
if len(existing_large):
logger.warning(
f"Existing capacities larger than technical potential for {existing_large},\
adjust technical potential to existing capacities"
)
n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
existing_large, "p_nom_min"
]
n.generators.p_nom_max.clip(lower=0, inplace=True)
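# Hedged illustration (not in the original file): the index shift above relies
# on the myopic naming scheme "<location> <carrier>-<planning_horizon>", e.g.
# for a hypothetical location "DE0 0" with carrier "solar" and horizon "2030":
#
#     existing.index: "DE0 0"  ->  "DE0 0 solar-2030"
#
# so existing capacity is subtracted from p_nom_max of the generator that is
# extendable in the current horizon.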
def _add_land_use_constraint_m(n):
# if the generator clustering is coarser than the network clustering, land-use accounting is done at the generator clusters
planning_horizons = snakemake.config["scenario"]["planning_horizons"]
grouping_years = snakemake.config["existing_capacities"]["grouping_years"]
current_horizon = snakemake.wildcards.planning_horizons
for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"]
ind = list(
set(
[
i.split(sep=" ")[0] + " " + i.split(sep=" ")[1]
for i in existing.index
]
)
)
previous_years = [
str(y)
for y in planning_horizons + grouping_years
if y < int(snakemake.wildcards.planning_horizons)
]
for p_year in previous_years:
ind2 = [
i for i in ind if i + " " + carrier + "-" + p_year in existing.index
]
sel_current = [i + " " + carrier + "-" + current_horizon for i in ind2]
sel_p_year = [i + " " + carrier + "-" + p_year for i in ind2]
n.generators.loc[sel_current, "p_nom_max"] -= existing.loc[
sel_p_year
].rename(lambda x: x[:-4] + current_horizon)
n.generators.p_nom_max.clip(lower=0, inplace=True)
def add_co2_sequestration_limit(n, limit=200):
"""
Add a global constraint on the amount of CO2 (in Mt) that can be sequestered.
"""
n.carriers.loc["co2 stored", "co2_absorptions"] = -1
n.carriers.co2_absorptions = n.carriers.co2_absorptions.fillna(0)
limit = limit * 1e6
for o in opts:
if not "seq" in o:
continue
limit = float(o[o.find("seq") + 3 :]) * 1e6
break
n.add(
"GlobalConstraint",
"co2_sequestration_limit",
sense="<=",
constant=limit,
type="primary_energy",
carrier_attribute="co2_absorptions",
)
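# Hedged example (not in the original file): how the "seq" option above
# overrides the default limit, assuming sector_opts contains a hypothetical
# "seq400" entry:
#
#     o = "seq400"
#     limit = float(o[o.find("seq") + 3 :]) * 1e6  # -> 400e6 tCO2, i.e. 400 Mt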
def prepare_network(n, solve_opts=None, config=None):
if "clip_p_max_pu" in solve_opts:
for df in (
n.generators_t.p_max_pu,
n.generators_t.p_min_pu,
n.storage_units_t.inflow,
):
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
if solve_opts.get("load_shedding"):
# intersection between macroeconomic and survey-based willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
n.add("Carrier", "Load")
n.madd(
"Generator",
n.buses.index,
" load",
bus=n.buses.index,
carrier="load",
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=1e2, # Eur/kWh
p_nom=1e9, # kW
)
if solve_opts.get("noisy_costs"):
for t in n.iterate_components():
# if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if "marginal_cost" in t.df:
np.random.seed(174)
t.df["marginal_cost"] += 1e-2 + 2e-3 * (
np.random.random(len(t.df)) - 0.5
)
for t in n.iterate_components(["Line", "Link"]):
np.random.seed(123)
t.df["capital_cost"] += (
1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
) * t.df["length"]
if solve_opts.get("nhours"):
nhours = solve_opts["nhours"]
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760.0 / nhours
if snakemake.config["foresight"] == "myopic":
add_land_use_constraint(n)
if n.stores.carrier.eq("co2 stored").any():
limit = config["sector"].get("co2_sequestration_potential", 200)
add_co2_sequestration_limit(n, limit=limit)
return n
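# Hedged example of the "nhours" option above: solve_opts = {"nhours": 100}
# truncates to the first 100 snapshots and re-weights each one to
# 8760 / 100 = 87.6 hours, so annualised quantities remain comparable.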
def add_battery_constraints(n):
"""
Add constraint ensuring that charger = discharger:
1 * charger_size - efficiency * discharger_size = 0
"""
discharger_bool = n.links.index.str.contains("battery discharger")
charger_bool = n.links.index.str.contains("battery charger")
dischargers_ext = n.links[discharger_bool].query("p_nom_extendable").index
chargers_ext = n.links[charger_bool].query("p_nom_extendable").index
eff = n.links.efficiency[dischargers_ext].values
lhs = (
n.model["Link-p_nom"].loc[chargers_ext]
- n.model["Link-p_nom"].loc[dischargers_ext] * eff
)
n.model.add_constraints(lhs == 0, name="Link-charger_ratio")
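# Hedged numeric check of the ratio above: the constraint enforces
# charger_size = efficiency * discharger_size, so with a hypothetical
# discharging efficiency of 0.96, a 96 MW charger pairs with a 100 MW
# discharger of the same battery.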
def add_chp_constraints(n):
electric = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("electric")
)
heat = (
n.links.index.str.contains("urban central")
& n.links.index.str.contains("CHP")
& n.links.index.str.contains("heat")
)
electric_ext = n.links[electric].query("p_nom_extendable").index
heat_ext = n.links[heat].query("p_nom_extendable").index
electric_fix = n.links[electric].query("~p_nom_extendable").index
heat_fix = n.links[heat].query("~p_nom_extendable").index
p = n.model["Link-p"] # dimension: [time, link]
# output ratio between heat and electricity and top_iso_fuel_line for extendable
if not electric_ext.empty:
p_nom = n.model["Link-p_nom"]
lhs = (
p_nom.loc[electric_ext]
* (n.links.p_nom_ratio * n.links.efficiency)[electric_ext].values
- p_nom.loc[heat_ext] * n.links.efficiency[heat_ext].values
)
n.model.add_constraints(lhs == 0, name="chplink-fix_p_nom_ratio")
rename = {"Link-ext": "Link"}
lhs = (
p.loc[:, electric_ext]
+ p.loc[:, heat_ext]
- p_nom.rename(rename).loc[electric_ext]
)
n.model.add_constraints(lhs <= 0, name="chplink-top_iso_fuel_line_ext")
# top_iso_fuel_line for fixed
if not electric_fix.empty:
lhs = p.loc[:, electric_fix] + p.loc[:, heat_fix]
rhs = n.links.p_nom[electric_fix]
n.model.add_constraints(lhs <= rhs, name="chplink-top_iso_fuel_line_fix")
# back-pressure
if not electric.empty:
lhs = (
p.loc[:, heat] * (n.links.efficiency[heat] * n.links.c_b[electric].values)
- p.loc[:, electric] * n.links.efficiency[electric]
)
n.model.add_constraints(lhs <= rhs, name="chplink-backpressure")
def add_pipe_retrofit_constraint(n):
"""
Add constraint for retrofitting existing CH4 pipelines to H2 pipelines.
"""
gas_pipes_i = n.links.query("carrier == 'gas pipeline' and p_nom_extendable").index
h2_retrofitted_i = n.links.query(
"carrier == 'H2 pipeline retrofitted' and p_nom_extendable"
).index
if h2_retrofitted_i.empty or gas_pipes_i.empty:
return
p_nom = n.model["Link-p_nom"]
CH4_per_H2 = 1 / n.config["sector"]["H2_retrofit_capacity_per_CH4"]
lhs = p_nom.loc[gas_pipes_i] + CH4_per_H2 * p_nom.loc[h2_retrofitted_i]
rhs = n.links.p_nom[gas_pipes_i].rename_axis("Link-ext")
n.model.add_constraints(lhs == rhs, name="Link-pipe_retrofit")
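# Hedged example (not in the original file): with a hypothetical
# H2_retrofit_capacity_per_CH4 of 0.6, CH4_per_H2 = 1 / 0.6, so a former
# 1000 MW gas pipeline may split into 400 MW gas plus 360 MW retrofitted H2,
# since 400 + (1 / 0.6) * 360 = 1000.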
def extra_functionality(n, snapshots):
add_battery_constraints(n)
add_pipe_retrofit_constraint(n)
def solve_network(n, config, opts="", **kwargs):
set_of_options = config["solving"]["solver"]["options"]
solver_options = (
config["solving"]["solver_options"][set_of_options] if set_of_options else {}
)
solver_name = config["solving"]["solver"]["name"]
cf_solving = config["solving"]["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
max_iterations = cf_solving.get("max_iterations", 6)
# add to network for extra_functionality
n.config = config
n.opts = opts
skip_iterations = cf_solving.get("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
if skip_iterations:
status, condition = n.optimize(
solver_name=solver_name,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
else:
status, condition = n.optimize.optimize_transmission_expansion_iteratively(
solver_name=solver_name,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality,
**solver_options,
**kwargs,
)
if status != "ok":
logger.warning(
f"Solving status '{status}' with termination condition '{condition}'"
)
return n
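# Hedged usage sketch (values are illustrative, not from this file):
#
#     n = pypsa.Network("prenetwork.nc")
#     n = prepare_network(n, solve_opts=config["solving"]["options"], config=config)
#     n = solve_network(n, config=config, opts=["730H", "T", "H"], log_fn="solver.log")
#
# extra keyword arguments such as log_fn are passed through to n.optimize(...).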
# %%
if __name__ == "__main__":
if "snakemake" not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
"solve_network_myopic",
simpl="",
opts="",
clusters="45",
lv=1.0,
sector_opts="8760H-T-H-B-I-A-solar+p3-dist1",
planning_horizons="2020",
)
logging.basicConfig(
filename=snakemake.log.python, level=snakemake.config["logging_level"]
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
from pathlib import Path
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.sector_opts.split("-")
solve_opts = snakemake.config["solving"]["options"]
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
n = prepare_network(n, solve_opts, config=snakemake.config)
n = solve_network(
n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
)
if "lv_limit" in n.global_constraints.index:
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
n.line_volume_limit_dual = n.global_constraints.at["lv_limit", "mu"]
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])
logger.info("Maximum memory usage: {}".format(mem.mem_usage))