Copy solve_network.py and *_summary.py from pypsa-eur sector branch
parent 4638744505
commit cfe44ce702

scripts/make_summary.py (new file, 445 lines)
@@ -0,0 +1,445 @@
from six import iteritems

import sys
sys.path = ['/home/vres/lib/python3.5/site-packages'] + sys.path

import pandas as pd

import numpy as np

import pypsa

from vresutils.costdata import annuity

from prepare_network import generate_periodic_profiles

from add_electricity import load_costs

import yaml

idx = pd.IndexSlice

opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}

# First tell PyPSA that links can have multiple outputs by
# overriding the component_attrs. This can be done for
# as many buses as you need with format busi for i = 2,3,4,5,...
# See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs

override_component_attrs = pypsa.descriptors.Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string", np.nan, np.nan, "2nd bus", "Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string", np.nan, np.nan, "3rd bus", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series", "per unit", 1., "2nd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series", "per unit", 1., "3rd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series", "MW", 0., "2nd bus output", "Output"]
override_component_attrs["Link"].loc["p3"] = ["series", "MW", 0., "3rd bus output", "Output"]


def assign_carriers(n):

    if "carrier" not in n.loads:
        n.loads["carrier"] = "electricity"
        for carrier in ["transport", "heat", "urban heat"]:
            n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier

    if "carrier" not in n.lines:
        n.lines["carrier"] = "AC"

    if n.stores.loc["EU gas Store", "carrier"] == "":
        n.stores.loc["EU gas Store", "carrier"] = "gas Store"


def calculate_costs(n, label, costs):

    for c in n.iterate_components(n.branch_components | n.controllable_one_port_components ^ {"Load"}):
        capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
        capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()

        costs = costs.reindex(costs.index | pd.MultiIndex.from_product([[c.list_name], ["capital"], capital_costs_grouped.index]))

        costs.loc[idx[c.list_name, "capital", list(capital_costs_grouped.index)], label] = capital_costs_grouped.values

        if c.name == "Link":
            p = c.pnl.p0.multiply(n.snapshot_weightings, axis=0).sum()
        elif c.name == "Line":
            continue
        elif c.name == "StorageUnit":
            p_all = c.pnl.p.multiply(n.snapshot_weightings, axis=0)
            p_all[p_all < 0.] = 0.
            p = p_all.sum()
        else:
            p = c.pnl.p.multiply(n.snapshot_weightings, axis=0).sum()

        marginal_costs = p * c.df.marginal_cost

        marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()

        costs = costs.reindex(costs.index | pd.MultiIndex.from_product([[c.list_name], ["marginal"], marginal_costs_grouped.index]))

        costs.loc[idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label] = marginal_costs_grouped.values

    # add back in costs of links if there is a line volume limit
    # (costs_db is the technology cost database loaded in __main__ below)
    if label[1] != "opt":
        costs.loc[("links-added", "capital", "transmission lines"), label] = ((costs_db.at['HVDC overhead', 'capital_cost'] * n.links.length + costs_db.at['HVDC inverter pair', 'capital_cost']) * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
        costs.loc[("lines-added", "capital", "transmission lines"), label] = costs_db.at["HVAC overhead", "capital_cost"] * (n.lines.length * n.lines.s_nom_opt).sum()
    else:
        costs.loc[("links-added", "capital", "transmission lines"), label] = (costs_db.at['HVDC inverter pair', 'capital_cost'] * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()

    # add back in all hydro
    #costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
    #costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
    #costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()

    return costs
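
# The costs frame ends up with a three-level index of the form
# (component list_name, "capital"|"marginal", carrier),
# e.g. ("generators", "capital", "onwind"), with one column per scenario label.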


def calculate_curtailment(n, label, curtailment):

    avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
    used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()

    # per-carrier curtailment in per cent of available energy
    curtailment[label] = (((avail - used) / avail) * 100).round(3)

    return curtailment


def calculate_energy(n, label, energy):

    for c in n.iterate_components(n.one_port_components | n.branch_components):

        if c.name in n.one_port_components:
            c_energies = c.pnl.p.multiply(n.snapshot_weightings, axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
        else:
            c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings, axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings, axis=0).sum()).groupby(c.df.carrier).sum()

        energy = energy.reindex(energy.index | pd.MultiIndex.from_product([[c.list_name], c_energies.index]))

        energy.loc[idx[c.list_name, list(c_energies.index)], label] = c_energies.values

    return energy


def calculate_supply(n, label, supply):
    """calculate the max dispatch of each component at the buses where the loads are attached"""

    load_types = n.loads.carrier.value_counts().index

    for i in load_types:

        buses = n.loads.bus[n.loads.carrier == i].values

        bus_map = pd.Series(False, index=n.buses.index)
        bus_map.loc[buses] = True

        for c in n.iterate_components(n.one_port_components):

            items = c.df.index[c.df.bus.map(bus_map)]

            if len(items) == 0:
                continue

            s = c.pnl.p[items].max().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()

            supply = supply.reindex(supply.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
            supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values

        for c in n.iterate_components(n.branch_components):

            for end in ["0", "1"]:

                items = c.df.index[c.df["bus" + end].map(bus_map)]

                if len(items) == 0:
                    continue

                # lots of sign compensation for direction and to do maximums
                s = (-1)**(1 - int(end)) * ((-1)**int(end) * c.pnl["p" + end][items]).max().groupby(c.df.loc[items, 'carrier']).sum()

                supply = supply.reindex(supply.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
                supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply
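
# Note on signs: for branch components, c.pnl["p0"]/c.pnl["p1"] hold the power
# withdrawn from bus0/bus1, so an injection into a bus is the negative of the
# series at that end. The (-1)** factors above report the peak injection at
# "1"-ends as a positive number and the peak withdrawal at "0"-ends as a
# negative one.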


def calculate_supply_energy(n, label, supply_energy):
    """calculate the total dispatch of each component at the buses where the loads are attached"""

    load_types = n.loads.carrier.value_counts().index

    for i in load_types:

        buses = n.loads.bus[n.loads.carrier == i].values

        bus_map = pd.Series(False, index=n.buses.index)
        bus_map.loc[buses] = True

        for c in n.iterate_components(n.one_port_components):

            items = c.df.index[c.df.bus.map(bus_map)]

            if len(items) == 0:
                continue

            s = c.pnl.p[items].sum().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()

            supply_energy = supply_energy.reindex(supply_energy.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
            supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values

        for c in n.iterate_components(n.branch_components):

            for end in ["0", "1"]:

                items = c.df.index[c.df["bus" + end].map(bus_map)]

                if len(items) == 0:
                    continue

                s = (-1) * c.pnl["p" + end][items].sum().groupby(c.df.loc[items, 'carrier']).sum()

                supply_energy = supply_energy.reindex(supply_energy.index | pd.MultiIndex.from_product([[i], [c.list_name], s.index]))
                supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values

    return supply_energy


def calculate_metrics(n, label, metrics):

    metrics = metrics.reindex(metrics.index | pd.Index(["line_volume", "line_volume_limit", "line_volume_AC", "line_volume_DC", "line_volume_shadow", "co2_shadow"]))

    metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
    metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
    metrics.at["line_volume", label] = metrics.loc[["line_volume_AC", "line_volume_DC"], label].sum()

    if hasattr(n, "line_volume_limit"):
        metrics.at["line_volume_limit", label] = n.line_volume_limit
        metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual

    if "CO2Limit" in n.global_constraints.index:
        metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]

    return metrics
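
# The line_volume_* entries are capacity-times-length sums (MW*km, since PyPSA
# lengths are in km); co2_shadow is the dual of the CO2 cap, i.e. the implied
# CO2 price in the objective's currency per tonne.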


def calculate_prices(n, label, prices):

    bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace("", "electricity")

    prices = prices.reindex(prices.index | bus_type.value_counts().index)

    # WARNING: this is time-averaged, should really be load-weighted average
    prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()

    return prices


def calculate_weighted_prices(n, label, weighted_prices):
    # Warning: doesn't include storage units as loads

    weighted_prices = weighted_prices.reindex(pd.Index(["electricity", "heat", "space heat", "urban heat", "space urban heat", "gas", "H2"]))

    link_loads = {"electricity": ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
                  "heat": ["water tanks charger"],
                  "urban heat": ["water tanks charger"],
                  "space heat": [],
                  "space urban heat": [],
                  "gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
                  "H2": ["Sabatier", "H2 Fuel Cell"]}

    for carrier in link_loads:

        if carrier == "electricity":
            suffix = ""
        elif carrier[:5] == "space":
            suffix = carrier[5:]
        else:
            suffix = " " + carrier

        buses = n.buses.index[n.buses.index.str[2:] == suffix]

        if buses.empty:
            continue

        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.)
        elif carrier[:5] == "space":
            # heat_demand_df is expected to be provided externally (cf.
            # snakemake.input['heat_demand_name'] below); it is not defined in this script
            load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i) + suffix)
        else:
            load = n.loads_t.p_set[buses]

        for tech in link_loads[carrier]:

            names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech]

            if names.empty:
                continue

            load += n.links_t.p0[names].groupby(n.links.loc[names, "bus0"], axis=1).sum()

        # add H2 Store when charging
        if carrier == "H2":
            stores = n.stores_t.p[buses + " Store"].groupby(n.stores.loc[buses + " Store", "bus"], axis=1).sum()
            stores[stores > 0.] = 0.
            load += -stores

        weighted_prices.loc[carrier, label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum()

        if carrier[:5] == "space":
            print(load * n.buses_t.marginal_price[buses])

    return weighted_prices
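
# The value computed above is the demand-weighted average price
# sum_t sum_b (load * price) / sum_t sum_b load, which addresses the
# time-averaging caveat noted in calculate_prices.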


def calculate_market_values(n, label, market_values):
    # Warning: doesn't include storage units

    carrier = "AC"

    buses = n.buses.index[n.buses.carrier == carrier]

    ## First do market value of generators ##

    generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]

    techs = n.generators.loc[generators, "carrier"].value_counts().index

    market_values = market_values.reindex(market_values.index | techs)

    for tech in techs:
        gens = generators[n.generators.loc[generators, "carrier"] == tech]

        dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.)

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    ## Now do market value of links ##

    for i in ["0", "1"]:
        all_links = n.links.index[n.buses.loc[n.links["bus" + i], "carrier"] == carrier]

        techs = n.links.loc[all_links, "carrier"].value_counts().index

        market_values = market_values.reindex(market_values.index | techs)

        for tech in techs:
            links = all_links[n.links.loc[all_links, "carrier"] == tech]

            dispatch = n.links_t["p" + i][links].groupby(n.links.loc[links, "bus" + i], axis=1).sum().reindex(columns=buses, fill_value=0.)

            revenue = dispatch * n.buses_t.marginal_price[buses]

            market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()

    return market_values


def calculate_price_statistics(n, label, price_statistics):

    price_statistics = price_statistics.reindex(price_statistics.index | pd.Index(["zero_hours", "mean", "standard_deviation"]))

    buses = n.buses.index[n.buses.carrier == "AC"]

    threshold = 0.1  # higher than phoney marginal_cost of wind/solar

    df = pd.DataFrame(data=0., columns=buses, index=n.snapshots)

    df[n.buses_t.marginal_price[buses] < threshold] = 1.

    # share of bus-hours with a near-zero price
    price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1])

    price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean()

    price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std()

    return price_statistics


outputs = ["costs",
           "curtailment",
           "energy",
           "supply",
           "supply_energy",
           "prices",
           "weighted_prices",
           "price_statistics",
           "market_values",
           "metrics",
           ]
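
# Each entry must match a function calculate_<output> above;
# make_summaries dispatches on the name via globals().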


def make_summaries(networks_dict):

    columns = pd.MultiIndex.from_tuples(networks_dict.keys(), names=["cluster", "lv", "opt"])

    df = {}

    for output in outputs:
        df[output] = pd.DataFrame(columns=columns, dtype=float)

    for label, filename in iteritems(networks_dict):
        print(label, filename)

        n = pypsa.Network(filename,
                          override_component_attrs=override_component_attrs)

        assign_carriers(n)

        for output in outputs:
            df[output] = globals()["calculate_" + output](n, label, df[output])

    return df


def to_csv(df):

    for key in df:
        df[key].to_csv(snakemake.output[key])


if __name__ == "__main__":
    # Detect running outside of snakemake and mock snakemake for testing
    if 'snakemake' not in globals():
        from vresutils import Dict
        import yaml
        snakemake = Dict()
        with open('config.yaml') as f:
            snakemake.config = yaml.safe_load(f)
        snakemake.input = Dict()
        snakemake.input['heat_demand_name'] = 'data/heating/daily_heat_demand.h5'
        snakemake.output = Dict()
        for item in outputs:
            snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'], item=item)

    networks_dict = {(cluster, lv, opt):
                     snakemake.config['results_dir'] + snakemake.config['run'] + '/postnetworks/elec_s_{cluster}_lv{lv}_{opt}.nc'
                     .format(cluster=cluster,
                             opt=opt,
                             lv=lv)
                     for cluster in snakemake.config['scenario']['clusters']
                     for opt in snakemake.config['scenario']['opts']
                     for lv in snakemake.config['scenario']['lv']}
    print(networks_dict)

    costs_db = load_costs(Nyears=1., tech_costs="data/costs.csv", config=snakemake.config["costs"], elec_config=snakemake.config['electricity'])

    df = make_summaries(networks_dict)

    to_csv(df)

scripts/plot_summary.py (new file, 187 lines)
@@ -0,0 +1,187 @@

import pandas as pd

# allow plotting without Xwindows
import matplotlib
matplotlib.use('Agg')

import matplotlib.pyplot as plt

# consolidate and rename
def rename_techs(label):
    if label[:8] == "central ":
        label = label[8:]
    if label[:6] == "urban ":
        label = label[6:]

    if "retrofitting" in label:
        label = "building retrofitting"
    if "H2" in label:
        label = "hydrogen storage"
    if "CHP" in label:
        label = "CHP"
    if "water tank" in label:
        label = "water tanks"
    if label == "water tanks":
        label = "hot water storage"
    if "gas" in label and label not in ["gas boiler", "biogas"]:
        label = "natural gas"
    if "solar thermal" in label:
        label = "solar thermal"
    if label == "solar":
        label = "solar PV"
    if label == "heat pump":
        label = "air heat pump"
    if label == "Sabatier":
        label = "methanation"
    if label == "offwind":
        label = "offshore wind"
    if label == "onwind":
        label = "onshore wind"
    if label == "ror":
        label = "hydroelectricity"
    if label == "hydro":
        label = "hydroelectricity"
    if label == "PHS":
        label = "hydroelectricity"
    if label == "co2 Store":
        label = "DAC"
    if label == "co2 stored":
        label = "CO2 sequestration"
    if "battery" in label:
        label = "battery storage"
    if label in ["AC", "DC", "B2B"]:
        label = "transmission lines"
    return label


preferred_order = pd.Index(["transmission lines", "hydroelectricity", "hydro reservoir", "run of river", "pumped hydro storage", "solid biomass", "biogas", "onshore wind", "offshore wind", "solar PV", "solar thermal", "solar", "building retrofitting", "ground heat pump", "air heat pump", "heat pump", "resistive heater", "power-to-heat", "gas-to-power/heat", "CHP", "OCGT", "gas boiler", "gas", "natural gas", "helmeth", "methanation", "hydrogen storage", "power-to-gas", "power-to-liquid", "battery storage", "hot water storage", "CO2 sequestration"])
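
# The plotting functions below stack bars in this order: the intersection
# preferred_order & df.index keeps the preferred ordering, and any remaining
# technologies are appended behind it via df.index.difference(preferred_order).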

def plot_costs():

    cost_df = pd.read_csv(snakemake.input.costs, index_col=list(range(3)), header=[0, 1, 2])

    df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()

    # convert to billions
    df = df / 1e9

    df = df.groupby(df.index.map(rename_techs)).sum()

    to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']]

    print("dropping")
    print(df.loc[to_drop])

    df = df.drop(to_drop)

    print(df.sum())

    new_index = (preferred_order & df.index).append(df.index.difference(preferred_order))

    new_columns = df.sum().sort_values().index

    fig, ax = plt.subplots()
    fig.set_size_inches((12, 8))

    df.loc[new_index, new_columns].T.plot(kind="bar", ax=ax, stacked=True, color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])

    handles, labels = ax.get_legend_handles_labels()

    handles.reverse()
    labels.reverse()

    ax.set_ylim([0, snakemake.config['plotting']['costs_max']])

    ax.set_ylabel("System Cost [EUR billion per year]")

    ax.set_xlabel("")

    ax.grid(axis="y")

    ax.legend(handles, labels, ncol=4, loc="upper left")

    fig.tight_layout()

    fig.savefig(snakemake.output.costs, transparent=True)


def plot_energy():

    energy_df = pd.read_csv(snakemake.input.energy, index_col=list(range(2)), header=[0, 1, 2])

    df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()

    # convert MWh to TWh
    df = df / 1e6

    df = df.groupby(df.index.map(rename_techs)).sum()

    to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']]

    print("dropping")
    print(df.loc[to_drop])

    df = df.drop(to_drop)

    print(df.sum())

    new_index = (preferred_order & df.index).append(df.index.difference(preferred_order))

    new_columns = df.columns.sort_values()

    fig, ax = plt.subplots()
    fig.set_size_inches((12, 8))

    df.loc[new_index, new_columns].T.plot(kind="bar", ax=ax, stacked=True, color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])

    handles, labels = ax.get_legend_handles_labels()

    handles.reverse()
    labels.reverse()

    ax.set_ylim([snakemake.config['plotting']['energy_min'], snakemake.config['plotting']['energy_max']])

    ax.set_ylabel("Energy [TWh/a]")

    ax.set_xlabel("")

    ax.grid(axis="y")

    ax.legend(handles, labels, ncol=4, loc="upper left")

    fig.tight_layout()

    fig.savefig(snakemake.output.energy, transparent=True)


if __name__ == "__main__":
    # Detect running outside of snakemake and mock snakemake for testing
    if 'snakemake' not in globals():
        from vresutils import Dict
        import yaml
        snakemake = Dict()
        with open('config.yaml') as f:
            snakemake.config = yaml.safe_load(f)
        snakemake.input = Dict()
        snakemake.output = Dict()

        for item in ["costs", "energy"]:
            snakemake.input[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'], item=item)
            snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/graphs/{item}.pdf'.format(name=snakemake.config['run'], item=item)

    plot_costs()

    plot_energy()

scripts/solve_network.py (new file, 375 lines)
@@ -0,0 +1,375 @@

import sys
sys.path = ["/home/vres/data/tom/lib/pypsa"] + sys.path

import numpy as np
import pandas as pd

import logging
logger = logging.getLogger(__name__)

import gc
import os

import pypsa

from pypsa.descriptors import free_output_series_dataframes

# Suppress logging of the slack bus choices
pypsa.pf.logger.setLevel(logging.WARNING)

from vresutils.benchmark import memory_logger


# First tell PyPSA that links can have multiple outputs by
# overriding the component_attrs. This can be done for
# as many buses as you need with format busi for i = 2,3,4,5,...
# See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs

override_component_attrs = pypsa.descriptors.Dict({k: v.copy() for k, v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string", np.nan, np.nan, "2nd bus", "Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string", np.nan, np.nan, "3rd bus", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series", "per unit", 1., "2nd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series", "per unit", 1., "3rd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series", "MW", 0., "2nd bus output", "Output"]
override_component_attrs["Link"].loc["p3"] = ["series", "MW", 0., "3rd bus output", "Output"]


def patch_pyomo_tmpdir(tmpdir):
    # PYOMO should write its lp files into tmp here
    import os
    if not os.path.isdir(tmpdir):
        os.mkdir(tmpdir)
    from pyutilib.services import TempfileManager
    TempfileManager.tempdir = tmpdir


def prepare_network(n, solve_opts=None):
    if solve_opts is None:
        solve_opts = snakemake.config['solving']['options']

    if 'clip_p_max_pu' in solve_opts:
        for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow):
            df.where(df > solve_opts['clip_p_max_pu'], other=0., inplace=True)

    if solve_opts.get('load_shedding'):
        n.add("Carrier", "Load")
        n.madd("Generator", n.buses.index, " load",
               bus=n.buses.index,
               carrier='load',
               sign=1e-3,  # Adjust sign to measure p and p_nom in kW instead of MW
               marginal_cost=1e2,  # Eur/kWh
               # intersection between macroeconomic and survey-based
               # willingness to pay
               # http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
               p_nom=1e9  # kW
               )
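        # With sign=1e-3 the model variable p is effectively in kW, so
        # marginal_cost=1e2 EUR/kWh corresponds to a value of lost load of
        # 1e5 EUR/MWh, and p_nom=1e9 kW (= 1e6 MW) is effectively unlimited.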

    if solve_opts.get('noisy_costs'):
        for t in n.iterate_components():
            #if 'capital_cost' in t.df:
            #    t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
            if 'marginal_cost' in t.df:
                t.df['marginal_cost'] += 1e-2 + 2e-3 * (np.random.random(len(t.df)) - 0.5)

        for t in n.iterate_components(['Line', 'Link']):
            t.df['capital_cost'] += (1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)) * t.df['length']

    if solve_opts.get('nhours'):
        nhours = solve_opts['nhours']
        n.set_snapshots(n.snapshots[:nhours])
        n.snapshot_weightings[:] = 8760. / nhours

    return n


def add_opts_constraints(n, opts=None):
    if opts is None:
        opts = snakemake.wildcards.opts.split('-')

    if 'BAU' in opts:
        mincaps = snakemake.config['electricity']['BAU_mincapacities']
        def bau_mincapacities_rule(model, carrier):
            gens = n.generators.index[n.generators.p_nom_extendable & (n.generators.carrier == carrier)]
            return sum(model.generator_p_nom[gen] for gen in gens) >= mincaps[carrier]
        n.model.bau_mincapacities = pypsa.opt.Constraint(list(mincaps), rule=bau_mincapacities_rule)

    if 'SAFE' in opts:
        peakdemand = (1. + snakemake.config['electricity']['SAFE_reservemargin']) * n.loads_t.p_set.sum(axis=1).max()
        conv_techs = snakemake.config['plotting']['conv_techs']
        exist_conv_caps = n.generators.loc[n.generators.carrier.isin(conv_techs) & ~n.generators.p_nom_extendable, 'p_nom'].sum()
        ext_gens_i = n.generators.index[n.generators.carrier.isin(conv_techs) & n.generators.p_nom_extendable]
        n.model.safe_peakdemand = pypsa.opt.Constraint(expr=sum(n.model.generator_p_nom[gen] for gen in ext_gens_i) >= peakdemand - exist_conv_caps)


def add_lv_constraint(n):
    line_volume = getattr(n, 'line_volume_limit', None)
    if line_volume is not None and not np.isinf(line_volume):
        n.model.line_volume_constraint = pypsa.opt.Constraint(
            expr=((sum(n.model.passive_branch_s_nom["Line", line] * n.lines.at[line, "length"]
                       for line in n.lines.index[n.lines.s_nom_extendable]) +
                   sum(n.model.link_p_nom[link] * n.links.at[link, "length"]
                       for link in n.links.index[(n.links.carrier == 'DC') &
                                                 n.links.p_nom_extendable]))
                  <= line_volume)
        )
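
# The constraint caps total transmission expansion: the sum of capacity times
# length over extendable AC lines and DC links may not exceed the line volume
# budget (in MW*km).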


def add_eps_storage_constraint(n):
    if not hasattr(n, 'epsilon'):
        n.epsilon = 1e-5
    fix_sus_i = n.storage_units.index[~n.storage_units.p_nom_extendable]
    n.model.objective.expr += sum(n.epsilon * n.model.state_of_charge[su, n.snapshots[0]] for su in fix_sus_i)


def add_battery_constraints(network):

    nodes = list(network.buses.index[network.buses.carrier == "battery"])

    def battery(model, node):
        return model.link_p_nom[node + " charger"] == model.link_p_nom[node + " discharger"] * network.links.at[node + " charger", "efficiency"]

    network.model.battery = pypsa.opt.Constraint(nodes, rule=battery)
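
# This ties each battery's charger and discharger ratings together so the
# store can be discharged at the same rate it is charged, corrected for the
# charger efficiency.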


def add_chp_constraints(network):

    options = snakemake.config["sector"]

    if hasattr(network.links.index, "str") and network.links.index.str.contains("CHP").any():

        # also catches central heat buses for district heating
        nodes = list(network.links.index[network.links.index.str.contains("CHP electric")].str[:-len(" CHP electric")])

        def chp_nom(model, node):
            return network.links.at[node + " CHP electric", "efficiency"] * options['chp_parameters']['p_nom_ratio'] * model.link_p_nom[node + " CHP electric"] == network.links.at[node + " CHP heat", "efficiency"] * options['chp_parameters']['p_nom_ratio'] * model.link_p_nom[node + " CHP heat"]

        network.model.chp_nom = pypsa.opt.Constraint(nodes, rule=chp_nom)

        def backpressure(model, node, snapshot):
            return options['chp_parameters']['c_m'] * network.links.at[node + " CHP heat", "efficiency"] * model.link_p[node + " CHP heat", snapshot] <= network.links.at[node + " CHP electric", "efficiency"] * model.link_p[node + " CHP electric", snapshot]

        network.model.backpressure = pypsa.opt.Constraint(nodes, list(network.snapshots), rule=backpressure)

        def top_iso_fuel_line(model, node, snapshot):
            return model.link_p[node + " CHP heat", snapshot] + model.link_p[node + " CHP electric", snapshot] <= model.link_p_nom[node + " CHP electric"]

        network.model.top_iso_fuel_line = pypsa.opt.Constraint(nodes, list(network.snapshots), rule=top_iso_fuel_line)
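
# Together these three constraints encode the feasible operating region of an
# extraction-condensing CHP: chp_nom fixes the ratio of electric to heat
# capacity, backpressure bounds heat output relative to electricity via the
# coefficient c_m, and top_iso_fuel_line caps combined output at the electric
# capacity.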


def fix_branches(n, lines_s_nom=None, links_p_nom=None):
    if lines_s_nom is not None and len(lines_s_nom) > 0:
        for l, s_nom in lines_s_nom.iteritems():
            n.model.passive_branch_s_nom["Line", l].fix(s_nom)
        if isinstance(n.opt, pypsa.opf.PersistentSolver):
            n.opt.update_var(n.model.passive_branch_s_nom)

    if links_p_nom is not None and len(links_p_nom) > 0:
        for l, p_nom in links_p_nom.iteritems():
            n.model.link_p_nom[l].fix(p_nom)
        if isinstance(n.opt, pypsa.opf.PersistentSolver):
            n.opt.update_var(n.model.link_p_nom)


def solve_network(n, config=None, solver_log=None, opts=None):
    if config is None:
        config = snakemake.config['solving']
    solve_opts = config['options']

    solver_options = config['solver'].copy()
    if solver_log is None:
        solver_log = snakemake.log.solver
    solver_name = solver_options.pop('name')

    def run_lopf(n, allow_warning_status=False, fix_zero_lines=False, fix_ext_lines=False):
        free_output_series_dataframes(n)

        # only (re-)build the model if no persistent solver instance is attached
        if not hasattr(n, 'opt') or not isinstance(n.opt, pypsa.opf.PersistentSolver):
            pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation'])
            add_opts_constraints(n, opts)
            add_lv_constraint(n)
            #add_eps_storage_constraint(n)
            add_battery_constraints(n)
            add_chp_constraints(n)

            pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name)

        if fix_zero_lines:
            fix_lines_b = (n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable
            fix_links_b = (n.links.carrier == 'DC') & (n.links.p_nom_opt == 0.) & n.links.p_nom_extendable
            fix_branches(n,
                         lines_s_nom=pd.Series(0., n.lines.index[fix_lines_b]),
                         links_p_nom=pd.Series(0., n.links.index[fix_links_b]))

        if fix_ext_lines:
            fix_branches(n,
                         lines_s_nom=n.lines.loc[n.lines.s_nom_extendable, 's_nom_opt'],
                         links_p_nom=n.links.loc[(n.links.carrier == 'DC') & n.links.p_nom_extendable, 'p_nom_opt'])

        if not fix_ext_lines and hasattr(n.model, 'line_volume_constraint'):

            def extra_postprocessing(n, snapshots, duals):
                # recover the shadow price of the line volume limit
                index = list(n.model.line_volume_constraint.keys())
                cdata = pd.Series(list(n.model.line_volume_constraint.values()),
                                  index=index)
                n.line_volume_limit_dual = -cdata.map(duals).sum()
                print("line volume limit dual:", n.line_volume_limit_dual)

        else:
            extra_postprocessing = None

        # Firing up solve will increase memory consumption tremendously, so
        # make sure we freed everything we can
        gc.collect()

        #from pyomo.opt import ProblemFormat
        #print("Saving model to MPS")
        #n.model.write('/home/ka/ka_iai/ka_kc5996/projects/pypsa-eur/128-B-I.mps', format=ProblemFormat.mps)
        #print("Model is saved to MPS")
        #sys.exit()

        status, termination_condition = \
            pypsa.opf.network_lopf_solve(n,
                                         solver_logfile=solver_log,
                                         solver_options=solver_options,
                                         formulation=solve_opts['formulation'],
                                         extra_postprocessing=extra_postprocessing
                                         #free_memory={'pypsa'}
                                         )

        assert status == "ok" or allow_warning_status and status == 'warning', \
            ("network_lopf did abort with status={} "
             "and termination_condition={}"
             .format(status, termination_condition))

        return status, termination_condition
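
    # The iteration below feeds s_nom_opt back into the electrical parameters:
    # update_line_parameters rescales r and x (untyped lines) or num_parallel
    # (typed lines) after each solve, and the loop repeats until the mean
    # square change in line capacities falls below msq_threshold.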

    lines_ext_b = n.lines.s_nom_extendable
    if lines_ext_b.any():
        # puh: ok, we need to iterate, since there is a relation
        # between s/p_nom and r, x for branches.
        msq_threshold = 0.01
        lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']])

        lines['s_nom'] = (
            np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
            n.lines.bus0.map(n.buses.v_nom)
        ).where(n.lines.type != '', n.lines['s_nom'])

        lines_ext_typed_b = (n.lines.type != '') & lines_ext_b
        lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b

        def update_line_parameters(n, zero_lines_below=10, fix_zero_lines=False):
            if zero_lines_below > 0:
                n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0.
                n.links.loc[(n.links.carrier == 'DC') & (n.links.p_nom_opt < zero_lines_below), 'p_nom_opt'] = 0.

            if lines_ext_untyped_b.any():
                for attr in ('r', 'x'):
                    n.lines.loc[lines_ext_untyped_b, attr] = (
                        lines[attr].multiply(lines['s_nom'] / n.lines['s_nom_opt'])
                    )

            if lines_ext_typed_b.any():
                n.lines.loc[lines_ext_typed_b, 'num_parallel'] = (
                    n.lines['s_nom_opt'] / lines['s_nom']
                )
                logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel']))

            if isinstance(n.opt, pypsa.opf.PersistentSolver):
                n.calculate_dependent_values()

                assert solve_opts['formulation'] == 'kirchhoff', \
                    "Updating persistent solvers has only been implemented for the kirchhoff formulation for now"

                n.opt.remove_constraint(n.model.cycle_constraints)
                del n.model.cycle_constraints_index
                del n.model.cycle_constraints_index_0
                del n.model.cycle_constraints_index_1
                del n.model.cycle_constraints

                pypsa.opf.define_passive_branch_flows_with_kirchhoff(n, n.snapshots, skip_vars=True)
                n.opt.add_constraint(n.model.cycle_constraints)

        iteration = 1

        lines['s_nom_opt'] = lines['s_nom'] * n.lines['num_parallel'].where(n.lines.type != '', 1.)
        status, termination_condition = run_lopf(n, allow_warning_status=True)

        def msq_diff(n):
            lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean()) / lines['s_nom_opt'].mean()
            logger.info("Mean square difference after iteration {} is {}".format(iteration, lines_err))
            return lines_err

        min_iterations = solve_opts.get('min_iterations', 2)
        max_iterations = solve_opts.get('max_iterations', 999)

        while msq_diff(n) > msq_threshold or iteration < min_iterations:
            if iteration >= max_iterations:
                logger.info("Iteration {} beyond max_iterations {}. Stopping ...".format(iteration, max_iterations))
                break

            update_line_parameters(n)
            lines['s_nom_opt'] = n.lines['s_nom_opt']
            iteration += 1

            status, termination_condition = run_lopf(n, allow_warning_status=True)

        update_line_parameters(n, zero_lines_below=100)

        logger.info("Starting last run with fixed extendable lines")

    # Not really needed, could also be taken out
    # if 'snakemake' in globals():
    #     fn = os.path.basename(snakemake.output[0])
    #     n.export_to_netcdf('/home/vres/data/jonas/playground/pypsa-eur/' + fn)

    status, termination_condition = run_lopf(n, fix_ext_lines=True)

    # Drop zero lines from network
    # zero_lines_i = n.lines.index[(n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable]
    # if len(zero_lines_i):
    #     n.mremove("Line", zero_lines_i)
    # zero_links_i = n.links.index[(n.links.p_nom_opt == 0.) & n.links.p_nom_extendable]
    # if len(zero_links_i):
    #     n.mremove("Link", zero_links_i)

    return n


if __name__ == "__main__":
    # Detect running outside of snakemake and mock snakemake for testing
    if 'snakemake' not in globals():
        from vresutils.snakemake import MockSnakemake, Dict
        snakemake = MockSnakemake(
            wildcards=dict(network='elec', simpl='', clusters='45', lv='1.25', opts='Co2L-3H-T-H'),
            input=["networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc"],
            output=["results/networks/s{simpl}_{clusters}_lv{lv}_{opts}-test.nc"],
            log=dict(gurobi="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_gurobi-test.log",
                     python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_python-test.log")
        )

    tmpdir = snakemake.config['solving'].get('tmpdir')
    if tmpdir is not None:
        patch_pyomo_tmpdir(tmpdir)

    logging.basicConfig(filename=snakemake.log.python,
                        level=snakemake.config['logging_level'])

    with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
        n = pypsa.Network(snakemake.input[0],
                          override_component_attrs=override_component_attrs)

        n = prepare_network(n)
        n = solve_network(n)

        n.export_to_netcdf(snakemake.output[0])

    logger.info("Maximum memory usage: {}".format(mem.mem_usage))