From fbff32dcfcba57fab3cfaea1e10ae76f1cad75f1 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 12 Jan 2024 16:42:12 +0100 Subject: [PATCH 01/30] build_pop_weighted_energy: don't reduce district heat share Previously the DH share was being multiplied by the population weighting, reducing the DH share with multiple nodes. --- scripts/build_population_weighted_energy_totals.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 879e3b9b..20467f72 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,4 +26,9 @@ if __name__ == "__main__": nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) + # district heating share should not be divided by population fraction + dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) + dh_share.index = pop_layout.index + nodal_energy_totals["district heat share"] = dh_share + nodal_energy_totals.to_csv(snakemake.output[0]) From 6c20ce83d7f0fd509013cc8d060a4bb91fd2c879 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 16:47:19 +0100 Subject: [PATCH 02/30] move building of daily heat profile to its own script Previously this was handled inside prepare_sector_network.py. --- rules/build_sector.smk | 36 +++++++--- ...t_demand.py => build_daily_heat_demand.py} | 0 scripts/build_hourly_heat_demand.py | 69 +++++++++++++++++++ scripts/prepare_sector_network.py | 29 ++------ 4 files changed, 102 insertions(+), 32 deletions(-) rename scripts/{build_heat_demand.py => build_daily_heat_demand.py} (100%) create mode 100644 scripts/build_hourly_heat_demand.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 4744aa25..efaff2a3 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -123,7 +123,7 @@ rule cluster_gas_network: "../scripts/cluster_gas_network.py" -rule build_heat_demands: +rule build_daily_heat_demand: params: snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: @@ -131,18 +131,39 @@ rule build_heat_demands: regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + LOGS + "build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc", benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + BENCHMARKS + "build_daily_heat_demand/{scope}_s{simpl}_{clusters}" conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots=config["snapshots"], + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + 
"../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: @@ -727,7 +748,6 @@ rule prepare_sector_network: if config["foresight"] == "overnight" else RESOURCES + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{planning_horizons}.csv", @@ -740,9 +760,7 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", + hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 100% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..94ad7266 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. 
+""" + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from itertools import product + + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, + axis=1, + names = ["sector use", "node"]) + + heat_demand.index.name="snapshots" + + print(heat_demand) + + print(heat_demand.stack()) + + ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) + + print(ds) + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 4d36e7d4..1f404c4e 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -18,7 +18,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from _helpers import update_config_with_sector_opts from add_electricity import calculate_annuity, sanitize_carriers from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement @@ -1639,14 +1639,8 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") - ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1654,25 +1648,14 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile + name = f"{sector} {use}" - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape 
/ heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) From bd8a5ecf2bd5124a1062e0fb8666cdcaa18df19d Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 17:51:08 +0100 Subject: [PATCH 03/30] build_energy_totals: output district heat share to separate file --- rules/build_sector.smk | 1 + scripts/build_energy_totals.py | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index efaff2a3..bfe168e1 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -256,6 +256,7 @@ rule build_energy_totals: energy_name=RESOURCES + "energy_totals.csv", co2_name=RESOURCES + "co2_totals.csv", transport_name=RESOURCES + "transport_data.csv", + district_heat_share=RESOURCES + "district_heat_share.csv", threads: 16 resources: mem_mb=10000, diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 39b2a1be..53aab980 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -391,13 +391,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -572,16 +565,31 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(idees): + + # district heating share + district_heat = idees[ + ["derived heat residential", "derived heat services"] + ].sum(axis=1) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + + district_heat_share = district_heat/total_heat + # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -750,6 +758,9 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) From 1a477d6b325dd8d78b561295509cf9608bd2b056 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: 
Mon, 15 Jan 2024 18:55:09 +0100 Subject: [PATCH 04/30] move calculation of district heating share to its own script Now the script build_district_heat_share.py does what the old function create_nodes_for_heating() in prepare_sector_networks.py did. There is no need to build nodes lists for each heating sector, since all nodes have district heating now. --- rules/build_sector.smk | 22 +++ scripts/build_district_heat_share.py | 77 +++++++++ scripts/build_energy_totals.py | 6 +- ...build_population_weighted_energy_totals.py | 5 - scripts/prepare_sector_network.py | 161 +++++++----------- 5 files changed, 164 insertions(+), 107 deletions(-) create mode 100644 scripts/build_district_heat_share.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index bfe168e1..14156268 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -710,6 +710,27 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" + + +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + rule prepare_sector_network: params: co2_budget=config["co2_budget"], @@ -762,6 +783,7 @@ rule prepare_sector_network: industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..d521214d --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. 
+""" + +import pandas as pd + +from prepare_sector_network import get + +import logging + + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, + index_col=0).squeeze() + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame(dtype=float) + + df["original district heat share"] = district_heat_share + df["district fraction of node"] = dist_fraction_node + df["urban fraction"] = urban_fraction + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 53aab980..306caf4d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -568,7 +568,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): return df -def build_district_heat_share(idees): +def build_district_heat_share(countries, idees): # district heating share district_heat = idees[ @@ -578,6 +578,8 @@ def build_district_heat_share(idees): district_heat_share = district_heat/total_heat + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] @@ -758,7 +760,7 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) - district_heat_share = build_district_heat_share(idees) + district_heat_share = build_district_heat_share(countries, idees) district_heat_share.to_csv(snakemake.output.district_heat_share) base_year_emissions = params["base_emissions_year"] diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 20467f72..879e3b9b 100644 --- 
a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,9 +26,4 @@ if __name__ == "__main__": nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) - # district heating share should not be divided by population fraction - dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) - dh_share.index = pop_layout.index - nodal_energy_totals["district heat share"] = dh_share - nodal_energy_totals.to_csv(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 1f404c4e..8d56ae6b 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1678,7 +1678,10 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1715,6 +1718,8 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 + nodes = pop_layout.index + for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" @@ -1722,8 +1727,8 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1731,9 +1736,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1746,11 +1751,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1761,7 +1766,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1769,7 +1774,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1777,9 +1782,9 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) @@ -1790,17 +1795,17 @@ def add_heat(n, costs): costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" efficiency = ( - cop[heat_pump_type][nodes[name]] + cop[heat_pump_type][nodes] if options["time_dep_hp_cop"] else costs.at[costs_name, "efficiency"] ) n.madd( "Link", - nodes[name], + nodes, suffix=f" 
{name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] @@ -1814,17 +1819,17 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1832,9 +1837,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, @@ -1853,8 +1858,8 @@ def add_heat(n, costs): n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1868,9 +1873,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], @@ -1883,10 +1888,10 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], @@ -1900,13 +1905,13 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1914,10 +1919,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1933,12 +1938,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - 
bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1970,11 +1975,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2105,50 +2110,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2366,7 +2327,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2375,10 +2336,10 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", 
efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] @@ -2821,7 +2782,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2831,10 +2792,10 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], From 9897cd6f0535fdbef82b66816c844a9f0ebf10b5 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 16:24:39 +0100 Subject: [PATCH 05/30] only add district heating (DH) for nodes with non-zero DH share --- scripts/prepare_sector_network.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 8d56ae6b..0bf9848b 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1718,11 +1718,15 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - nodes = pop_layout.index for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( From d98ad95332a8f9c81c06d5a42c426fd0b4be921a Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 18:42:49 +0100 Subject: [PATCH 06/30] move building of distribution of existing heating to own script This makes the distribution of existing heating to urban/rural, residential/services and spatially more transparent. 
--- rules/solve_myopic.smk | 37 ++++- scripts/add_existing_baseyear.py | 136 ++++-------------- .../build_existing_heating_distribution.py | 108 ++++++++++++++ 3 files changed, 172 insertions(+), 109 deletions(-) create mode 100644 scripts/build_existing_heating_distribution.py diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 7ca8857d..20043286 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,8 +1,40 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule add_existing_baseyear: params: baseyear=config["scenario"]["planning_horizons"][0], @@ -19,7 +51,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index c8486758..01d54cc2 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,97 +409,20 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) + existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, + header=[0,1], + index_col=0) - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - df.fillna(0.0, inplace=True) + techs = existing_heating.columns.get_level_values(1).unique() - # convert GW to MW - df *= 1e3 + for name in existing_heating.columns.get_level_values(0).unique(): - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, - ) - - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) - - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + heat_pump_type = "air" if "urban" in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -507,7 +430,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -520,27 +443,26 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " 
heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -548,21 +470,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating[(name, "resistive heater")][nodes] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -572,8 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating[(name, "gas boiler")][nodes] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -583,20 +503,20 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom= ( + existing_heating[(name, "oil boiler")][nodes] + * ratio + / costs.at["decentral oil boiler", "efficiency"]), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -624,6 +544,8 @@ def add_heating_capacities_installed_before_baseyear( # drop assets which are at the end of their lifetime links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index + logger.info("Removing following links because at end of their lifetime:") + logger.info(links_i) n.mremove("Link", links_i) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..fe282d39 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for 
initial planning +horizon. +""" +import pandas as pd +import sys +from pypsa.descriptors import Dict +import numpy as np +import country_converter as coco + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + techs = [ + "gas boiler", + "oil boiler", + "resistive heater", + "air heat pump", + "ground heat pump", + ] + + existing_heating = pd.read_csv(snakemake.input.existing_heating, + index_col=0, + header=0) + + # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, + index_col=0) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), + axis=0) + + + nodal_heat_name_fraction = pd.DataFrame(dtype=float) + + nodal_heat_name_fraction["urban central"] = dist_fraction + + for sector in sectors: + + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + + + nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], + axis=0) for name in nodal_heat_name_fraction.columns}, + axis=1, + names=["heat name","technology"]) + + + #move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. + + nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] + nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + + nodal_heat_name_tech[("urban central","ground heat pump")] = 0. 
+ + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + + build_existing_heating() From 2183e742b2b44a7caca47a525537f8e827e501e5 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:18:26 +0100 Subject: [PATCH 07/30] add release notes, minor code improvements --- doc/release_notes.rst | 18 +++++++++++++++ doc/sector.rst | 20 ++++++++++++++-- rules/solve_overnight.smk | 7 +++--- scripts/add_existing_baseyear.py | 8 +++---- scripts/build_daily_heat_demand.py | 3 ++- scripts/build_district_heat_share.py | 15 ++++++------ scripts/build_energy_totals.py | 4 ++-- .../build_existing_heating_distribution.py | 23 ++++++++++--------- scripts/build_hourly_heat_demand.py | 18 +++++---------- scripts/prepare_sector_network.py | 2 +- 10 files changed, 75 insertions(+), 43 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index dc1a9dd1..56fee0d8 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -28,6 +28,24 @@ Upcoming Release * Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``. +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes. + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network`. + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file. + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share`. + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent. + PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..411bfd57 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. 
automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index 39778162..47c86410 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -28,9 +28,10 @@ rule solve_sector_network: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", python=RESULTS + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: config["solving"]["solver_options"][config["solving"]["solver"]["options"]].get( - "threads", 4 -) + threads: + config["solving"]["solver_options"][ + config["solving"]["solver"]["options"] + ].get("threads", 4) resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 01d54cc2..d61ece85 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -451,7 +451,7 @@ def add_heating_capacities_installed_before_baseyear( efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) @@ -470,7 +470,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - existing_heating[(name, "resistive heater")][nodes] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), @@ -493,7 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - existing_heating[(name, "gas boiler")][nodes] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -514,7 +514,7 @@ def add_heating_capacities_installed_before_baseyear( capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], p_nom= ( - existing_heating[(name, "oil boiler")][nodes] + existing_heating.loc[nodes, (name, "oil boiler")] * ratio / costs.at["decentral oil boiler", "efficiency"]), build_year=int(grouping_year), diff --git a/scripts/build_daily_heat_demand.py b/scripts/build_daily_heat_demand.py index b983f125..e334b1b3 100644 --- a/scripts/build_daily_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -18,7 +18,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index d521214d..996ed861 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -21,9 +21,10 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_district_heat_share", simpl="", 
clusters=48, + planning_horizons="2050", ) investment_year = int(snakemake.wildcards.planning_horizons[-4:]) @@ -68,10 +69,10 @@ if __name__ == "__main__": f"resulting in new average share of {dist_fraction_node.mean():.2%}" ) - df = pd.DataFrame(dtype=float) - - df["original district heat share"] = district_heat_share - df["district fraction of node"] = dist_fraction_node - df["urban fraction"] = urban_fraction + df = pd.DataFrame({ + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction + }, dtype=float) df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 306caf4d..08d5bef5 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -583,10 +583,10 @@ def build_district_heat_share(countries, idees): # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] - ) + ).div(100).squeeze() # make conservative assumption and take minimum from both data sets district_heat_share = pd.concat( - [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) district_heat_share.name = "district heat share" diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index fe282d39..443c5baa 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,8 +7,6 @@ Builds table of existing heat generation capacities for initial planning horizon. """ import pandas as pd -import sys -from pypsa.descriptors import Dict import numpy as np import country_converter as coco @@ -17,19 +15,13 @@ cc = coco.CountryConverter() def build_existing_heating(): # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] existing_heating = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan existing_heating.loc["Montenegro"] = np.nan existing_heating.loc["Macedonia"] = np.nan @@ -104,5 +96,14 @@ def build_existing_heating(): if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) build_existing_heating() diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 94ad7266..2d1dee54 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -6,19 +6,19 @@ Build hourly heat demand time series from daily ones. 
""" -import pandas as pd -import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts from itertools import product - +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_hourly_heat_demands", + scope="total", simpl="", clusters=48, ) @@ -58,12 +58,6 @@ if __name__ == "__main__": heat_demand.index.name="snapshots" - print(heat_demand) - - print(heat_demand.stack()) - - ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) - - print(ds) + ds = heat_demand.stack().to_xarray() ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 0bf9848b..241f3c30 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1654,7 +1654,7 @@ def build_heat_demand(n): heat_demand[name] = ( heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( + electric_heat_supply[name] = ( heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 From d3cf329456056d02fcfcbabe175e6ff9a8e2bb0c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:28:51 +0100 Subject: [PATCH 08/30] correctly read number of solver threads in rule definition --- doc/release_notes.rst | 2 ++ rules/common.smk | 7 +++++++ rules/solve_electricity.smk | 2 +- rules/solve_myopic.smk | 2 +- rules/solve_overnight.smk | 5 +---- rules/solve_perfect.smk | 2 +- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 56fee0d8..1a0013d5 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -46,6 +46,8 @@ Upcoming Release existing heating to urban/rural, residential/services and spatially more transparent. +* Bugfix: Correctly read out number of solver threads from configuration file. 
+ PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/rules/common.smk b/rules/common.smk index 2298ff91..1654180f 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -13,6 +13,13 @@ for path in helper_source_path: from _helpers import validate_checksum +def solver_threads(w): + solver_options = config["solving"]["solver_options"] + option_set = config["solving"]["solver"]["options"] + threads = solver_options[option_set].get("threads", 4) + return threads + + def memory(w): factor = 3.0 for o in w.opts.split("-"): diff --git a/rules/solve_electricity.smk b/rules/solve_electricity.smk index 7f6092be..ac433cf9 100644 --- a/rules/solve_electricity.smk +++ b/rules/solve_electricity.smk @@ -25,7 +25,7 @@ rule solve_network: + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", benchmark: BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - threads: 4 + threads: solver_threads resources: mem_mb=memory, walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 20043286..8c46ed59 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -137,7 +137,7 @@ rule solve_sector_network_myopic: + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", python=LOGS + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: 4 + threads: solver_threads resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index 47c86410..aa08b8c3 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -28,10 +28,7 @@ rule solve_sector_network: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", python=RESULTS + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: - config["solving"]["solver_options"][ - config["solving"]["solver"]["options"] - ].get("threads", 4) + threads: solver_threads resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index a7856fa9..ad310f9f 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -127,7 +127,7 @@ rule solve_sector_network_perfect: output: RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - threads: 4 + threads: solver_threads resources: mem_mb=config["solving"]["mem"], shadow: From 9865a970893d9e515786f33c629b14f71645bf1e Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:29:32 +0100 Subject: [PATCH 09/30] apply automated formatting --- rules/build_sector.smk | 11 +-- rules/solve_myopic.smk | 6 +- scripts/add_existing_baseyear.py | 17 ++-- scripts/build_district_heat_share.py | 31 ++++---- scripts/build_energy_totals.py | 21 ++--- .../build_existing_heating_distribution.py | 77 +++++++++++-------- scripts/build_hourly_heat_demand.py | 12 +-- scripts/prepare_sector_network.py | 12 +-- 8 files changed, 105 insertions(+), 82 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 14156268..a24f9f7d 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -710,8 +710,6 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" - - rule build_district_heat_share: params: sector=config["sector"], @@ -719,7 +717,8 @@ rule 
build_district_heat_share: district_heat_share=RESOURCES + "district_heat_share.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", output: - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", threads: 1 resources: mem_mb=1000, @@ -782,8 +781,10 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + hourly_heat_demand_total=RESOURCES + + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 8c46ed59..75334073 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -11,8 +11,10 @@ rule build_existing_heating_distribution: input: existing_heating="data/existing_infrastructure/existing_heating_raw.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", output: existing_heating_distribution=RESOURCES + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index d61ece85..c67d5f8b 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,15 +409,13 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, - header=[0,1], - index_col=0) - + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 + ) techs = existing_heating.columns.get_level_values(1).unique() for name in existing_heating.columns.get_level_values(0).unique(): - name_type = "central" if name == "urban central" else "decentral" nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) @@ -451,7 +449,9 @@ def add_heating_capacities_installed_before_baseyear( efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) @@ -513,10 +513,11 @@ def add_heating_capacities_installed_before_baseyear( efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom= ( + p_nom=( existing_heating.loc[nodes, (name, "oil boiler")] * ratio - / costs.at["decentral oil boiler", "efficiency"]), + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 996ed861..3353437a 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -6,12 +6,10 @@ Build district heat shares at each node, depending on investment year. 
""" -import pandas as pd - -from prepare_sector_network import get - import logging +import pandas as pd +from prepare_sector_network import get logger = logging.getLogger(__name__) @@ -29,11 +27,11 @@ if __name__ == "__main__": investment_year = int(snakemake.wildcards.planning_horizons[-4:]) - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - district_heat_share = pd.read_csv(snakemake.input.district_heat_share, - index_col=0).squeeze() + district_heat_share = pd.read_csv( + snakemake.input.district_heat_share, index_col=0 + ).squeeze() # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] @@ -62,17 +60,22 @@ if __name__ == "__main__": # difference of max potential and today's share of district heating diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) dist_fraction_node += diff * progress logger.info( f"Increase district heating share by a progress factor of {progress:.2%} " f"resulting in new average share of {dist_fraction_node.mean():.2%}" ) - df = pd.DataFrame({ - "original district heat share": district_heat_share, - "district fraction of node": dist_fraction_node, - "urban fraction": urban_fraction - }, dtype=float) + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 08d5bef5..c67bb49d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -569,21 +569,24 @@ def build_energy_totals(countries, eurostat, swiss, idees): def build_district_heat_share(countries, idees): - # district heating share - district_heat = idees[ - ["derived heat residential", "derived heat services"] - ].sum(axis=1) - total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) - district_heat_share = district_heat/total_heat + district_heat_share = district_heat / total_heat district_heat_share = district_heat_share.reindex(countries) # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] - ).div(100).squeeze() + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() + ) # make conservative assumption and take minimum from both data sets district_heat_share = pd.concat( [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index 443c5baa..67993c29 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -6,9 +6,9 @@ Builds table of existing heat generation capacities for initial planning horizon. 
""" -import pandas as pd -import numpy as np import country_converter as coco +import numpy as np +import pandas as pd cc = coco.CountryConverter() @@ -16,9 +16,9 @@ cc = coco.CountryConverter() def build_existing_heating(): # retrieve existing heating capacities - existing_heating = pd.read_csv(snakemake.input.existing_heating, - index_col=0, - header=0) + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan @@ -33,24 +33,25 @@ def build_existing_heating(): existing_heating.index = cc.convert(existing_heating.index, to="iso2") # coal and oil boilers are assimilated to oil boilers - existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) existing_heating.drop(["coal boiler"], axis=1, inplace=True) # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) nodal_heating = existing_heating.loc[pop_layout.ct] nodal_heating.index = pop_layout.index nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] - energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, - index_col=0) + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) uses = ["space", "water"] sectors = ["residential", "services"] @@ -58,39 +59,51 @@ def build_existing_heating(): nodal_sectoral_totals = pd.DataFrame(dtype=float) for sector in sectors: - nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) - - nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), - axis=0) + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) nodal_heat_name_fraction = pd.DataFrame(dtype=float) nodal_heat_name_fraction["urban central"] = dist_fraction for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[ + sector + ] * (urban_fraction - dist_fraction) - nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) - nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) - - nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], - axis=0) for name in nodal_heat_name_fraction.columns}, - axis=1, - names=["heat name","technology"]) - - - #move all ground HPs to rural, all air to urban + # move all ground HPs to 
rural, all air to urban for sector in sectors: - nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] - + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) - nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 - nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] - nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 - nodal_heat_name_tech[("urban central","ground heat pump")] = 0. + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 2d1dee54..c972da89 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -48,16 +48,16 @@ if __name__ == "__main__": ) if use == "space": - heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) else: heat_demand[f"{sector} {use}"] = intraday_year_profile - heat_demand = pd.concat(heat_demand, - axis=1, - names = ["sector use", "node"]) + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) - heat_demand.index.name="snapshots" + heat_demand.index.name = "snapshots" - ds = heat_demand.stack().to_xarray() + ds = heat_demand.stack().to_xarray() ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 241f3c30..ba92e137 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1639,8 +1639,11 @@ def add_land_transport(n, costs): def build_heat_demand(n): - - heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) + ) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1648,7 +1651,6 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - name = f"{sector} {use}" heat_demand[name] = ( @@ -1678,8 +1680,7 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] @@ -1718,7 +1719,6 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - for name in heat_systems: name_type = "central" if name == "urban central" else 
"decentral" From 31c7c10fc5e86702d47f9e2da7eb7dfd230e8b54 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:30:53 +0100 Subject: [PATCH 10/30] git-blame-ignore-revs: add automated formatting commit --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..53f63e71 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e \ No newline at end of file From 757fbcc464c4c04b94d2a29cd325963f0a35aa8a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 08:31:57 +0000 Subject: [PATCH 11/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .git-blame-ignore-revs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 53f63e71..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,4 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 -9865a970893d9e515786f33c629b14f71645bf1e \ No newline at end of file +9865a970893d9e515786f33c629b14f71645bf1e From fd57311094f9ef86400e94121cb33e87b6abacbf Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 10:10:40 +0100 Subject: [PATCH 12/30] build_district_heat_share: make safe for single-country runs --- scripts/build_district_heat_share.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 3353437a..6e934a2b 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -31,7 +31,7 @@ if __name__ == "__main__": district_heat_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0 - ).squeeze() + )["district heat share"] # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] From 025f48c0c2ba402f61f9cf2d0cf8c4656beae1ac Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 09:11:06 +0000 Subject: [PATCH 13/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/build_district_heat_share.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 6e934a2b..86c42631 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -29,9 +29,9 @@ if __name__ == "__main__": pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - district_heat_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0 - )["district heat share"] + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] From 6c593a551b6f090b1066285061ec41f48c325d1c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 18:07:33 +0100 Subject: [PATCH 14/30] build_hourly_heat_demand: only pass subset of snapshot config --- rules/build_sector.smk | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index a24f9f7d..0586ec01 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -147,7 +147,7 @@ rule build_daily_heat_demand: rule build_hourly_heat_demand: params: - snapshots=config["snapshots"], + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: heat_profile="data/heat_load_profile_BDEW.csv", heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", From 999ff852888f4c1fcab28796332cefc7d16f6272 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 17:19:55 +0100 Subject: [PATCH 15/30] fix snakemake.inputs for add_existing_baseyear with perfect foresight --- rules/solve_perfect.smk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index ad310f9f..76051976 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -17,6 +17,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", From bb202ad2c44558b01bf8d7cdaccaea97bf80cca2 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Sun, 27 Aug 2023 17:05:54 +0200 Subject: [PATCH 16/30] plot_network: remove function plot_map_without() --- rules/postprocess.smk | 2 -- scripts/plot_network.py | 74 ----------------------------------------- 2 files changed, 76 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 9f4ac78e..5bbffeb8 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -20,8 +20,6 @@ if config["foresight"] != "perfect": output: map=RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf", threads: 2 resources: mem_mb=10000, diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 13736d01..d951d2e7 100644 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -671,79 +671,6 @@ def plot_ch4_map(network): ) -def plot_map_without(network): - n = network.copy() - assign_location(n) - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 200.0 - line_upper_threshold = 1e4 - linewidth_factor = 3e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - # hack because impossible to drop buses... 
- if "EU gas" in n.buses.index: - eu_location = snakemake.params.plotting.get( - "eu_node_location", dict(x=-5.5, y=46) - ) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] - n.links.drop(to_drop, inplace=True) - - if snakemake.wildcards["ll"] == "v1.0": - line_widths = n.lines.s_nom - link_widths = n.links.p_nom - else: - line_widths = n.lines.s_nom_min - link_widths = n.links.p_nom_min - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - n.plot( - bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - handles = [] - labels = [] - - for s in (10, 5): - handles.append( - plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) - ) - labels.append(f"{s} GW") - l1_1 = ax.legend( - handles, - labels, - loc="upper left", - bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, - handletextpad=1.5, - title="Today's transmission", - ) - ax.add_artist(l1_1) - - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") - - def plot_series(network, carrier="AC", name="test"): n = network.copy() assign_location(n) @@ -1101,7 +1028,6 @@ if __name__ == "__main__": plot_h2_map(n, regions) plot_ch4_map(n) - plot_map_without(n) # plot_series(n, carrier="AC", name=suffix) # plot_series(n, carrier="heat", name=suffix) From 46a2f55c1bbebe9a491bea76f9805e363cdd6b5c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Sun, 27 Aug 2023 17:10:41 +0200 Subject: [PATCH 17/30] plot_network: remove function plot_series() This function is superseded by plot_balance_timeseries rule. 
--- scripts/plot_network.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index d951d2e7..dde7d61b 100644 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -671,6 +671,7 @@ def plot_ch4_map(network): ) +<<<<<<< HEAD def plot_series(network, carrier="AC", name="test"): n = network.copy() assign_location(n) @@ -984,6 +985,8 @@ def plot_map_perfect( ) +======= +>>>>>>> ead390b4 (plot_network: remove function plot_series()) if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake @@ -1028,6 +1031,3 @@ if __name__ == "__main__": plot_h2_map(n, regions) plot_ch4_map(n) - - # plot_series(n, carrier="AC", name=suffix) - # plot_series(n, carrier="heat", name=suffix) From ce4d18c8616917165d3716e9543b42667d82c44c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 17:32:17 +0100 Subject: [PATCH 18/30] remove plot_series() leftovers --- scripts/plot_network.py | 164 ---------------------------------------- 1 file changed, 164 deletions(-) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index dde7d61b..b06e5ce2 100644 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -671,168 +671,6 @@ def plot_ch4_map(network): ) -<<<<<<< HEAD -def plot_series(network, carrier="AC", name="test"): - n = network.copy() - assign_location(n) - assign_carriers(n) - - buses = n.buses.index[n.buses.carrier.str.contains(carrier)] - - supply = pd.DataFrame(index=n.snapshots) - for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name == "Link" else 2 - for i in range(n_port): - supply = pd.concat( - ( - supply, - ( - -1 - * c.pnl[f"p{str(i)}"] - .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]] - .groupby(c.df.carrier, axis=1) - .sum() - ), - ), - axis=1, - ) - - for c in n.iterate_components(n.one_port_components): - comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat( - ( - supply, - ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - - both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] - - positive_supply = supply[both] - negative_supply = supply[both] - - positive_supply[positive_supply < 0.0] = 0.0 - negative_supply[negative_supply > 0.0] = 0.0 - - supply[both] = positive_supply - - suffix = " charging" - - negative_supply.columns = negative_supply.columns + suffix - - supply = pd.concat((supply, negative_supply), axis=1) - - # 14-21.2 for flaute - # 19-26.1 for flaute - - start = "2013-02-19" - stop = "2013-02-26" - - threshold = 10e3 - - to_drop = supply.columns[(abs(supply) < threshold).all()] - - if len(to_drop) != 0: - logger.info(f"Dropping {to_drop.tolist()} from supply") - supply.drop(columns=to_drop, inplace=True) - - supply.index.name = None - - supply = supply / 1e3 - - supply.rename( - columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True - ) - supply.columns = supply.columns.str.replace("residential ", "") - supply.columns = supply.columns.str.replace("services ", "") - supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - - preferred_order = pd.Index( - [ - "electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat 
pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage", - ] - ) - - new_columns = preferred_order.intersection(supply.columns).append( - supply.columns.difference(preferred_order) - ) - - supply = supply.groupby(supply.columns, axis=1).sum() - fig, ax = plt.subplots() - fig.set_size_inches((8, 5)) - - ( - supply.loc[start:stop, new_columns].plot( - ax=ax, - kind="area", - stacked=True, - linewidth=0.0, - color=[ - snakemake.params.plotting["tech_colors"][i.replace(suffix, "")] - for i in new_columns - ], - ) - ) - - handles, labels = ax.get_legend_handles_labels() - - handles.reverse() - labels.reverse() - - new_handles = [] - new_labels = [] - - for i, item in enumerate(labels): - if "charging" not in item: - new_handles.append(handles[i]) - new_labels.append(labels[i]) - - ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) - ax.set_xlim([start, stop]) - ax.set_ylim([-1300, 1900]) - ax.grid(True) - ax.set_ylabel("Power [GW]") - fig.tight_layout() - - fig.savefig( - "results/{}maps/series-{}-{}-{}-{}.pdf".format( - snakemake.params.RDIR, - snakemake.wildcards["ll"], - carrier, - start, - stop, - ), - transparent=True, - ) - - def plot_map_perfect( network, components=["Link", "Store", "StorageUnit", "Generator"], @@ -985,8 +823,6 @@ def plot_map_perfect( ) -======= ->>>>>>> ead390b4 (plot_network: remove function plot_series()) if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake From ffd4e1f1af716dbd790e26da014366758b6188e5 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 20:34:59 +0100 Subject: [PATCH 19/30] plot_network: split into separate scripts for power, hydrogen, gas --- doc/plotting.rst | 21 +- doc/release_notes.rst | 6 + rules/collect.smk | 10 - rules/postprocess.smk | 87 ++- scripts/plot_gas_network.py | 251 ++++++++ scripts/plot_hydrogen_network.py | 267 ++++++++ scripts/plot_network.py | 869 -------------------------- scripts/plot_power_network.py | 271 ++++++++ scripts/plot_power_network_perfect.py | 199 ++++++ 9 files changed, 1086 insertions(+), 895 deletions(-) create mode 100644 scripts/plot_gas_network.py create mode 100644 scripts/plot_hydrogen_network.py delete mode 100644 scripts/plot_network.py create mode 100644 scripts/plot_power_network.py create mode 100644 scripts/plot_power_network_perfect.py diff --git a/doc/plotting.rst b/doc/plotting.rst index 895eab3b..02748cf2 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -22,7 +22,22 @@ Rule ``plot_summary`` .. _map_plot: -Rule ``plot_network`` -======================== +Rule ``plot_power_network`` +=========================== -.. automodule:: plot_network +.. automodule:: plot_power_network + +Rule ``plot_power_network_perfect`` +=================================== + +.. automodule:: plot_power_network_perfect + +Rule ``plot_hydrogen_network`` +============================== + +.. automodule:: plot_hydrogen_network + +Rule ``plot_gas_network`` +========================= + +.. automodule:: plot_gas_network diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 93d1a268..5bcaf0d2 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -37,6 +37,12 @@ Upcoming Release * Add the option to customise map projection in plotting config. +* The rule ``plot_network`` has been split into separate rules for plotting + electricity, hydrogen and gas networks. 
+ +* Added new collection rule ``plot_all`` which should be used instead of + ``plot_summary``. This allows running the rule :mod:`make_summary` and + :mod:`plot_summary` even if the network plotting rules fail. PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/rules/collect.smk b/rules/collect.smk index c9bb10ea..1d977da1 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -11,7 +11,6 @@ localrules: prepare_sector_networks, solve_elec_networks, solve_sector_networks, - plot_networks, rule cluster_networks: @@ -69,15 +68,6 @@ rule solve_sector_networks_perfect: ), -rule plot_networks: - input: - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] - ), - - rule validate_elec_networks: input: expand( diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 5bbffeb8..6db3079a 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -9,9 +9,8 @@ localrules: if config["foresight"] != "perfect": - rule plot_network: + rule plot_power_network: params: - foresight=config["foresight"], plotting=config["plotting"], input: network=RESULTS @@ -26,19 +25,66 @@ if config["foresight"] != "perfect": benchmark: ( BENCHMARKS - + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network.py" + + + rule plot_hydrogen_network: + params: + plotting=config["plotting"], + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + benchmark: + ( + BENCHMARKS + + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_hydrogen_network.py" + + + rule plot_gas_network: + params: + plotting=config["plotting"], + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + benchmark: + ( + BENCHMARKS + + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_gas_network.py" if config["foresight"] == "perfect": - rule plot_network: + rule plot_power_network_perfect: params: - foresight=config["foresight"], plotting=config["plotting"], input: network=RESULTS @@ -60,7 +106,7 @@ if config["foresight"] == "perfect": conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network_perfect.py" rule copy_config: @@ -95,11 +141,6 @@ rule make_summary: costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else 
"data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - plots=expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] - ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", @@ -161,6 +202,26 @@ rule plot_summary: "../scripts/plot_summary.py" +rule plot_all: + input: + RESULTS + "graphs/costs.pdf", + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + **config["scenario"] + ), + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + **config["scenario"] + ), + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + **config["scenario"] + ), + + STATISTICS_BARPLOTS = [ "capacity_factor", "installed_capacity", diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py new file mode 100644 index 00000000..a72c5c56 --- /dev/null +++ b/scripts/plot_gas_network.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised gas network, storage and selected other infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches +from plot_power_network import assign_location, load_projection + +logger = logging.getLogger(__name__) + + +def plot_ch4_map(n): + # if "gas pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + bus_size_factor = 8e7 + linewidth_factor = 1e4 + # MW below which not drawn + line_lower_threshold = 1e3 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) + fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) + fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) + # make a fake MultiIndex so that area is correct for legend + fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) + + methanation_i = n.links.query("carrier == 'Sabatier'").index + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) + + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) + # make a fake MultiIndex so that area is correct for legend 
+ biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) + + bus_sizes = pd.concat([fossil_gas, methanation, biogas]) + bus_sizes.sort_index(inplace=True) + + to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] + n.links.drop(to_remove, inplace=True) + + link_widths_rem = n.links.p_nom_opt / linewidth_factor + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + link_widths_orig = n.links.p_nom / linewidth_factor + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 + + max_usage = n.links_t.p0.abs().max(axis=0) + link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 + + tech_colors = snakemake.params.plotting["tech_colors"] + + pipe_colors = { + "gas pipeline": "#f08080", + "gas pipeline new": "#c46868", + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", + } + + link_color_used = n.links.carrier.map(pipe_colors) + + n.links.bus0 = n.links.bus0.str.replace(" gas", "") + n.links.bus1 = n.links.bus1.str.replace(" gas", "") + + bus_colors = { + "fossil gas": tech_colors["fossil gas"], + "methanation": tech_colors["methanation"], + "biogas": "seagreen", + } + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + n.plot( + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=pipe_colors["gas pipeline (in 2020)"], + link_widths=link_widths_orig, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], + link_widths=link_widths_rem, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=link_color_used, + link_widths=link_widths_used, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + sizes = [100, 10] + labels = [f"{s} TWh" for s in sizes] + sizes = [s / bus_size_factor * 1e6 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.03), + labelspacing=0.8, + frameon=False, + handletextpad=1, + title="gas sources", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.25, 1.03), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title="gas pipeline", + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = list(pipe_colors.values()) + list(bus_colors.values()) + labels = list(pipe_colors.keys()) + list(bus_colors.keys()) + + # legend on the side + # legend_kw = dict( + # bbox_to_anchor=(1.47, 1.04), + # frameon=False, + # ) + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.24), + ncol=2, + frameon=False, + ) + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_gas_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = 
gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_ch4_map(n) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py new file mode 100644 index 00000000..13728553 --- /dev/null +++ b/scripts/plot_hydrogen_network.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised hydrogen network, storage and selected other infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +from plot_power_network import assign_location, load_projection + +logger = logging.getLogger(__name__) + + +def group_pipes(df, drop_direction=False): + """ + Group pipes which connect same buses and return overall capacity. + """ + if drop_direction: + positive_order = df.bus0 < df.bus1 + df_p = df[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + df_n = df[~positive_order].rename(columns=swap_buses) + df = pd.concat([df_p, df_n]) + + # there are pipes for each investment period rename to AC buses name for plotting + df.index = df.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) + + +def plot_h2_map(n, regions): + # if "H2 pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + h2_storage = n.stores.query("carrier == 'H2'") + regions["H2"] = ( + h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) + .e_nom_opt.groupby(level=0) + .sum() + .div(1e6) + ) # TWh + regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) + + bus_size_factor = 1e5 + linewidth_factor = 7e3 + # MW below which not drawn + line_lower_threshold = 750 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + carriers = ["H2 Electrolysis", "H2 Fuel Cell"] + + elec = n.links[n.links.carrier.isin(carriers)].index + + bus_sizes = ( + n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() + / bus_size_factor + ) + + # make a fake MultiIndex so that area is correct for legend + bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) + # drop all links which are not H2 pipelines + n.links.drop( + n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) + + h2_new = n.links[n.links.carrier == "H2 pipeline"] + h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] + + if snakemake.params.foresight == "myopic": + # sum capacitiy for pipelines from different investment periods + h2_new = group_pipes(h2_new) + + if not h2_retro.empty: + h2_retro = ( + group_pipes(h2_retro, drop_direction=True) + .reindex(h2_new.index) + .fillna(0) + ) + + if not h2_retro.empty: + positive_order = h2_retro.bus0 < h2_retro.bus1 + h2_retro_p = h2_retro[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) + h2_retro = pd.concat([h2_retro_p, h2_retro_n]) + + h2_retro["index_orig"] = h2_retro.index 
+ h2_retro.index = h2_retro.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + + retro_w_new_i = h2_retro.index.intersection(h2_new.index) + h2_retro_w_new = h2_retro.loc[retro_w_new_i] + + retro_wo_new_i = h2_retro.index.difference(h2_new.index) + h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] + h2_retro_wo_new.index = h2_retro_wo_new.index_orig + + to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] + h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() + + else: + h2_total = h2_new.p_nom_opt + + link_widths_total = h2_total / linewidth_factor + + n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) + n.links = n.links.groupby(level=0).first() + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) + link_widths_retro = retro / linewidth_factor + link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + n.links.bus0 = n.links.bus0.str.replace(" H2", "") + n.links.bus1 = n.links.bus1.str.replace(" H2", "") + + regions = regions.to_crs(proj.proj4_init) + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} + + n.plot( + geomap=True, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=color_h2_pipe, + link_widths=link_widths_total, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + geomap=True, + bus_sizes=0, + link_colors=color_retrofit, + link_widths=link_widths_retro, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + regions.plot( + ax=ax, + column="H2", + cmap="Blues", + linewidths=0, + legend=True, + vmax=6, + vmin=0, + legend_kwds={ + "label": "Hydrogen Storage [TWh]", + "shrink": 0.7, + "extend": "max", + }, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1), + labelspacing=0.8, + handletextpad=0, + frameon=False, + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [30, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.23, 1), + frameon=False, + labelspacing=0.8, + handletextpad=1, + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] + labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.13), + ncol=2, + frameon=False, + ) + + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) + + ax.set_facecolor("white") + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_hydrogen_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = 
gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_h2_map(n, regions) diff --git a/scripts/plot_network.py b/scripts/plot_network.py deleted file mode 100644 index b06e5ce2..00000000 --- a/scripts/plot_network.py +++ /dev/null @@ -1,869 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates plots for optimised network topologies, including electricity, gas and -hydrogen networks, and regional generation, storage and conversion capacities -built. - -This rule plots a map of the network with technology capacities at the -nodes. -""" - -import logging - -import cartopy.crs as ccrs -import geopandas as gpd -import matplotlib.pyplot as plt -import pandas as pd -import pypsa -from make_summary import assign_carriers -from plot_summary import preferred_order, rename_techs -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - -logger = logging.getLogger(__name__) -plt.style.use(["ggplot"]) - - -def rename_techs_tyndp(tech): - tech = rename_techs(tech) - if "heat pump" in tech or "resistive heater" in tech: - return "power-to-heat" - elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: - return "power-to-gas" - elif tech == "H2": - return "H2 storage" - elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: - return "ammonia" - elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: - return "gas-to-power/heat" - # elif "solar" in tech: - # return "solar" - elif tech in ["Fischer-Tropsch", "methanolisation"]: - return "power-to-liquid" - elif "offshore wind" in tech: - return "offshore wind" - elif "CC" in tech or "sequestration" in tech: - return "CCS" - else: - return tech - - -def assign_location(n): - for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: - # these have already been assigned defaults - if i == -1: - continue - names = ifind.index[ifind == i] - c.df.loc[names, "location"] = names.str[:i] - - -def plot_map( - network, - components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, - transmission=False, - with_legend=True, -): - tech_colors = snakemake.params.plotting["tech_colors"] - - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - costs = pd.DataFrame(index=n.buses.index) - - for comp in components: - df_c = getattr(n, comp) - - if df_c.empty: - continue - - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - - costs_c = ( - (df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]) - .sum() - .unstack() - .fillna(0.0) - ) - costs = pd.concat([costs, costs_c], axis=1) - - logger.debug(f"{comp}, {costs}") - - costs = costs.groupby(costs.columns, axis=1).sum() - - costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - - new_columns = preferred_order.intersection(costs.columns).append( - costs.columns.difference(preferred_order) - ) - costs = costs[new_columns] - - for item in new_columns: - if item not in tech_colors: 
- logger.warning(f"{item} not in config/plotting/tech_colors") - - costs = costs.stack() # .sort_index() - - # hack because impossible to drop buses... - eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - logger.info(f"Dropping non-buses {to_drop.tolist()}") - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - threshold = 100e6 # 100 mEUR/a - carriers = costs.groupby(level=1).sum() - carriers = carriers.where(carriers > threshold).dropna() - carriers = list(carriers.index) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 4e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - title = "added grid" - - if snakemake.wildcards["ll"] == "v1.0": - # should be zero - line_widths = n.lines.s_nom_opt - n.lines.s_nom - link_widths = n.links.p_nom_opt - n.links.p_nom - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "current grid" - else: - line_widths = n.lines.s_nom_opt - n.lines.s_nom_min - link_widths = n.links.p_nom_opt - n.links.p_nom_min - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - fig, ax = plt.subplots(subplot_kw={"projection": proj}) - fig.set_size_inches(7, 6) - - n.plot( - bus_sizes=costs / bus_size_factor, - bus_colors=tech_colors, - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - if with_legend: - colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] - labels = carriers + ["HVAC line", "HVDC link"] - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight") - - -def group_pipes(df, drop_direction=False): - """ - Group pipes 
which connect same buses and return overall capacity. - """ - if drop_direction: - positive_order = df.bus0 < df.bus1 - df_p = df[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - df_n = df[~positive_order].rename(columns=swap_buses) - df = pd.concat([df_p, df_n]) - - # there are pipes for each investment period rename to AC buses name for plotting - df.index = df.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) - - -def plot_h2_map(network, regions): - n = network.copy() - if "H2 pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - h2_storage = n.stores.query("carrier == 'H2'") - regions["H2"] = ( - h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) - .e_nom_opt.groupby(level=0) - .sum() - .div(1e6) - ) # TWh - regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) - - bus_size_factor = 1e5 - linewidth_factor = 7e3 - # MW below which not drawn - line_lower_threshold = 750 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - carriers = ["H2 Electrolysis", "H2 Fuel Cell"] - - elec = n.links[n.links.carrier.isin(carriers)].index - - bus_sizes = ( - n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() - / bus_size_factor - ) - - # make a fake MultiIndex so that area is correct for legend - bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) - # drop all links which are not H2 pipelines - n.links.drop( - n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True - ) - - h2_new = n.links[n.links.carrier == "H2 pipeline"] - h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] - - if snakemake.params.foresight == "myopic": - # sum capacitiy for pipelines from different investment periods - h2_new = group_pipes(h2_new) - - if not h2_retro.empty: - h2_retro = ( - group_pipes(h2_retro, drop_direction=True) - .reindex(h2_new.index) - .fillna(0) - ) - - if not h2_retro.empty: - positive_order = h2_retro.bus0 < h2_retro.bus1 - h2_retro_p = h2_retro[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) - h2_retro = pd.concat([h2_retro_p, h2_retro_n]) - - h2_retro["index_orig"] = h2_retro.index - h2_retro.index = h2_retro.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - - retro_w_new_i = h2_retro.index.intersection(h2_new.index) - h2_retro_w_new = h2_retro.loc[retro_w_new_i] - - retro_wo_new_i = h2_retro.index.difference(h2_new.index) - h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] - h2_retro_wo_new.index = h2_retro_wo_new.index_orig - - to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] - h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() - - else: - h2_total = h2_new.p_nom_opt - - link_widths_total = h2_total / linewidth_factor - - n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) - n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - retro = n.links.p_nom_opt.where( - n.links.carrier == "H2 pipeline retrofitted", other=0.0 - ) - link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - 
n.links.bus0 = n.links.bus0.str.replace(" H2", "") - n.links.bus1 = n.links.bus1.str.replace(" H2", "") - - regions = regions.to_crs(proj.proj4_init) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - color_h2_pipe = "#b3f3f4" - color_retrofit = "#499a9c" - - bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} - - n.plot( - geomap=True, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=color_h2_pipe, - link_widths=link_widths_total, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - geomap=True, - bus_sizes=0, - link_colors=color_retrofit, - link_widths=link_widths_retro, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - regions.plot( - ax=ax, - column="H2", - cmap="Blues", - linewidths=0, - legend=True, - vmax=6, - vmin=0, - legend_kwds={ - "label": "Hydrogen Storage [TWh]", - "shrink": 0.7, - "extend": "max", - }, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - sizes = [s / bus_size_factor * 1e3 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1), - labelspacing=0.8, - handletextpad=0, - frameon=False, - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [30, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.23, 1), - frameon=False, - labelspacing=0.8, - handletextpad=1, - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] - labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.13), - ncol=2, - frameon=False, - ) - - add_legend_patches(ax, colors, labels, legend_kw=legend_kw) - - ax.set_facecolor("white") - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" - ) - - -def plot_ch4_map(network): - n = network.copy() - - if "gas pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - bus_size_factor = 8e7 - linewidth_factor = 1e4 - # MW below which not drawn - line_lower_threshold = 1e3 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fossil_gas_i = n.generators[n.generators.carrier == "gas"].index - fossil_gas = ( - n.generators_t.p.loc[:, fossil_gas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.generators.loc[fossil_gas_i, "bus"]) - .sum() - / bus_size_factor - ) - fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) - fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) - # make a fake MultiIndex so that area is correct for legend - fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) - - methanation_i = n.links.query("carrier == 'Sabatier'").index - methanation = ( - abs( - n.links_t.p1.loc[:, methanation_i].mul( - n.snapshot_weightings.generators, axis=0 - ) - ) - .sum() - .groupby(n.links.loc[methanation_i, "bus1"]) - .sum() - / bus_size_factor - ) - methanation = ( - methanation.groupby(methanation.index) - .sum() - .rename(index=lambda x: x.replace(" gas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - methanation.index = 
pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - - biogas_i = n.stores[n.stores.carrier == "biogas"].index - biogas = ( - n.stores_t.p.loc[:, biogas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.stores.loc[biogas_i, "bus"]) - .sum() - / bus_size_factor - ) - biogas = ( - biogas.groupby(biogas.index) - .sum() - .rename(index=lambda x: x.replace(" biogas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) - - bus_sizes = pd.concat([fossil_gas, methanation, biogas]) - bus_sizes.sort_index(inplace=True) - - to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] - n.links.drop(to_remove, inplace=True) - - link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - - max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0.0 - - tech_colors = snakemake.params.plotting["tech_colors"] - - pipe_colors = { - "gas pipeline": "#f08080", - "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": "lightgrey", - "gas pipeline (available)": "#e8d1d1", - } - - link_color_used = n.links.carrier.map(pipe_colors) - - n.links.bus0 = n.links.bus0.str.replace(" gas", "") - n.links.bus1 = n.links.bus1.str.replace(" gas", "") - - bus_colors = { - "fossil gas": tech_colors["fossil gas"], - "methanation": tech_colors["methanation"], - "biogas": "seagreen", - } - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - n.plot( - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=pipe_colors["gas pipeline (in 2020)"], - link_widths=link_widths_orig, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=pipe_colors["gas pipeline (available)"], - link_widths=link_widths_rem, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=link_color_used, - link_widths=link_widths_used, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - sizes = [100, 10] - labels = [f"{s} TWh" for s in sizes] - sizes = [s / bus_size_factor * 1e6 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.03), - labelspacing=0.8, - frameon=False, - handletextpad=1, - title="gas sources", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.25, 1.03), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title="gas pipeline", - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = list(pipe_colors.values()) + list(bus_colors.values()) - labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - - # legend on the side - # legend_kw = dict( - # bbox_to_anchor=(1.47, 1.04), - # frameon=False, - # ) - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.24), - ncol=2, - frameon=False, - ) - - add_legend_patches( - ax, - colors, - 
labels, - legend_kw=legend_kw, - ) - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" - ) - - -def plot_map_perfect( - network, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=1.7e10, -): - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - # investment periods - investments = n.snapshots.levels[0] - - costs = {} - for comp in components: - df_c = n.df(comp) - if df_c.empty: - continue - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" - - active = pd.concat( - [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], - axis=1, - ).astype(int) - capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost - capital_cost_t = ( - (active.mul(capital_cost, axis=0)) - .groupby([n.df(comp).location, n.df(comp).nice_group]) - .sum() - ) - - capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") - - costs[comp] = capital_cost_t - - costs = pd.concat(costs).groupby(level=[1, 2]).sum() - costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) - - new_columns = preferred_order.intersection(costs.index.levels[1]).append( - costs.index.levels[1].difference(preferred_order) - ) - costs = costs.reindex(new_columns, level=1) - - for item in new_columns: - if item not in snakemake.config["plotting"]["tech_colors"]: - print( - "Warning!", - item, - "not in config/plotting/tech_colors, assign random color", - ) - snakemake.config["plotting"]["tech_colors"] = "pink" - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - print("dropping non-buses", to_drop) - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 2e3 - ac_color = "gray" - dc_color = "m" - - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "Today's transmission" - - line_widths[line_widths < line_lower_threshold] = 0.0 - link_widths[link_widths < line_lower_threshold] = 0.0 - - line_widths[line_widths > line_upper_threshold] = line_upper_threshold - link_widths[link_widths > line_upper_threshold] = line_upper_threshold - - for year in costs.columns: - fig, ax = plt.subplots(subplot_kw={"projection": proj}) - fig.set_size_inches(7, 6) - fig.suptitle(year) - - n.plot( - bus_sizes=costs[year] / bus_size_factor, - bus_colors=snakemake.config["plotting"]["tech_colors"], - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = 
[f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - fig.savefig( - snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight" - ) - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake( - "plot_network", - simpl="", - opts="", - clusters="37", - ll="v1.0", - sector_opts="4380H-T-H-B-I-A-dist1", - ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - n = pypsa.Network(snakemake.input.network) - - regions = gpd.read_file(snakemake.input.regions).set_index("name") - - map_opts = snakemake.params.plotting["map"] - - if map_opts["boundaries"] is None: - map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] - - proj_kwargs = snakemake.params.plotting.get("projection", dict(name="EqualEarth")) - proj_func = getattr(ccrs, proj_kwargs.pop("name")) - proj = proj_func(**proj_kwargs) - - if snakemake.params["foresight"] == "perfect": - plot_map_perfect( - n, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=2e10, - ) - else: - plot_map( - n, - components=["generators", "links", "stores", "storage_units"], - bus_size_factor=2e10, - transmission=False, - ) - - plot_h2_map(n, regions) - plot_ch4_map(n) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py new file mode 100644 index 00000000..48aa01e3 --- /dev/null +++ b/scripts/plot_power_network.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built. 
+""" + +import logging + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from plot_summary import preferred_order, rename_techs +from _helpers import configure_logging +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + +def rename_techs_tyndp(tech): + tech = rename_techs(tech) + if "heat pump" in tech or "resistive heater" in tech: + return "power-to-heat" + elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: + return "power-to-gas" + elif tech == "H2": + return "H2 storage" + elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: + return "ammonia" + elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: + return "gas-to-power/heat" + # elif "solar" in tech: + # return "solar" + elif tech in ["Fischer-Tropsch", "methanolisation"]: + return "power-to-liquid" + elif "offshore wind" in tech: + return "offshore wind" + elif "CC" in tech or "sequestration" in tech: + return "CCS" + else: + return tech + + +def assign_location(n): + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) + for i in ifind.value_counts().index: + # these have already been assigned defaults + if i == -1: + continue + names = ifind.index[ifind == i] + c.df.loc[names, "location"] = names.str[:i] + + +def load_projection(plotting_params): + proj_kwargs = plotting_params.get("projection", dict(name="EqualEarth")) + proj_func = getattr(ccrs, proj_kwargs.pop("name")) + return proj_func(**proj_kwargs) + + +def plot_map( + n, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=2e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.params.plotting["tech_colors"] + + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + costs = pd.DataFrame(index=n.buses.index) + + for comp in components: + df_c = getattr(n, comp) + + if df_c.empty: + continue + + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" + + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) + costs = pd.concat([costs, costs_c], axis=1) + + logger.debug(f"{comp}, {costs}") + + costs = costs.groupby(costs.columns, axis=1).sum() + + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) + + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) + costs = costs[new_columns] + + for item in new_columns: + if item not in tech_colors: + logger.warning(f"{item} not in config/plotting/tech_colors") + + costs = costs.stack() # .sort_index() + + # hack because impossible to drop buses... 
+ eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + logger.info(f"Dropping non-buses {to_drop.tolist()}") + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + threshold = 100e6 # 100 mEUR/a + carriers = costs.groupby(level=1).sum() + carriers = carriers.where(carriers > threshold).dropna() + carriers = list(carriers.index) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 4e3 + ac_color = "rosybrown" + dc_color = "darkseagreen" + + title = "added grid" + + if snakemake.wildcards["ll"] == "v1.0": + # should be zero + line_widths = n.lines.s_nom_opt - n.lines.s_nom + link_widths = n.links.p_nom_opt - n.links.p_nom + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "current grid" + else: + line_widths = n.lines.s_nom_opt - n.lines.s_nom_min + link_widths = n.links.p_nom_opt - n.links.p_nom_min + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + title = "total grid" + + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) + + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + + n.plot( + bus_sizes=costs / bus_size_factor, + bus_colors=tech_colors, + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + if with_legend: + colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] + labels = carriers + ["HVAC line", "HVDC link"] + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + 
sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map(n) diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py new file mode 100644 index 00000000..ce8afef0 --- /dev/null +++ b/scripts/plot_power_network_perfect.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built for the perfect foresight scenario. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from pypsa.plot import add_legend_circles, add_legend_lines +from plot_power_network import assign_location, rename_techs_tyndp, load_projection +from plot_summary import preferred_order + +logger = logging.getLogger(__name__) + + +def plot_map_perfect( + n, + components=["Link", "Store", "StorageUnit", "Generator"], + bus_size_factor=2e10, +): + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + # investment periods + investments = n.snapshots.levels[0] + + costs = {} + for comp in components: + df_c = n.df(comp) + if df_c.empty: + continue + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" + + active = pd.concat( + [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], + axis=1, + ).astype(int) + capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost + capital_cost_t = ( + (active.mul(capital_cost, axis=0)) + .groupby([n.df(comp).location, n.df(comp).nice_group]) + .sum() + ) + + capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") + + costs[comp] = capital_cost_t + + costs = pd.concat(costs).groupby(level=[1, 2]).sum() + costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) + + new_columns = preferred_order.intersection(costs.index.levels[1]).append( + costs.index.levels[1].difference(preferred_order) + ) + costs = costs.reindex(new_columns, level=1) + + for item in new_columns: + if item not in snakemake.config["plotting"]["tech_colors"]: + print( + "Warning!", + item, + "not in config/plotting/tech_colors, assign random color", + ) + snakemake.config["plotting"]["tech_colors"] = "pink" + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + print("dropping non-buses", to_drop) + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 2e3 + ac_color = "gray" + dc_color = "m" + + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = 
"Today's transmission" + + line_widths[line_widths < line_lower_threshold] = 0.0 + link_widths[link_widths < line_lower_threshold] = 0.0 + + line_widths[line_widths > line_upper_threshold] = line_upper_threshold + link_widths[link_widths > line_upper_threshold] = line_upper_threshold + + for year in costs.columns: + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + fig.suptitle(year) + + n.plot( + bus_sizes=costs[year] / bus_size_factor, + bus_colors=snakemake.config["plotting"]["tech_colors"], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + fig.savefig(snakemake.output[f"map_{year}"], bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_perfect", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map_perfect(n) From 3f112f0e957d804e3a556a37b02ab4250c31e114 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 20:37:33 +0100 Subject: [PATCH 20/30] incorporate network plots in rule --- Snakefile | 15 +++++++++++++++ doc/release_notes.rst | 3 --- rules/postprocess.smk | 20 -------------------- 3 files changed, 15 insertions(+), 23 deletions(-) diff --git a/Snakefile b/Snakefile index 14c9e821..76ea7b80 100644 --- a/Snakefile +++ b/Snakefile @@ -75,6 +75,21 @@ if config["foresight"] == "perfect": rule all: input: RESULTS + "graphs/costs.pdf", + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + **config["scenario"] + ), + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + **config["scenario"] + ), + expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + **config["scenario"] + ), default_target: True diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 5bcaf0d2..7ffbe63c 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -40,9 +40,6 @@ Upcoming Release * The rule ``plot_network`` has been split into separate rules for 
plotting electricity, hydrogen and gas networks. -* Added new collection rule ``plot_all`` which should be used instead of - ``plot_summary``. This allows running the rule :mod:`make_summary` and - :mod:`plot_summary` even if the network plotting rules fail. PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 6db3079a..22cefcbd 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -202,26 +202,6 @@ rule plot_summary: "../scripts/plot_summary.py" -rule plot_all: - input: - RESULTS + "graphs/costs.pdf", - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] - ), - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", - **config["scenario"] - ), - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", - **config["scenario"] - ), - - STATISTICS_BARPLOTS = [ "capacity_factor", "installed_capacity", From 30ccde5b908aa680ea858fcdb08b09b593e901cf Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:39:07 +0000 Subject: [PATCH 21/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- rules/postprocess.smk | 2 -- scripts/plot_gas_network.py | 5 +++-- scripts/plot_hydrogen_network.py | 6 +++--- scripts/plot_power_network.py | 3 ++- scripts/plot_power_network_perfect.py | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 22cefcbd..f7c50733 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -32,7 +32,6 @@ if config["foresight"] != "perfect": script: "../scripts/plot_power_network.py" - rule plot_hydrogen_network: params: plotting=config["plotting"], @@ -56,7 +55,6 @@ if config["foresight"] != "perfect": script: "../scripts/plot_hydrogen_network.py" - rule plot_gas_network: params: plotting=config["plotting"], diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py index a72c5c56..cd515e96 100644 --- a/scripts/plot_gas_network.py +++ b/scripts/plot_gas_network.py @@ -3,7 +3,8 @@ # # SPDX-License-Identifier: MIT """ -Creates map of optimised gas network, storage and selected other infrastructure. +Creates map of optimised gas network, storage and selected other +infrastructure. """ import logging @@ -13,8 +14,8 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa from _helpers import configure_logging -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches logger = logging.getLogger(__name__) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py index 13728553..a1183311 100644 --- a/scripts/plot_hydrogen_network.py +++ b/scripts/plot_hydrogen_network.py @@ -3,7 +3,8 @@ # # SPDX-License-Identifier: MIT """ -Creates map of optimised hydrogen network, storage and selected other infrastructure. +Creates map of optimised hydrogen network, storage and selected other +infrastructure. 
""" import logging @@ -13,9 +14,8 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa from _helpers import configure_logging -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches logger = logging.getLogger(__name__) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py index 48aa01e3..271e638d 100644 --- a/scripts/plot_power_network.py +++ b/scripts/plot_power_network.py @@ -14,12 +14,13 @@ import geopandas as gpd import matplotlib.pyplot as plt import pandas as pd import pypsa -from plot_summary import preferred_order, rename_techs from _helpers import configure_logging +from plot_summary import preferred_order, rename_techs from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches logger = logging.getLogger(__name__) + def rename_techs_tyndp(tech): tech = rename_techs(tech) if "heat pump" in tech or "resistive heater" in tech: diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py index ce8afef0..ff576d33 100644 --- a/scripts/plot_power_network_perfect.py +++ b/scripts/plot_power_network_perfect.py @@ -14,9 +14,9 @@ import matplotlib.pyplot as plt import pandas as pd import pypsa from _helpers import configure_logging -from pypsa.plot import add_legend_circles, add_legend_lines -from plot_power_network import assign_location, rename_techs_tyndp, load_projection +from plot_power_network import assign_location, load_projection, rename_techs_tyndp from plot_summary import preferred_order +from pypsa.plot import add_legend_circles, add_legend_lines logger = logging.getLogger(__name__) From a2cd042472ac6e4148afeb651337e812eb6b2092 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 14 Aug 2023 18:25:58 +0200 Subject: [PATCH 22/30] plot clustered network topology before optimisation --- Snakefile | 1 + rules/postprocess.smk | 20 +++++++ scripts/plot_power_network_clustered.py | 79 +++++++++++++++++++++++++ 3 files changed, 100 insertions(+) create mode 100644 scripts/plot_power_network_clustered.py diff --git a/Snakefile b/Snakefile index 76ea7b80..23835a7e 100644 --- a/Snakefile +++ b/Snakefile @@ -75,6 +75,7 @@ if config["foresight"] == "perfect": rule all: input: RESULTS + "graphs/costs.pdf", + expand(RESULTS + "maps/power-network-{clusters}.pdf", **config["scenario"]), expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", diff --git a/rules/postprocess.smk b/rules/postprocess.smk index f7c50733..d275279e 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -9,6 +9,26 @@ localrules: if config["foresight"] != "perfect": + rule plot_power_network_clustered: + params: + plotting=config["plotting"], + input: + network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", + regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + rc="matplotlibrc", + output: + map=RESULTS + "maps/power-network-{clusters}.pdf", + threads: 1 + resources: + mem_mb=4000, + benchmark: + BENCHMARKS + "plot_power_network_clustered/elec_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network_clustered.py" + + rule plot_power_network: params: plotting=config["plotting"], diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py new file mode 100644 index 00000000..e1140fd6 --- 
/dev/null +++ b/scripts/plot_power_network_clustered.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Plot clustered electricity transmission network. +""" + +import pypsa +import geopandas as gpd +import matplotlib.pyplot as plt +import cartopy.crs as ccrs +from matplotlib.lines import Line2D +from pypsa.plot import add_legend_lines + +from plot_power_network import load_projection + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_clustered", + clusters=128, + configfiles=["../../config/config.test.yaml"] + ) + + plt.style.use(snakemake.input.rc) + + lw_factor = 2e3 + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name') + + proj = load_projection(snakemake.params.plotting) + + fig, ax = plt.subplots(figsize=(8,8), subplot_kw={"projection": proj}) + regions.to_crs(proj.proj4_init).plot( + ax=ax, + facecolor='none', + edgecolor='lightgray', + linewidth=0.75 + ) + n.plot( + ax=ax, + margin=0.06, + line_widths=n.lines.s_nom / lw_factor, + link_colors=n.links.p_nom.apply( + lambda x: "darkseagreen" if x > 0 else "skyblue" + ), + link_widths=2., + ) + + sizes = [10, 20] + labels = [f"HVAC ({s} GW)" for s in sizes] + scale = 1e3 / lw_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc=[0.25, 0.9], + frameon=False, + labelspacing=0.5, + handletextpad=1, + fontsize=13, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="rosybrown"), legend_kw=legend_kw + ) + + handles = [ + Line2D([0], [0], color="darkseagreen", lw=2), + Line2D([0], [0], color="skyblue", lw=2), + ] + plt.legend(handles, ["HVDC existing", "HVDC planned"], frameon=False, loc=[0., 0.9], fontsize=13) + + plt.savefig(snakemake.output.map, bbox_inches='tight') From 5fcfafe971cdb0b7ccbd31fa4a613d989f499b0d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:50:03 +0000 Subject: [PATCH 23/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- rules/postprocess.smk | 4 ++-- scripts/plot_power_network_clustered.py | 31 +++++++++++++------------ 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index d275279e..d1c65ced 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -14,7 +14,8 @@ if config["foresight"] != "perfect": plotting=config["plotting"], input: network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", - regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + regions_onshore=RESOURCES + + "regions_onshore_elec_s{simpl}_{clusters}.geojson", rc="matplotlibrc", output: map=RESULTS + "maps/power-network-{clusters}.pdf", @@ -28,7 +29,6 @@ if config["foresight"] != "perfect": script: "../scripts/plot_power_network_clustered.py" - rule plot_power_network: params: plotting=config["plotting"], diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py index e1140fd6..318afbb9 100644 --- a/scripts/plot_power_network_clustered.py +++ b/scripts/plot_power_network_clustered.py @@ -6,15 +6,13 @@ Plot clustered electricity transmission network. 
""" -import pypsa +import cartopy.crs as ccrs import geopandas as gpd import matplotlib.pyplot as plt -import cartopy.crs as ccrs +import pypsa from matplotlib.lines import Line2D -from pypsa.plot import add_legend_lines - from plot_power_network import load_projection - +from pypsa.plot import add_legend_lines if __name__ == "__main__": if "snakemake" not in globals(): @@ -23,7 +21,7 @@ if __name__ == "__main__": snakemake = mock_snakemake( "plot_power_network_clustered", clusters=128, - configfiles=["../../config/config.test.yaml"] + configfiles=["../../config/config.test.yaml"], ) plt.style.use(snakemake.input.rc) @@ -32,16 +30,13 @@ if __name__ == "__main__": n = pypsa.Network(snakemake.input.network) - regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name') + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") proj = load_projection(snakemake.params.plotting) - fig, ax = plt.subplots(figsize=(8,8), subplot_kw={"projection": proj}) + fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={"projection": proj}) regions.to_crs(proj.proj4_init).plot( - ax=ax, - facecolor='none', - edgecolor='lightgray', - linewidth=0.75 + ax=ax, facecolor="none", edgecolor="lightgray", linewidth=0.75 ) n.plot( ax=ax, @@ -50,7 +45,7 @@ if __name__ == "__main__": link_colors=n.links.p_nom.apply( lambda x: "darkseagreen" if x > 0 else "skyblue" ), - link_widths=2., + link_widths=2.0, ) sizes = [10, 20] @@ -74,6 +69,12 @@ if __name__ == "__main__": Line2D([0], [0], color="darkseagreen", lw=2), Line2D([0], [0], color="skyblue", lw=2), ] - plt.legend(handles, ["HVDC existing", "HVDC planned"], frameon=False, loc=[0., 0.9], fontsize=13) + plt.legend( + handles, + ["HVDC existing", "HVDC planned"], + frameon=False, + loc=[0.0, 0.9], + fontsize=13, + ) - plt.savefig(snakemake.output.map, bbox_inches='tight') + plt.savefig(snakemake.output.map, bbox_inches="tight") From 7becfdea9c044c98224cccf1e6b09a092db4a774 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 20:51:51 +0100 Subject: [PATCH 24/30] resolve merge conflict --- Snakefile | 2 +- rules/postprocess.smk | 3 +-- scripts/plot_power_network_clustered.py | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Snakefile b/Snakefile index 23835a7e..8a256acc 100644 --- a/Snakefile +++ b/Snakefile @@ -75,7 +75,7 @@ if config["foresight"] == "perfect": rule all: input: RESULTS + "graphs/costs.pdf", - expand(RESULTS + "maps/power-network-{clusters}.pdf", **config["scenario"]), + expand(RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", **config["scenario"]), expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", diff --git a/rules/postprocess.smk b/rules/postprocess.smk index d1c65ced..1ca6eb4d 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -16,9 +16,8 @@ if config["foresight"] != "perfect": network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", - rc="matplotlibrc", output: - map=RESULTS + "maps/power-network-{clusters}.pdf", + map=RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", threads: 1 resources: mem_mb=4000, diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py index 318afbb9..8217ac2e 100644 --- a/scripts/plot_power_network_clustered.py +++ b/scripts/plot_power_network_clustered.py @@ -24,8 +24,6 @@ if __name__ == "__main__": configfiles=["../../config/config.test.yaml"], ) - 
plt.style.use(snakemake.input.rc) - lw_factor = 2e3 n = pypsa.Network(snakemake.input.network) From a56bd830f8b5808189020414efcae11a7bfc184a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 25 Jan 2024 19:53:23 +0000 Subject: [PATCH 25/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- Snakefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Snakefile b/Snakefile index 8a256acc..2021b49f 100644 --- a/Snakefile +++ b/Snakefile @@ -75,7 +75,10 @@ if config["foresight"] == "perfect": rule all: input: RESULTS + "graphs/costs.pdf", - expand(RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", **config["scenario"]), + expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"] + ), expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", From 707adcec69d13e6c1e628fb80ebb7cf64c5aff82 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 26 Jan 2024 09:15:04 +0100 Subject: [PATCH 26/30] all: make H2 and CH4 maps dependent on network config --- Snakefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Snakefile b/Snakefile index 2021b49f..211e7540 100644 --- a/Snakefile +++ b/Snakefile @@ -86,12 +86,16 @@ rule all: ), expand( RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config["sector"]["H2_network"] + else [], **config["scenario"] ), expand( RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config["sector"]["gas_network"] + else [], **config["scenario"] ), default_target: True From b73c614f5bc122cb78cfa6e594817edfe6538b41 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 26 Jan 2024 16:39:03 +0100 Subject: [PATCH 27/30] plot_gas_network: save max usage calculation --- scripts/plot_gas_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py index cd515e96..e2953604 100644 --- a/scripts/plot_gas_network.py +++ b/scripts/plot_gas_network.py @@ -97,7 +97,7 @@ def plot_ch4_map(n): link_widths_orig = n.links.p_nom / linewidth_factor link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - max_usage = n.links_t.p0.abs().max(axis=0) + max_usage = n.links_t.p0[n.links.index].abs().max(axis=0) link_widths_used = max_usage / linewidth_factor link_widths_used[max_usage < line_lower_threshold] = 0.0 From f84e73006fa478e9a3b176081615eb34ad1ae216 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 26 Jan 2024 17:05:38 +0100 Subject: [PATCH 28/30] postprocess: keep network plotting triggers in make_summary --- Snakefile | 23 ----------------------- rules/postprocess.smk | 23 +++++++++++++++++++++++ 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Snakefile b/Snakefile index 211e7540..14c9e821 100644 --- a/Snakefile +++ b/Snakefile @@ -75,29 +75,6 @@ if config["foresight"] == "perfect": rule all: input: RESULTS + "graphs/costs.pdf", - expand( - RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", - **config["scenario"] - ), - expand( - RESULTS - + 
"maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] - ), - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" - if config["sector"]["H2_network"] - else [], - **config["scenario"] - ), - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" - if config["sector"]["gas_network"] - else [], - **config["scenario"] - ), default_target: True diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 1ca6eb4d..d01f90db 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -158,6 +158,29 @@ rule make_summary: costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), + expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"] + ), + costs_plot=expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + **config["scenario"] + ), + h2_plot=expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config["sector"]["H2_network"] + else [], + **config["scenario"] + ), + ch4_plot=expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config["sector"]["gas_network"] + else [], + **config["scenario"] + ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", From d6288d3d4dfed4bb2ce58fa4053eabf0123e3404 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 16:06:20 +0000 Subject: [PATCH 29/30] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- rules/postprocess.smk | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index d01f90db..88b56e2f 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -150,6 +150,10 @@ rule make_summary: scenario=config["scenario"], RDIR=RDIR, input: + expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"] + ), networks=expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -158,10 +162,6 @@ rule make_summary: costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - expand( - RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", - **config["scenario"] - ), costs_plot=expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", From 8cb7396c112de5e80e04a1ba115ea5a733a4d7da Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Fri, 26 Jan 2024 17:07:19 +0100 Subject: [PATCH 30/30] make_summary: name all inputs --- rules/postprocess.smk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/postprocess.smk b/rules/postprocess.smk index d01f90db..50fe634c 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -158,7 +158,7 @@ rule make_summary: costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else 
"data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - expand( + ac_plot=expand( RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", **config["scenario"] ),