From fbff32dcfcba57fab3cfaea1e10ae76f1cad75f1 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 12 Jan 2024 16:42:12 +0100 Subject: [PATCH 01/15] build_pop_weighted_energy: don't reduce district heat share Previously the DH share was being multiplied by the population weighting, reducing the DH share with multiple nodes. --- scripts/build_population_weighted_energy_totals.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 879e3b9b..20467f72 100644 --- a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,4 +26,9 @@ if __name__ == "__main__": nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) + # district heating share should not be divided by population fraction + dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) + dh_share.index = pop_layout.index + nodal_energy_totals["district heat share"] = dh_share + nodal_energy_totals.to_csv(snakemake.output[0]) From 6c20ce83d7f0fd509013cc8d060a4bb91fd2c879 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 16:47:19 +0100 Subject: [PATCH 02/15] move building of daily heat profile to its own script Previously this was handled inside prepare_sector_network.py. --- rules/build_sector.smk | 36 +++++++--- ...t_demand.py => build_daily_heat_demand.py} | 0 scripts/build_hourly_heat_demand.py | 69 +++++++++++++++++++ scripts/prepare_sector_network.py | 29 ++------ 4 files changed, 102 insertions(+), 32 deletions(-) rename scripts/{build_heat_demand.py => build_daily_heat_demand.py} (100%) create mode 100644 scripts/build_hourly_heat_demand.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 4744aa25..efaff2a3 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -123,7 +123,7 @@ rule cluster_gas_network: "../scripts/cluster_gas_network.py" -rule build_heat_demands: +rule build_daily_heat_demand: params: snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: @@ -131,18 +131,39 @@ rule build_heat_demands: regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + LOGS + "build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc", benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + BENCHMARKS + "build_daily_heat_demand/{scope}_s{simpl}_{clusters}" conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots=config["snapshots"], + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + 
"../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: @@ -727,7 +748,6 @@ rule prepare_sector_network: if config["foresight"] == "overnight" else RESOURCES + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{planning_horizons}.csv", @@ -740,9 +760,7 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", + hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 100% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..94ad7266 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. 
+""" + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from itertools import product + + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, + axis=1, + names = ["sector use", "node"]) + + heat_demand.index.name="snapshots" + + print(heat_demand) + + print(heat_demand.stack()) + + ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) + + print(ds) + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 4d36e7d4..1f404c4e 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -18,7 +18,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from _helpers import update_config_with_sector_opts from add_electricity import calculate_annuity, sanitize_carriers from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement @@ -1639,14 +1639,8 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") - ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1654,25 +1648,14 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile + name = f"{sector} {use}" - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape 
/ heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) From bd8a5ecf2bd5124a1062e0fb8666cdcaa18df19d Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Mon, 15 Jan 2024 17:51:08 +0100 Subject: [PATCH 03/15] build_energy_totals: output district heat share to separate file --- rules/build_sector.smk | 1 + scripts/build_energy_totals.py | 31 +++++++++++++++++++++---------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index efaff2a3..bfe168e1 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -256,6 +256,7 @@ rule build_energy_totals: energy_name=RESOURCES + "energy_totals.csv", co2_name=RESOURCES + "co2_totals.csv", transport_name=RESOURCES + "transport_data.csv", + district_heat_share=RESOURCES + "district_heat_share.csv", threads: 16 resources: mem_mb=10000, diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 39b2a1be..53aab980 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -391,13 +391,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -572,16 +565,31 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(idees): + + # district heating share + district_heat = idees[ + ["derived heat residential", "derived heat services"] + ].sum(axis=1) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + + district_heat_share = district_heat/total_heat + # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -750,6 +758,9 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) From 1a477d6b325dd8d78b561295509cf9608bd2b056 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: 
Mon, 15 Jan 2024 18:55:09 +0100 Subject: [PATCH 04/15] move calculation of district heating share to its own script Now the script build_district_heat_share.py does what the old function create_nodes_for_heating() in prepare_sector_networks.py did. There is no need to build nodes lists for each heating sector, since all nodes have district heating now. --- rules/build_sector.smk | 22 +++ scripts/build_district_heat_share.py | 77 +++++++++ scripts/build_energy_totals.py | 6 +- ...build_population_weighted_energy_totals.py | 5 - scripts/prepare_sector_network.py | 161 +++++++----------- 5 files changed, 164 insertions(+), 107 deletions(-) create mode 100644 scripts/build_district_heat_share.py diff --git a/rules/build_sector.smk b/rules/build_sector.smk index bfe168e1..14156268 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -710,6 +710,27 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" + + +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + rule prepare_sector_network: params: co2_budget=config["co2_budget"], @@ -762,6 +783,7 @@ rule prepare_sector_network: industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..d521214d --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. 
+""" + +import pandas as pd + +from prepare_sector_network import get + +import logging + + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_heat_demands", + simpl="", + clusters=48, + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, + index_col=0).squeeze() + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame(dtype=float) + + df["original district heat share"] = district_heat_share + df["district fraction of node"] = dist_fraction_node + df["urban fraction"] = urban_fraction + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 53aab980..306caf4d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -568,7 +568,7 @@ def build_energy_totals(countries, eurostat, swiss, idees): return df -def build_district_heat_share(idees): +def build_district_heat_share(countries, idees): # district heating share district_heat = idees[ @@ -578,6 +578,8 @@ def build_district_heat_share(idees): district_heat_share = district_heat/total_heat + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] @@ -758,7 +760,7 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) - district_heat_share = build_district_heat_share(idees) + district_heat_share = build_district_heat_share(countries, idees) district_heat_share.to_csv(snakemake.output.district_heat_share) base_year_emissions = params["base_emissions_year"] diff --git a/scripts/build_population_weighted_energy_totals.py b/scripts/build_population_weighted_energy_totals.py index 20467f72..879e3b9b 100644 --- 
a/scripts/build_population_weighted_energy_totals.py +++ b/scripts/build_population_weighted_energy_totals.py @@ -26,9 +26,4 @@ if __name__ == "__main__": nodal_energy_totals.index = pop_layout.index nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) - # district heating share should not be divided by population fraction - dh_share = energy_totals["district heat share"].loc[pop_layout.ct].fillna(0.0) - dh_share.index = pop_layout.index - nodal_energy_totals["district heat share"] = dh_share - nodal_energy_totals.to_csv(snakemake.output[0]) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 1f404c4e..8d56ae6b 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1678,7 +1678,10 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1715,6 +1718,8 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 + nodes = pop_layout.index + for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" @@ -1722,8 +1727,8 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1731,9 +1736,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1746,11 +1751,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1761,7 +1766,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1769,7 +1774,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1777,9 +1782,9 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) @@ -1790,17 +1795,17 @@ def add_heat(n, costs): costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" efficiency = ( - cop[heat_pump_type][nodes[name]] + cop[heat_pump_type][nodes] if options["time_dep_hp_cop"] else costs.at[costs_name, "efficiency"] ) n.madd( "Link", - nodes[name], + nodes, suffix=f" 
{name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] @@ -1814,17 +1819,17 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1832,9 +1837,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, @@ -1853,8 +1858,8 @@ def add_heat(n, costs): n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1868,9 +1873,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], @@ -1883,10 +1888,10 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], @@ -1900,13 +1905,13 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1914,10 +1919,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1933,12 +1938,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - 
bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1970,11 +1975,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2105,50 +2110,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2366,7 +2327,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2375,10 +2336,10 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", 
efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] @@ -2821,7 +2782,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2831,10 +2792,10 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], From 9897cd6f0535fdbef82b66816c844a9f0ebf10b5 Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 16:24:39 +0100 Subject: [PATCH 05/15] only add district heating (DH) for nodes with non-zero DH share --- scripts/prepare_sector_network.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 8d56ae6b..0bf9848b 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1718,11 +1718,15 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - nodes = pop_layout.index for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( From d98ad95332a8f9c81c06d5a42c426fd0b4be921a Mon Sep 17 00:00:00 2001 From: Tom Brown Date: Fri, 19 Jan 2024 18:42:49 +0100 Subject: [PATCH 06/15] move building of distribution of existing heating to own script This makes the distribution of existing heating to urban/rural, residential/services and spatially more transparent. 
--- rules/solve_myopic.smk | 37 ++++- scripts/add_existing_baseyear.py | 136 ++++-------------- .../build_existing_heating_distribution.py | 108 ++++++++++++++ 3 files changed, 172 insertions(+), 109 deletions(-) create mode 100644 scripts/build_existing_heating_distribution.py diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 7ca8857d..20043286 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,8 +1,40 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule add_existing_baseyear: params: baseyear=config["scenario"]["planning_horizons"][0], @@ -19,7 +51,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index c8486758..01d54cc2 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,97 +409,20 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) + existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, + header=[0,1], + index_col=0) - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - df.fillna(0.0, inplace=True) + techs = existing_heating.columns.get_level_values(1).unique() - # convert GW to MW - df *= 1e3 + for name in existing_heating.columns.get_level_values(0).unique(): - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, - ) - - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) - - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + heat_pump_type = "air" if "urban" in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -507,7 +430,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -520,27 +443,26 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " 
heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -548,21 +470,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating[(name, "resistive heater")][nodes] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -572,8 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating[(name, "gas boiler")][nodes] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -583,20 +503,20 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom= ( + existing_heating[(name, "oil boiler")][nodes] + * ratio + / costs.at["decentral oil boiler", "efficiency"]), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -624,6 +544,8 @@ def add_heating_capacities_installed_before_baseyear( # drop assets which are at the end of their lifetime links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index + logger.info("Removing following links because at end of their lifetime:") + logger.info(links_i) n.mremove("Link", links_i) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..fe282d39 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for 
initial planning +horizon. +""" +import pandas as pd +import sys +from pypsa.descriptors import Dict +import numpy as np +import country_converter as coco + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + techs = [ + "gas boiler", + "oil boiler", + "resistive heater", + "air heat pump", + "ground heat pump", + ] + + existing_heating = pd.read_csv(snakemake.input.existing_heating, + index_col=0, + header=0) + + # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, + index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, + index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, + index_col=0) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), + axis=0) + + + nodal_heat_name_fraction = pd.DataFrame(dtype=float) + + nodal_heat_name_fraction["urban central"] = dist_fraction + + for sector in sectors: + + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + + + nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], + axis=0) for name in nodal_heat_name_fraction.columns}, + axis=1, + names=["heat name","technology"]) + + + #move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. + + nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] + nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + + nodal_heat_name_tech[("urban central","ground heat pump")] = 0. 
+ + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + + build_existing_heating() From 2183e742b2b44a7caca47a525537f8e827e501e5 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:18:26 +0100 Subject: [PATCH 07/15] add release notes, minor code improvements --- doc/release_notes.rst | 18 +++++++++++++++ doc/sector.rst | 20 ++++++++++++++-- rules/solve_overnight.smk | 7 +++--- scripts/add_existing_baseyear.py | 8 +++---- scripts/build_daily_heat_demand.py | 3 ++- scripts/build_district_heat_share.py | 15 ++++++------ scripts/build_energy_totals.py | 4 ++-- .../build_existing_heating_distribution.py | 23 ++++++++++--------- scripts/build_hourly_heat_demand.py | 18 +++++---------- scripts/prepare_sector_network.py | 2 +- 10 files changed, 75 insertions(+), 43 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index dc1a9dd1..56fee0d8 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -28,6 +28,24 @@ Upcoming Release * Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``. +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes. + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network`. + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file. + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share`. + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent. + PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..411bfd57 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. 
automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index 39778162..47c86410 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -28,9 +28,10 @@ rule solve_sector_network: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", python=RESULTS + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: config["solving"]["solver_options"][config["solving"]["solver"]["options"]].get( - "threads", 4 -) + threads: + config["solving"]["solver_options"][ + config["solving"]["solver"]["options"] + ].get("threads", 4) resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 01d54cc2..d61ece85 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -451,7 +451,7 @@ def add_heating_capacities_installed_before_baseyear( efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=existing_heating[(name, f"{heat_pump_type} heat pump")][nodes] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] * ratio / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) @@ -470,7 +470,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - existing_heating[(name, "resistive heater")][nodes] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), @@ -493,7 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - existing_heating[(name, "gas boiler")][nodes] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -514,7 +514,7 @@ def add_heating_capacities_installed_before_baseyear( capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], p_nom= ( - existing_heating[(name, "oil boiler")][nodes] + existing_heating.loc[nodes, (name, "oil boiler")] * ratio / costs.at["decentral oil boiler", "efficiency"]), build_year=int(grouping_year), diff --git a/scripts/build_daily_heat_demand.py b/scripts/build_daily_heat_demand.py index b983f125..e334b1b3 100644 --- a/scripts/build_daily_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -18,7 +18,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index d521214d..996ed861 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -21,9 +21,10 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_district_heat_share", simpl="", 
clusters=48, + planning_horizons="2050", ) investment_year = int(snakemake.wildcards.planning_horizons[-4:]) @@ -68,10 +69,10 @@ if __name__ == "__main__": f"resulting in new average share of {dist_fraction_node.mean():.2%}" ) - df = pd.DataFrame(dtype=float) - - df["original district heat share"] = district_heat_share - df["district fraction of node"] = dist_fraction_node - df["urban fraction"] = urban_fraction + df = pd.DataFrame({ + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction + }, dtype=float) df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 306caf4d..08d5bef5 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -583,10 +583,10 @@ def build_district_heat_share(countries, idees): # Missing district heating share dh_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] - ) + ).div(100).squeeze() # make conservative assumption and take minimum from both data sets district_heat_share = pd.concat( - [district_heat_share, dh_share.reindex(index=district_heat_share.index) / 100], axis=1 + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) district_heat_share.name = "district heat share" diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index fe282d39..443c5baa 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT """ @@ -7,8 +7,6 @@ Builds table of existing heat generation capacities for initial planning horizon. """ import pandas as pd -import sys -from pypsa.descriptors import Dict import numpy as np import country_converter as coco @@ -17,19 +15,13 @@ cc = coco.CountryConverter() def build_existing_heating(): # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] existing_heating = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan existing_heating.loc["Montenegro"] = np.nan existing_heating.loc["Macedonia"] = np.nan @@ -104,5 +96,14 @@ def build_existing_heating(): if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) build_existing_heating() diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 94ad7266..2d1dee54 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -6,19 +6,19 @@ Build hourly heat demand time series from daily ones. 
""" -import pandas as pd -import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts from itertools import product - +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles if __name__ == "__main__": if "snakemake" not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_hourly_heat_demands", + scope="total", simpl="", clusters=48, ) @@ -58,12 +58,6 @@ if __name__ == "__main__": heat_demand.index.name="snapshots" - print(heat_demand) - - print(heat_demand.stack()) - - ds = heat_demand.stack().to_xarray()#xr.Dataset.from_dataframe(heat_demand) - - print(ds) + ds = heat_demand.stack().to_xarray() ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 0bf9848b..241f3c30 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1654,7 +1654,7 @@ def build_heat_demand(n): heat_demand[name] = ( heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( + electric_heat_supply[name] = ( heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 From d3cf329456056d02fcfcbabe175e6ff9a8e2bb0c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:28:51 +0100 Subject: [PATCH 08/15] correctly read number of solver threads in rule definition --- doc/release_notes.rst | 2 ++ rules/common.smk | 7 +++++++ rules/solve_electricity.smk | 2 +- rules/solve_myopic.smk | 2 +- rules/solve_overnight.smk | 5 +---- rules/solve_perfect.smk | 2 +- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 56fee0d8..1a0013d5 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -46,6 +46,8 @@ Upcoming Release existing heating to urban/rural, residential/services and spatially more transparent. +* Bugfix: Correctly read out number of solver threads from configuration file. 
+ PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/rules/common.smk b/rules/common.smk index 2298ff91..1654180f 100644 --- a/rules/common.smk +++ b/rules/common.smk @@ -13,6 +13,13 @@ for path in helper_source_path: from _helpers import validate_checksum +def solver_threads(w): + solver_options = config["solving"]["solver_options"] + option_set = config["solving"]["solver"]["options"] + threads = solver_options[option_set].get("threads", 4) + return threads + + def memory(w): factor = 3.0 for o in w.opts.split("-"): diff --git a/rules/solve_electricity.smk b/rules/solve_electricity.smk index 7f6092be..ac433cf9 100644 --- a/rules/solve_electricity.smk +++ b/rules/solve_electricity.smk @@ -25,7 +25,7 @@ rule solve_network: + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log", benchmark: BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}" - threads: 4 + threads: solver_threads resources: mem_mb=memory, walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 20043286..8c46ed59 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -137,7 +137,7 @@ rule solve_sector_network_myopic: + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", python=LOGS + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: 4 + threads: solver_threads resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index 47c86410..aa08b8c3 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -28,10 +28,7 @@ rule solve_sector_network: + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", python=RESULTS + "logs/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - threads: - config["solving"]["solver_options"][ - config["solving"]["solver"]["options"] - ].get("threads", 4) + threads: solver_threads resources: mem_mb=config["solving"]["mem"], walltime=config["solving"].get("walltime", "12:00:00"), diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index a7856fa9..ad310f9f 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -127,7 +127,7 @@ rule solve_sector_network_perfect: output: RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc", - threads: 4 + threads: solver_threads resources: mem_mb=config["solving"]["mem"], shadow: From 9865a970893d9e515786f33c629b14f71645bf1e Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:29:32 +0100 Subject: [PATCH 09/15] apply automated formatting --- rules/build_sector.smk | 11 +-- rules/solve_myopic.smk | 6 +- scripts/add_existing_baseyear.py | 17 ++-- scripts/build_district_heat_share.py | 31 ++++---- scripts/build_energy_totals.py | 21 ++--- .../build_existing_heating_distribution.py | 77 +++++++++++-------- scripts/build_hourly_heat_demand.py | 12 +-- scripts/prepare_sector_network.py | 12 +-- 8 files changed, 105 insertions(+), 82 deletions(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 14156268..a24f9f7d 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -710,8 +710,6 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" - - rule build_district_heat_share: params: sector=config["sector"], @@ -719,7 +717,8 @@ rule 
build_district_heat_share: district_heat_share=RESOURCES + "district_heat_share.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", output: - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", threads: 1 resources: mem_mb=1000, @@ -782,8 +781,10 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - hourly_heat_demand_total=RESOURCES + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + hourly_heat_demand_total=RESOURCES + + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index 8c46ed59..75334073 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -11,8 +11,10 @@ rule build_existing_heating_distribution: input: existing_heating="data/existing_infrastructure/existing_heating_raw.csv", clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", - clustered_pop_energy_layout=RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", - district_heat_share=RESOURCES + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", output: existing_heating_distribution=RESOURCES + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index d61ece85..c67d5f8b 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,15 +409,13 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - existing_heating = pd.read_csv(snakemake.input.existing_heating_distribution, - header=[0,1], - index_col=0) - + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 + ) techs = existing_heating.columns.get_level_values(1).unique() for name in existing_heating.columns.get_level_values(0).unique(): - name_type = "central" if name == "urban central" else "decentral" nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) @@ -451,7 +449,9 @@ def add_heating_capacities_installed_before_baseyear( efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) @@ -513,10 +513,11 @@ def add_heating_capacities_installed_before_baseyear( efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom= ( + p_nom=( existing_heating.loc[nodes, (name, "oil boiler")] * ratio - / costs.at["decentral oil boiler", "efficiency"]), + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 996ed861..3353437a 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -6,12 +6,10 @@ Build district heat shares at each node, depending on investment year. 
""" -import pandas as pd - -from prepare_sector_network import get - import logging +import pandas as pd +from prepare_sector_network import get logger = logging.getLogger(__name__) @@ -29,11 +27,11 @@ if __name__ == "__main__": investment_year = int(snakemake.wildcards.planning_horizons[-4:]) - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - district_heat_share = pd.read_csv(snakemake.input.district_heat_share, - index_col=0).squeeze() + district_heat_share = pd.read_csv( + snakemake.input.district_heat_share, index_col=0 + ).squeeze() # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] @@ -62,17 +60,22 @@ if __name__ == "__main__": # difference of max potential and today's share of district heating diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(snakemake.config["sector"]["district_heating"]["progress"], investment_year) + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) dist_fraction_node += diff * progress logger.info( f"Increase district heating share by a progress factor of {progress:.2%} " f"resulting in new average share of {dist_fraction_node.mean():.2%}" ) - df = pd.DataFrame({ - "original district heat share": district_heat_share, - "district fraction of node": dist_fraction_node, - "urban fraction": urban_fraction - }, dtype=float) + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 08d5bef5..c67bb49d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -569,21 +569,24 @@ def build_energy_totals(countries, eurostat, swiss, idees): def build_district_heat_share(countries, idees): - # district heating share - district_heat = idees[ - ["derived heat residential", "derived heat services"] - ].sum(axis=1) - total_heat = idees[["thermal uses residential", "thermal uses services"]].sum(axis=1) + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) - district_heat_share = district_heat/total_heat + district_heat_share = district_heat / total_heat district_heat_share = district_heat_share.reindex(countries) # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] - ).div(100).squeeze() + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() + ) # make conservative assumption and take minimum from both data sets district_heat_share = pd.concat( [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py index 443c5baa..67993c29 100644 --- a/scripts/build_existing_heating_distribution.py +++ b/scripts/build_existing_heating_distribution.py @@ -6,9 +6,9 @@ Builds table of existing heat generation capacities for initial planning horizon. 
""" -import pandas as pd -import numpy as np import country_converter as coco +import numpy as np +import pandas as pd cc = coco.CountryConverter() @@ -16,9 +16,9 @@ cc = coco.CountryConverter() def build_existing_heating(): # retrieve existing heating capacities - existing_heating = pd.read_csv(snakemake.input.existing_heating, - index_col=0, - header=0) + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) # data for Albania, Montenegro and Macedonia not included in database existing_heating.loc["Albania"] = np.nan @@ -33,24 +33,25 @@ def build_existing_heating(): existing_heating.index = cc.convert(existing_heating.index, to="iso2") # coal and oil boilers are assimilated to oil boilers - existing_heating["oil boiler"] = existing_heating["oil boiler"] + existing_heating["coal boiler"] + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) existing_heating.drop(["coal boiler"], axis=1, inplace=True) # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) nodal_heating = existing_heating.loc[pop_layout.ct] nodal_heating.index = pop_layout.index nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] - energy_layout = pd.read_csv(snakemake.input.clustered_pop_energy_layout, - index_col=0) + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) uses = ["space", "water"] sectors = ["residential", "services"] @@ -58,39 +59,51 @@ def build_existing_heating(): nodal_sectoral_totals = pd.DataFrame(dtype=float) for sector in sectors: - nodal_sectoral_totals[sector] = energy_layout[[f"total {sector} {use}" for use in uses]].sum(axis=1) - - nodal_sectoral_fraction = nodal_sectoral_totals.div(nodal_sectoral_totals.sum(axis=1), - axis=0) + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) nodal_heat_name_fraction = pd.DataFrame(dtype=float) nodal_heat_name_fraction["urban central"] = dist_fraction for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[ + sector + ] * (urban_fraction - dist_fraction) - nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[sector]*(1 - urban_fraction) - nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[sector]*(urban_fraction - dist_fraction) + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) - - nodal_heat_name_tech = pd.concat({name : nodal_heating .multiply(nodal_heat_name_fraction[name], - axis=0) for name in nodal_heat_name_fraction.columns}, - axis=1, - names=["heat name","technology"]) - - - #move all ground HPs to rural, all air to urban + # move all ground HPs to 
rural, all air to urban for sector in sectors: - nodal_heat_name_tech[(f"{sector} rural","ground heat pump")] += (nodal_heat_name_tech[("urban central","ground heat pump")]*nodal_sectoral_fraction[sector] - + nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")]) - nodal_heat_name_tech[(f"{sector} urban decentral","ground heat pump")] = 0. + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 - nodal_heat_name_tech[(f"{sector} urban decentral","air heat pump")] += nodal_heat_name_tech[(f"{sector} rural","air heat pump")] - nodal_heat_name_tech[(f"{sector} rural","air heat pump")] = 0. + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 - nodal_heat_name_tech[("urban central","ground heat pump")] = 0. + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) diff --git a/scripts/build_hourly_heat_demand.py b/scripts/build_hourly_heat_demand.py index 2d1dee54..c972da89 100644 --- a/scripts/build_hourly_heat_demand.py +++ b/scripts/build_hourly_heat_demand.py @@ -48,16 +48,16 @@ if __name__ == "__main__": ) if use == "space": - heat_demand[f"{sector} {use}"] = daily_space_heat_demand * intraday_year_profile + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) else: heat_demand[f"{sector} {use}"] = intraday_year_profile - heat_demand = pd.concat(heat_demand, - axis=1, - names = ["sector use", "node"]) + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) - heat_demand.index.name="snapshots" + heat_demand.index.name = "snapshots" - ds = heat_demand.stack().to_xarray() + ds = heat_demand.stack().to_xarray() ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 241f3c30..ba92e137 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1639,8 +1639,11 @@ def add_land_transport(n, costs): def build_heat_demand(n): - - heat_demand_shape = xr.open_dataset(snakemake.input.hourly_heat_demand_total).to_dataframe().unstack(level=1) + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) + ) sectors = ["residential", "services"] uses = ["water", "space"] @@ -1648,7 +1651,6 @@ def build_heat_demand(n): heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - name = f"{sector} {use}" heat_demand[name] = ( @@ -1678,8 +1680,7 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - district_heat_info = pd.read_csv(snakemake.input.district_heat_share, - index_col=0) + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) dist_fraction = district_heat_info["district fraction of node"] urban_fraction = district_heat_info["urban fraction"] @@ -1718,7 +1719,6 @@ def add_heat(n, costs): # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 solar_thermal = options["solar_cf_correction"] * solar_thermal / 1e3 - for name in heat_systems: name_type = "central" if name == "urban central" else 
"decentral" From 31c7c10fc5e86702d47f9e2da7eb7dfd230e8b54 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 09:30:53 +0100 Subject: [PATCH 10/15] git-blame-ignore-revs: add automated formatting commit --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..53f63e71 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e \ No newline at end of file From 757fbcc464c4c04b94d2a29cd325963f0a35aa8a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 08:31:57 +0000 Subject: [PATCH 11/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .git-blame-ignore-revs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 53f63e71..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,4 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 -9865a970893d9e515786f33c629b14f71645bf1e \ No newline at end of file +9865a970893d9e515786f33c629b14f71645bf1e From fd57311094f9ef86400e94121cb33e87b6abacbf Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 10:10:40 +0100 Subject: [PATCH 12/15] build_district_heat_share: make safe for single-country runs --- scripts/build_district_heat_share.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 3353437a..6e934a2b 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -31,7 +31,7 @@ if __name__ == "__main__": district_heat_share = pd.read_csv( snakemake.input.district_heat_share, index_col=0 - ).squeeze() + )["district heat share"] # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] From 025f48c0c2ba402f61f9cf2d0cf8c4656beae1ac Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 09:11:06 +0000 Subject: [PATCH 13/15] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/build_district_heat_share.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py index 6e934a2b..86c42631 100644 --- a/scripts/build_district_heat_share.py +++ b/scripts/build_district_heat_share.py @@ -29,9 +29,9 @@ if __name__ == "__main__": pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - district_heat_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0 - )["district heat share"] + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] # make ct-based share nodal district_heat_share = district_heat_share.loc[pop_layout.ct] From 6c593a551b6f090b1066285061ec41f48c325d1c Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Mon, 22 Jan 2024 18:07:33 +0100 Subject: [PATCH 14/15] build_hourly_heat_demand: only pass subset of snapshot config --- rules/build_sector.smk | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/rules/build_sector.smk b/rules/build_sector.smk index a24f9f7d..0586ec01 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -147,7 +147,7 @@ rule build_daily_heat_demand: rule build_hourly_heat_demand: params: - snapshots=config["snapshots"], + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: heat_profile="data/heat_load_profile_BDEW.csv", heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", From 999ff852888f4c1fcab28796332cefc7d16f6272 Mon Sep 17 00:00:00 2001 From: Fabian Neumann Date: Thu, 25 Jan 2024 17:19:55 +0100 Subject: [PATCH 15/15] fix snakemake.inputs for add_existing_baseyear with perfect foresight --- rules/solve_perfect.smk | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index ad310f9f..76051976 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -17,6 +17,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",