diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 7ffbe63c..86763287 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -28,6 +28,26 @@ Upcoming Release * Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``. +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes. + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network`. + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file. + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share`. + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent. + +* Bugfix: Correctly read out number of solver threads from configuration file. + * Air-sourced heat pumps can now also be built in rural areas. Previously, only ground-sourced heat pumps were considered for this category. diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..411bfd57 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. 
automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 4744aa25..0586ec01 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -123,7 +123,7 @@ rule cluster_gas_network: "../scripts/cluster_gas_network.py" -rule build_heat_demands: +rule build_daily_heat_demand: params: snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: @@ -131,18 +131,39 @@ rule build_heat_demands: regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + LOGS + "build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc", benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + BENCHMARKS + "build_daily_heat_demand/{scope}_s{simpl}_{clusters}" conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: @@ -235,6 +256,7 @@ rule build_energy_totals: energy_name=RESOURCES + "energy_totals.csv", co2_name=RESOURCES + "co2_totals.csv", transport_name=RESOURCES + "transport_data.csv", + district_heat_share=RESOURCES + "district_heat_share.csv", threads: 16 resources: mem_mb=10000, @@ -688,6 +710,26 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + rule prepare_sector_network: params: co2_budget=config["co2_budget"], @@ -727,7 +769,6 @@ rule prepare_sector_network: if config["foresight"] == "overnight" else RESOURCES + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{planning_horizons}.csv", @@ -740,9 +781,10 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + 
"industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", + hourly_heat_demand_total=RESOURCES + + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index aa94c945..75334073 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,8 +1,42 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule add_existing_baseyear: params: baseyear=config["scenario"]["planning_horizons"][0], @@ -19,7 +53,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index ad310f9f..76051976 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -17,6 +17,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating_distribution=RESOURCES + + 
"existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index f219b912..46d4bc31 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,97 +409,18 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". # TODO start from original file - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) - - # convert GW to MW - df *= 1e3 - - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 ) - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + techs = existing_heating.columns.get_level_values(1).unique() - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: + for name in existing_heating.columns.get_level_values(0).unique(): name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + heat_pump_type = "air" if "urban" 
in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -507,7 +428,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -520,27 +441,28 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -548,21 +470,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -572,8 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -583,20 +503,21 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom=( + existing_heating.loc[nodes, (name, "oil boiler")] + * ratio + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -624,6 +545,8 @@ def 
add_heating_capacities_installed_before_baseyear( # drop assets which are at the end of their lifetime links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index + logger.info("Removing following links because at end of their lifetime:") + logger.info(links_i) n.mremove("Link", links_i) diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 95% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py index b983f125..e334b1b3 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -18,7 +18,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - "build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..86c42631 --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. +""" + +import logging + +import pandas as pd +from prepare_sector_network import get + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_district_heat_share", + simpl="", + clusters=48, + planning_horizons="2050", + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) + + df.to_csv(snakemake.output.district_heat_share) diff --git 
a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 39b2a1be..c67bb49d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -391,13 +391,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -572,16 +565,36 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(countries, idees): + # district heating share + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) + + district_heat_share = district_heat / total_heat + + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -750,6 +763,9 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(countries, idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..67993c29 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. 
+""" +import country_converter as coco +import numpy as np +import pandas as pd + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) + + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + + nodal_heat_name_fraction = pd.DataFrame(dtype=float) + + nodal_heat_name_fraction["urban central"] = dist_fraction + + for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[ + sector + ] * (urban_fraction - dist_fraction) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 + + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 + + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 + + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) + + build_existing_heating() diff --git a/scripts/build_hourly_heat_demand.py 
b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..c972da89 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. +""" + +from itertools import product + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_hourly_heat_demands", + scope="total", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) + + heat_demand.index.name = "snapshots" + + ds = heat_demand.stack().to_xarray() + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index d24feb79..22fb1bcc 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -18,7 +18,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from _helpers import update_config_with_sector_opts from add_electricity import calculate_annuity, sanitize_carriers from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement @@ -1639,40 +1639,25 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential", "services"] uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) + name = f"{sector} {use}" - if use == 
"space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile - - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + electric_heat_supply[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) @@ -1695,7 +1680,9 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1735,12 +1722,17 @@ def add_heat(n, costs): for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1748,9 +1740,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1763,11 +1755,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1778,7 +1770,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1786,7 +1778,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1794,9 +1786,9 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) @@ -1808,17 +1800,17 @@ def add_heat(n, costs): for heat_pump_type in heat_pump_types: costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" efficiency = ( - cop[heat_pump_type][nodes[name]] + cop[heat_pump_type][nodes] if options["time_dep_hp_cop"] else costs.at[costs_name, "efficiency"] ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + 
bus0=nodes, + bus1=nodes + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] @@ -1832,17 +1824,17 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1850,9 +1842,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, @@ -1871,8 +1863,8 @@ def add_heat(n, costs): n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1886,9 +1878,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], @@ -1901,10 +1893,10 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], @@ -1918,13 +1910,13 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1932,10 +1924,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1951,12 +1943,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + 
bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1988,11 +1980,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2123,50 +2115,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2384,7 +2332,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2393,10 +2341,10 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", 
"efficiency"] @@ -2839,7 +2787,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2849,10 +2797,10 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"],