diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 0b78b5b6..3f1edbd8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -6,3 +6,4 @@ 5d1ef8a64055a039aa4a0834d2d26fe7752fe9a0 92080b1cd2ca5f123158571481722767b99c2b27 13769f90af4500948b0376d57df4cceaa13e78b5 +9865a970893d9e515786f33c629b14f71645bf1e diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a38e6800..d90f5cb9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: # Formatting with "black" coding style - repo: https://github.com/psf/black-pre-commit-mirror - rev: 23.12.1 + rev: 24.1.1 hooks: # Format Python files - id: black diff --git a/doc/plotting.rst b/doc/plotting.rst index 895eab3b..02748cf2 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -22,7 +22,22 @@ Rule ``plot_summary`` .. _map_plot: -Rule ``plot_network`` -======================== +Rule ``plot_power_network`` +=========================== -.. automodule:: plot_network +.. automodule:: plot_power_network + +Rule ``plot_power_network_perfect`` +=================================== + +.. automodule:: plot_power_network_perfect + +Rule ``plot_hydrogen_network`` +============================== + +.. automodule:: plot_hydrogen_network + +Rule ``plot_gas_network`` +========================= + +.. automodule:: plot_gas_network diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 93d1a268..86763287 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -28,6 +28,26 @@ Upcoming Release * Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``. +* Bugfix: Do not reduce district heat share when building population-weighted + energy statistics. Previously the district heating share was being multiplied + by the population weighting, reducing the DH share with multiple nodes. + +* Move building of daily heat profile to its own rule + :mod:`build_hourly_heat_demand` from :mod:`prepare_sector_network`. + +* In :mod:`build_energy_totals`, district heating shares are now reported in a + separate file. + +* Move calculation of district heating share to its own rule + :mod:`build_district_heat_share`. + +* Move building of distribution of existing heating to own rule + :mod:`build_existing_heating_distribution`. This makes the distribution of + existing heating to urban/rural, residential/services and spatially more + transparent. + +* Bugfix: Correctly read out number of solver threads from configuration file. + * Air-sourced heat pumps can now also be built in rural areas. Previously, only ground-sourced heat pumps were considered for this category. @@ -37,6 +57,9 @@ Upcoming Release * Add the option to customise map projection in plotting config. +* The rule ``plot_network`` has been split into separate rules for plotting + electricity, hydrogen and gas networks. + PyPSA-Eur 0.9.0 (5th January 2024) ================================== diff --git a/doc/sector.rst b/doc/sector.rst index 303e7ed2..411bfd57 100644 --- a/doc/sector.rst +++ b/doc/sector.rst @@ -20,6 +20,12 @@ Rule ``add_existing_baseyear`` .. automodule:: add_existing_baseyear +Rule ``build_existing_heating_distribution`` +============================================================================== + +.. automodule:: build_existing_heating_distribution + + Rule ``build_ammonia_production`` ============================================================================== @@ -60,10 +66,20 @@ Rule ``build_gas_network`` .. 
automodule:: build_gas_network -Rule ``build_heat_demand`` +Rule ``build_daily_heat_demand`` ============================================================================== -.. automodule:: build_heat_demand +.. automodule:: build_daily_heat_demand + +Rule ``build_hourly_heat_demand`` +============================================================================== + +.. automodule:: build_hourly_heat_demand + +Rule ``build_district_heat_share`` +============================================================================== + +.. automodule:: build_district_heat_share Rule ``build_industrial_distribution_key`` ============================================================================== diff --git a/rules/build_sector.smk b/rules/build_sector.smk index 4744aa25..0586ec01 100644 --- a/rules/build_sector.smk +++ b/rules/build_sector.smk @@ -123,7 +123,7 @@ rule cluster_gas_network: "../scripts/cluster_gas_network.py" -rule build_heat_demands: +rule build_daily_heat_demand: params: snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, input: @@ -131,18 +131,39 @@ rule build_heat_demands: regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc", output: - heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", resources: mem_mb=20000, threads: 8 log: - LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc", + LOGS + "build_daily_heat_demand_{scope}_{simpl}_{clusters}.loc", benchmark: - BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}" + BENCHMARKS + "build_daily_heat_demand/{scope}_s{simpl}_{clusters}" conda: "../envs/environment.yaml" script: - "../scripts/build_heat_demand.py" + "../scripts/build_daily_heat_demand.py" + + +rule build_hourly_heat_demand: + params: + snapshots={k: config["snapshots"][k] for k in ["start", "end", "inclusive"]}, + input: + heat_profile="data/heat_load_profile_BDEW.csv", + heat_demand=RESOURCES + "daily_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + output: + heat_demand=RESOURCES + "hourly_heat_demand_{scope}_elec_s{simpl}_{clusters}.nc", + resources: + mem_mb=2000, + threads: 8 + log: + LOGS + "build_hourly_heat_demand_{scope}_{simpl}_{clusters}.loc", + benchmark: + BENCHMARKS + "build_hourly_heat_demand/{scope}_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/build_hourly_heat_demand.py" rule build_temperature_profiles: @@ -235,6 +256,7 @@ rule build_energy_totals: energy_name=RESOURCES + "energy_totals.csv", co2_name=RESOURCES + "co2_totals.csv", transport_name=RESOURCES + "transport_data.csv", + district_heat_share=RESOURCES + "district_heat_share.csv", threads: 16 resources: mem_mb=10000, @@ -688,6 +710,26 @@ rule build_transport_demand: "../scripts/build_transport_demand.py" +rule build_district_heat_share: + params: + sector=config["sector"], + input: + district_heat_share=RESOURCES + "district_heat_share.csv", + clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + output: + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + threads: 1 + resources: + mem_mb=1000, + log: + LOGS + "build_district_heat_share_s{simpl}_{clusters}_{planning_horizons}.log", + conda: + "../envs/environment.yaml" + script: + "../scripts/build_district_heat_share.py" + + rule prepare_sector_network: params: 
co2_budget=config["co2_budget"], @@ -727,7 +769,6 @@ rule prepare_sector_network: if config["foresight"] == "overnight" else RESOURCES + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_profile="data/heat_load_profile_BDEW.csv", costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{planning_horizons}.csv", @@ -740,9 +781,10 @@ rule prepare_sector_network: simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv", industrial_demand=RESOURCES + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv", - heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc", + hourly_heat_demand_total=RESOURCES + + "hourly_heat_demand_total_elec_s{simpl}_{clusters}.nc", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc", temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc", temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc", diff --git a/rules/collect.smk b/rules/collect.smk index c9bb10ea..1d977da1 100644 --- a/rules/collect.smk +++ b/rules/collect.smk @@ -11,7 +11,6 @@ localrules: prepare_sector_networks, solve_elec_networks, solve_sector_networks, - plot_networks, rule cluster_networks: @@ -69,15 +68,6 @@ rule solve_sector_networks_perfect: ), -rule plot_networks: - input: - expand( - RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config["scenario"] - ), - - rule validate_elec_networks: input: expand( diff --git a/rules/postprocess.smk b/rules/postprocess.smk index 9f4ac78e..59f9ead7 100644 --- a/rules/postprocess.smk +++ b/rules/postprocess.smk @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-2024 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT @@ -9,9 +9,27 @@ localrules: if config["foresight"] != "perfect": - rule plot_network: + rule plot_power_network_clustered: + params: + plotting=config["plotting"], + input: + network=RESOURCES + "networks/elec_s{simpl}_{clusters}.nc", + regions_onshore=RESOURCES + + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + threads: 1 + resources: + mem_mb=4000, + benchmark: + BENCHMARKS + "plot_power_network_clustered/elec_s{simpl}_{clusters}" + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_power_network_clustered.py" + + rule plot_power_network: params: - foresight=config["foresight"], plotting=config["plotting"], input: network=RESULTS @@ -20,27 +38,70 @@ if config["foresight"] != "perfect": output: map=RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=RESULTS - + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf", threads: 2 resources: mem_mb=10000, benchmark: ( BENCHMARKS - + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + + "plot_power_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" ) conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network.py" + + rule 
plot_hydrogen_network: + params: + plotting=config["plotting"], + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + benchmark: + ( + BENCHMARKS + + "plot_hydrogen_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_hydrogen_network.py" + + rule plot_gas_network: + params: + plotting=config["plotting"], + input: + network=RESULTS + + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", + regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson", + output: + map=RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf", + threads: 2 + resources: + mem_mb=10000, + benchmark: + ( + BENCHMARKS + + "plot_gas_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/plot_gas_network.py" if config["foresight"] == "perfect": - rule plot_network: + rule plot_power_network_perfect: params: - foresight=config["foresight"], plotting=config["plotting"], input: network=RESULTS @@ -62,7 +123,7 @@ if config["foresight"] == "perfect": conda: "../envs/environment.yaml" script: - "../scripts/plot_network.py" + "../scripts/plot_power_network_perfect.py" rule copy_config: @@ -89,6 +150,10 @@ rule make_summary: scenario=config["scenario"], RDIR=RDIR, input: + expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"] + ), networks=expand( RESULTS + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc", @@ -97,11 +162,29 @@ rule make_summary: costs="data/costs_{}.csv".format(config["costs"]["year"]) if config["foresight"] == "overnight" else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), - plots=expand( + ac_plot=expand( + RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf", + **config["scenario"] + ), + costs_plot=expand( RESULTS + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", **config["scenario"] ), + h2_plot=expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf" + if config["sector"]["H2_network"] + else [], + **config["scenario"] + ), + ch4_plot=expand( + RESULTS + + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf" + if config["sector"]["gas_network"] + else [], + **config["scenario"] + ), output: nodal_costs=RESULTS + "csvs/nodal_costs.csv", nodal_capacities=RESULTS + "csvs/nodal_capacities.csv", diff --git a/rules/solve_myopic.smk b/rules/solve_myopic.smk index aa94c945..75334073 100644 --- a/rules/solve_myopic.smk +++ b/rules/solve_myopic.smk @@ -1,8 +1,42 @@ -# SPDX-FileCopyrightText: : 2023 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2023-4 The PyPSA-Eur Authors # # SPDX-License-Identifier: MIT +rule build_existing_heating_distribution: + params: + baseyear=config["scenario"]["planning_horizons"][0], + sector=config["sector"], + existing_capacities=config["existing_capacities"], + input: + existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + 
clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv", + clustered_pop_energy_layout=RESOURCES + + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv", + district_heat_share=RESOURCES + + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + output: + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", + wildcard_constraints: + planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear + threads: 1 + resources: + mem_mb=2000, + log: + LOGS + + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log", + benchmark: + ( + BENCHMARKS + + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}" + ) + conda: + "../envs/environment.yaml" + script: + "../scripts/build_existing_heating_distribution.py" + + rule add_existing_baseyear: params: baseyear=config["scenario"]["planning_horizons"][0], @@ -19,7 +53,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", - existing_heating="data/existing_infrastructure/existing_heating_raw.csv", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv", diff --git a/rules/solve_perfect.smk b/rules/solve_perfect.smk index ad310f9f..76051976 100644 --- a/rules/solve_perfect.smk +++ b/rules/solve_perfect.smk @@ -17,6 +17,8 @@ rule add_existing_baseyear: costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]), cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating_distribution=RESOURCES + + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv", existing_heating="data/existing_infrastructure/existing_heating_raw.csv", existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv", existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv", diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index f219b912..46d4bc31 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -409,97 +409,18 @@ def add_heating_capacities_installed_before_baseyear( # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
# TODO start from original file - # retrieve existing heating capacities - techs = [ - "gas boiler", - "oil boiler", - "resistive heater", - "air heat pump", - "ground heat pump", - ] - df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - - # data for Albania, Montenegro and Macedonia not included in database - df.loc["Albania"] = np.nan - df.loc["Montenegro"] = np.nan - df.loc["Macedonia"] = np.nan - - df.fillna(0.0, inplace=True) - - # convert GW to MW - df *= 1e3 - - df.index = cc.convert(df.index, to="iso2") - - # coal and oil boilers are assimilated to oil boilers - df["oil boiler"] = df["oil boiler"] + df["coal boiler"] - df.drop(["coal boiler"], axis=1, inplace=True) - - # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - - nodal_df = df.loc[pop_layout.ct] - nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) - - # split existing capacities between residential and services - # proportional to energy demand - p_set_sum = n.loads_t.p_set.sum() - ratio_residential = pd.Series( - [ - ( - p_set_sum[f"{node} residential rural heat"] - / ( - p_set_sum[f"{node} residential rural heat"] - + p_set_sum[f"{node} services rural heat"] - ) - ) - # if rural heating demand for one of the nodes doesn't exist, - # then columns were dropped before and heating demand share should be 0.0 - if all( - f"{node} {service} rural heat" in p_set_sum.index - for service in ["residential", "services"] - ) - else 0.0 - for node in nodal_df.index - ], - index=nodal_df.index, + existing_heating = pd.read_csv( + snakemake.input.existing_heating_distribution, header=[0, 1], index_col=0 ) - for tech in techs: - nodal_df["residential " + tech] = nodal_df[tech] * ratio_residential - nodal_df["services " + tech] = nodal_df[tech] * (1 - ratio_residential) + techs = existing_heating.columns.get_level_values(1).unique() - names = [ - "residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central", - ] - - nodes = {} - p_nom = {} - for name in names: + for name in existing_heating.columns.get_level_values(0).unique(): name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index( - [ - n.buses.at[index, "location"] - for index in n.buses.index[ - n.buses.index.str.contains(name) - & n.buses.index.str.contains("heat") - ] - ] - ) - heat_pump_type = "air" if "urban" in name else "ground" - heat_type = "residential" if "residential" in name else "services" - if name == "urban central": - p_nom[name] = nodal_df["air heat pump"][nodes[name]] - else: - p_nom[name] = nodal_df[f"{heat_type} {heat_pump_type} heat pump"][ - nodes[name] - ] + nodes = pd.Index(n.buses.location[n.buses.index.str.contains(f"{name} heat")]) + + heat_pump_type = "air" if "urban" in name else "ground" # Add heat pumps costs_name = f"decentral {heat_pump_type}-sourced heat pump" @@ -507,7 +428,7 @@ def add_heating_capacities_installed_before_baseyear( cop = {"air": ashp_cop, "ground": gshp_cop} if time_dep_hp_cop: - efficiency = cop[heat_pump_type][nodes[name]] + efficiency = cop[heat_pump_type][nodes] else: efficiency = costs.at[costs_name, "efficiency"] @@ -520,27 +441,28 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " 
heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] * costs.at[costs_name, "fixed"], - p_nom=p_nom[name] * ratio / costs.at[costs_name, "efficiency"], + p_nom=existing_heating.loc[nodes, (name, f"{heat_pump_type} heat pump")] + * ratio + / costs.at[costs_name, "efficiency"], build_year=int(grouping_year), lifetime=costs.at[costs_name, "lifetime"], ) # add resistive heater, gas boilers and oil boilers - # (50% capacities to rural buses, 50% to urban buses) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} resistive heater-{grouping_year}", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", + bus0=nodes, + bus1=nodes + " " + name + " heat", carrier=name + " resistive heater", efficiency=costs.at[f"{name_type} resistive heater", "efficiency"], capital_cost=( @@ -548,21 +470,20 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} resistive heater", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} resistive heater"][nodes[name]] + existing_heating.loc[nodes, (name, "resistive heater")] * ratio / costs.at[f"{name_type} resistive heater", "efficiency"] ), build_year=int(grouping_year), - lifetime=costs.at[costs_name, "lifetime"], + lifetime=costs.at[f"{name_type} resistive heater", "lifetime"], ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} gas boiler-{grouping_year}", bus0=spatial.gas.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[f"{name_type} gas boiler", "efficiency"], @@ -572,8 +493,7 @@ def add_heating_capacities_installed_before_baseyear( * costs.at[f"{name_type} gas boiler", "fixed"] ), p_nom=( - 0.5 - * nodal_df[f"{heat_type} gas boiler"][nodes[name]] + existing_heating.loc[nodes, (name, "gas boiler")] * ratio / costs.at[f"{name_type} gas boiler", "efficiency"] ), @@ -583,20 +503,21 @@ def add_heating_capacities_installed_before_baseyear( n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} oil boiler-{grouping_year}", bus0=spatial.oil.nodes, - bus1=nodes[name] + " " + name + " heat", + bus1=nodes + " " + name + " heat", bus2="co2 atmosphere", carrier=name + " oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], efficiency2=costs.at["oil", "CO2 intensity"], capital_cost=costs.at["decentral oil boiler", "efficiency"] * costs.at["decentral oil boiler", "fixed"], - p_nom=0.5 - * nodal_df[f"{heat_type} oil boiler"][nodes[name]] - * ratio - / costs.at["decentral oil boiler", "efficiency"], + p_nom=( + existing_heating.loc[nodes, (name, "oil boiler")] + * ratio + / costs.at["decentral oil boiler", "efficiency"] + ), build_year=int(grouping_year), lifetime=costs.at[f"{name_type} gas boiler", "lifetime"], ) @@ -624,6 +545,8 @@ def add_heating_capacities_installed_before_baseyear( # drop assets which are at the end of their lifetime links_i = n.links[(n.links.build_year + n.links.lifetime <= baseyear)].index + logger.info("Removing following links because at end of their lifetime:") + logger.info(links_i) n.mremove("Link", links_i) diff --git a/scripts/build_heat_demand.py b/scripts/build_daily_heat_demand.py similarity index 95% rename from scripts/build_heat_demand.py rename to scripts/build_daily_heat_demand.py index b983f125..e334b1b3 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_daily_heat_demand.py @@ -18,7 +18,8 @@ if __name__ == "__main__": from _helpers import mock_snakemake snakemake = mock_snakemake( - 
"build_heat_demands", + "build_daily_heat_demands", + scope="total", simpl="", clusters=48, ) diff --git a/scripts/build_district_heat_share.py b/scripts/build_district_heat_share.py new file mode 100644 index 00000000..86c42631 --- /dev/null +++ b/scripts/build_district_heat_share.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build district heat shares at each node, depending on investment year. +""" + +import logging + +import pandas as pd +from prepare_sector_network import get + +logger = logging.getLogger(__name__) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_district_heat_share", + simpl="", + clusters=48, + planning_horizons="2050", + ) + + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) + + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + district_heat_share = pd.read_csv(snakemake.input.district_heat_share, index_col=0)[ + "district heat share" + ] + + # make ct-based share nodal + district_heat_share = district_heat_share.loc[pop_layout.ct] + district_heat_share.index = pop_layout.index + + # total urban population per country + ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() + + # distribution of urban population within a country + pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) + + # fraction of node that is urban + urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) + + # maximum potential of urban demand covered by district heating + central_fraction = snakemake.config["sector"]["district_heating"]["potential"] + + # district heating share at each node + dist_fraction_node = ( + district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] + ) + + # if district heating share larger than urban fraction -> set urban + # fraction to district heating share + urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) + + # difference of max potential and today's share of district heating + diff = (urban_fraction * central_fraction) - dist_fraction_node + progress = get( + snakemake.config["sector"]["district_heating"]["progress"], investment_year + ) + dist_fraction_node += diff * progress + logger.info( + f"Increase district heating share by a progress factor of {progress:.2%} " + f"resulting in new average share of {dist_fraction_node.mean():.2%}" + ) + + df = pd.DataFrame( + { + "original district heat share": district_heat_share, + "district fraction of node": dist_fraction_node, + "urban fraction": urban_fraction, + }, + dtype=float, + ) + + df.to_csv(snakemake.output.district_heat_share) diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 39b2a1be..c67bb49d 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -391,13 +391,6 @@ def build_idees(countries, year): # convert TWh/100km to kWh/km totals.loc["passenger car efficiency"] *= 10 - # district heating share - district_heat = totals.loc[ - ["derived heat residential", "derived heat services"] - ].sum() - total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum() - totals.loc["district heat share"] = district_heat.div(total_heat) - return totals.T @@ -572,16 +565,36 @@ def build_energy_totals(countries, eurostat, swiss, idees): ratio = df.at["BA", "total residential"] / 
df.at["RS", "total residential"] df.loc["BA", missing] = ratio * df.loc["RS", missing] + return df + + +def build_district_heat_share(countries, idees): + # district heating share + district_heat = idees[["derived heat residential", "derived heat services"]].sum( + axis=1 + ) + total_heat = idees[["thermal uses residential", "thermal uses services"]].sum( + axis=1 + ) + + district_heat_share = district_heat / total_heat + + district_heat_share = district_heat_share.reindex(countries) + # Missing district heating share - dh_share = pd.read_csv( - snakemake.input.district_heat_share, index_col=0, usecols=[0, 1] + dh_share = ( + pd.read_csv(snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]) + .div(100) + .squeeze() ) # make conservative assumption and take minimum from both data sets - df["district heat share"] = pd.concat( - [df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1 + district_heat_share = pd.concat( + [district_heat_share, dh_share.reindex_like(district_heat_share)], axis=1 ).min(axis=1) - return df + district_heat_share.name = "district heat share" + + return district_heat_share def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"): @@ -750,6 +763,9 @@ if __name__ == "__main__": energy = build_energy_totals(countries, eurostat, swiss, idees) energy.to_csv(snakemake.output.energy_name) + district_heat_share = build_district_heat_share(countries, idees) + district_heat_share.to_csv(snakemake.output.district_heat_share) + base_year_emissions = params["base_emissions_year"] emissions_scope = snakemake.params.energy["emissions"] eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope) diff --git a/scripts/build_existing_heating_distribution.py b/scripts/build_existing_heating_distribution.py new file mode 100644 index 00000000..67993c29 --- /dev/null +++ b/scripts/build_existing_heating_distribution.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Builds table of existing heat generation capacities for initial planning +horizon. 
+""" +import country_converter as coco +import numpy as np +import pandas as pd + +cc = coco.CountryConverter() + + +def build_existing_heating(): + # retrieve existing heating capacities + + existing_heating = pd.read_csv( + snakemake.input.existing_heating, index_col=0, header=0 + ) + + # data for Albania, Montenegro and Macedonia not included in database + existing_heating.loc["Albania"] = np.nan + existing_heating.loc["Montenegro"] = np.nan + existing_heating.loc["Macedonia"] = np.nan + + existing_heating.fillna(0.0, inplace=True) + + # convert GW to MW + existing_heating *= 1e3 + + existing_heating.index = cc.convert(existing_heating.index, to="iso2") + + # coal and oil boilers are assimilated to oil boilers + existing_heating["oil boiler"] = ( + existing_heating["oil boiler"] + existing_heating["coal boiler"] + ) + existing_heating.drop(["coal boiler"], axis=1, inplace=True) + + # distribute technologies to nodes by population + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + + nodal_heating = existing_heating.loc[pop_layout.ct] + nodal_heating.index = pop_layout.index + nodal_heating = nodal_heating.multiply(pop_layout.fraction, axis=0) + + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] + + energy_layout = pd.read_csv( + snakemake.input.clustered_pop_energy_layout, index_col=0 + ) + + uses = ["space", "water"] + sectors = ["residential", "services"] + + nodal_sectoral_totals = pd.DataFrame(dtype=float) + + for sector in sectors: + nodal_sectoral_totals[sector] = energy_layout[ + [f"total {sector} {use}" for use in uses] + ].sum(axis=1) + + nodal_sectoral_fraction = nodal_sectoral_totals.div( + nodal_sectoral_totals.sum(axis=1), axis=0 + ) + + nodal_heat_name_fraction = pd.DataFrame(dtype=float) + + nodal_heat_name_fraction["urban central"] = dist_fraction + + for sector in sectors: + nodal_heat_name_fraction[f"{sector} rural"] = nodal_sectoral_fraction[ + sector + ] * (1 - urban_fraction) + nodal_heat_name_fraction[f"{sector} urban decentral"] = nodal_sectoral_fraction[ + sector + ] * (urban_fraction - dist_fraction) + + nodal_heat_name_tech = pd.concat( + { + name: nodal_heating.multiply(nodal_heat_name_fraction[name], axis=0) + for name in nodal_heat_name_fraction.columns + }, + axis=1, + names=["heat name", "technology"], + ) + + # move all ground HPs to rural, all air to urban + + for sector in sectors: + nodal_heat_name_tech[(f"{sector} rural", "ground heat pump")] += ( + nodal_heat_name_tech[("urban central", "ground heat pump")] + * nodal_sectoral_fraction[sector] + + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] + ) + nodal_heat_name_tech[(f"{sector} urban decentral", "ground heat pump")] = 0.0 + + nodal_heat_name_tech[ + (f"{sector} urban decentral", "air heat pump") + ] += nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] + nodal_heat_name_tech[(f"{sector} rural", "air heat pump")] = 0.0 + + nodal_heat_name_tech[("urban central", "ground heat pump")] = 0.0 + + nodal_heat_name_tech.to_csv(snakemake.output.existing_heating_distribution) + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_existing_heating_distribution", + simpl="", + clusters=48, + planning_horizons=2050, + ) + + build_existing_heating() diff --git a/scripts/build_hourly_heat_demand.py 
b/scripts/build_hourly_heat_demand.py new file mode 100644 index 00000000..c972da89 --- /dev/null +++ b/scripts/build_hourly_heat_demand.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Build hourly heat demand time series from daily ones. +""" + +from itertools import product + +import pandas as pd +import xarray as xr +from _helpers import generate_periodic_profiles + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "build_hourly_heat_demand", + scope="total", + simpl="", + clusters=48, + ) + + snapshots = pd.date_range(freq="h", **snakemake.params.snapshots) + + daily_space_heat_demand = ( + xr.open_dataarray(snakemake.input.heat_demand) + .to_pandas() + .reindex(index=snapshots, method="ffill") + ) + + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) + + sectors = ["residential", "services"] + uses = ["water", "space"] + + heat_demand = {} + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile, + ) + + if use == "space": + heat_demand[f"{sector} {use}"] = ( + daily_space_heat_demand * intraday_year_profile + ) + else: + heat_demand[f"{sector} {use}"] = intraday_year_profile + + heat_demand = pd.concat(heat_demand, axis=1, names=["sector use", "node"]) + + heat_demand.index.name = "snapshots" + + ds = heat_demand.stack().to_xarray() + + ds.to_netcdf(snakemake.output.heat_demand) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index 67440263..bd285e18 100755 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -607,21 +607,28 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions): # noqa: E741 """ return u_values.apply( lambda x: ( - cost_retro.loc[x.name[3], "cost_var"] - * 100 - * float(l) - * l_weight.loc[x.name[3]].iloc[0] - + cost_retro.loc[x.name[3], "cost_fix"] - ) - * x.A_element - / x.A_C_Ref - if x.name[3] != "Window" - else ( - (window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) * x.A_element) + ( + cost_retro.loc[x.name[3], "cost_var"] + * 100 + * float(l) + * l_weight.loc[x.name[3]].iloc[0] + + cost_retro.loc[x.name[3], "cost_fix"] + ) + * x.A_element / x.A_C_Ref - ) - if x.value > window_limit(float(l), window_assumptions) - else 0, + if x.name[3] != "Window" + else ( + ( + ( + window_cost(x[f"new_U_{l}"], cost_retro, window_assumptions) + * x.A_element + ) + / x.A_C_Ref + ) + if x.value > window_limit(float(l), window_assumptions) + else 0 + ) + ), axis=1, ) @@ -648,12 +655,14 @@ def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035): # noqa k: thermal conductivity """ return u_values.apply( - lambda x: k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) - if x.name[3] != "Window" - else ( - min(x.value, u_retro_window(float(l), window_assumptions)) - if x.value > window_limit(float(l), window_assumptions) - else x.value + lambda x: ( + k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]])) + if x.name[3] != "Window" + else ( + min(x.value, u_retro_window(float(l), window_assumptions)) + if x.value > window_limit(float(l), window_assumptions) 
+ else x.value + ) ), axis=1, ) diff --git a/scripts/plot_gas_network.py b/scripts/plot_gas_network.py new file mode 100644 index 00000000..e2953604 --- /dev/null +++ b/scripts/plot_gas_network.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised gas network, storage and selected other +infrastructure. +""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def plot_ch4_map(n): + # if "gas pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + bus_size_factor = 8e7 + linewidth_factor = 1e4 + # MW below which not drawn + line_lower_threshold = 1e3 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + fossil_gas_i = n.generators[n.generators.carrier == "gas"].index + fossil_gas = ( + n.generators_t.p.loc[:, fossil_gas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.generators.loc[fossil_gas_i, "bus"]) + .sum() + / bus_size_factor + ) + fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) + fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) + # make a fake MultiIndex so that area is correct for legend + fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) + + methanation_i = n.links.query("carrier == 'Sabatier'").index + methanation = ( + abs( + n.links_t.p1.loc[:, methanation_i].mul( + n.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .groupby(n.links.loc[methanation_i, "bus1"]) + .sum() + / bus_size_factor + ) + methanation = ( + methanation.groupby(methanation.index) + .sum() + .rename(index=lambda x: x.replace(" gas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) + + biogas_i = n.stores[n.stores.carrier == "biogas"].index + biogas = ( + n.stores_t.p.loc[:, biogas_i] + .mul(n.snapshot_weightings.generators, axis=0) + .sum() + .groupby(n.stores.loc[biogas_i, "bus"]) + .sum() + / bus_size_factor + ) + biogas = ( + biogas.groupby(biogas.index) + .sum() + .rename(index=lambda x: x.replace(" biogas", "")) + ) + # make a fake MultiIndex so that area is correct for legend + biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) + + bus_sizes = pd.concat([fossil_gas, methanation, biogas]) + bus_sizes.sort_index(inplace=True) + + to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] + n.links.drop(to_remove, inplace=True) + + link_widths_rem = n.links.p_nom_opt / linewidth_factor + link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + link_widths_orig = n.links.p_nom / linewidth_factor + link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 + + max_usage = n.links_t.p0[n.links.index].abs().max(axis=0) + link_widths_used = max_usage / linewidth_factor + link_widths_used[max_usage < line_lower_threshold] = 0.0 + + tech_colors = snakemake.params.plotting["tech_colors"] + + pipe_colors = { + "gas pipeline": "#f08080", + "gas pipeline new": "#c46868", + "gas pipeline (in 2020)": "lightgrey", + "gas pipeline (available)": "#e8d1d1", + } + + 
link_color_used = n.links.carrier.map(pipe_colors) + + n.links.bus0 = n.links.bus0.str.replace(" gas", "") + n.links.bus1 = n.links.bus1.str.replace(" gas", "") + + bus_colors = { + "fossil gas": tech_colors["fossil gas"], + "methanation": tech_colors["methanation"], + "biogas": "seagreen", + } + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + n.plot( + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=pipe_colors["gas pipeline (in 2020)"], + link_widths=link_widths_orig, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=pipe_colors["gas pipeline (available)"], + link_widths=link_widths_rem, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + n.plot( + ax=ax, + bus_sizes=0.0, + link_colors=link_color_used, + link_widths=link_widths_used, + branch_components=["Link"], + color_geomap=False, + boundaries=map_opts["boundaries"], + ) + + sizes = [100, 10] + labels = [f"{s} TWh" for s in sizes] + sizes = [s / bus_size_factor * 1e6 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.03), + labelspacing=0.8, + frameon=False, + handletextpad=1, + title="gas sources", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.25, 1.03), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title="gas pipeline", + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = list(pipe_colors.values()) + list(bus_colors.values()) + labels = list(pipe_colors.keys()) + list(bus_colors.keys()) + + # legend on the side + # legend_kw = dict( + # bbox_to_anchor=(1.47, 1.04), + # frameon=False, + # ) + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.24), + ncol=2, + frameon=False, + ) + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_gas_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_ch4_map(n) diff --git a/scripts/plot_hydrogen_network.py b/scripts/plot_hydrogen_network.py new file mode 100644 index 00000000..a1183311 --- /dev/null +++ b/scripts/plot_hydrogen_network.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates map of optimised hydrogen network, storage and selected other +infrastructure. 
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def group_pipes(df, drop_direction=False): + """ + Group pipes which connect same buses and return overall capacity. + """ + if drop_direction: + positive_order = df.bus0 < df.bus1 + df_p = df[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + df_n = df[~positive_order].rename(columns=swap_buses) + df = pd.concat([df_p, df_n]) + + # there are pipes for each investment period rename to AC buses name for plotting + df.index = df.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) + + +def plot_h2_map(n, regions): + # if "H2 pipeline" not in n.links.carrier.unique(): + # return + + assign_location(n) + + h2_storage = n.stores.query("carrier == 'H2'") + regions["H2"] = ( + h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) + .e_nom_opt.groupby(level=0) + .sum() + .div(1e6) + ) # TWh + regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) + + bus_size_factor = 1e5 + linewidth_factor = 7e3 + # MW below which not drawn + line_lower_threshold = 750 + + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + carriers = ["H2 Electrolysis", "H2 Fuel Cell"] + + elec = n.links[n.links.carrier.isin(carriers)].index + + bus_sizes = ( + n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() + / bus_size_factor + ) + + # make a fake MultiIndex so that area is correct for legend + bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) + # drop all links which are not H2 pipelines + n.links.drop( + n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True + ) + + h2_new = n.links[n.links.carrier == "H2 pipeline"] + h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] + + if snakemake.params.foresight == "myopic": + # sum capacitiy for pipelines from different investment periods + h2_new = group_pipes(h2_new) + + if not h2_retro.empty: + h2_retro = ( + group_pipes(h2_retro, drop_direction=True) + .reindex(h2_new.index) + .fillna(0) + ) + + if not h2_retro.empty: + positive_order = h2_retro.bus0 < h2_retro.bus1 + h2_retro_p = h2_retro[positive_order] + swap_buses = {"bus0": "bus1", "bus1": "bus0"} + h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) + h2_retro = pd.concat([h2_retro_p, h2_retro_n]) + + h2_retro["index_orig"] = h2_retro.index + h2_retro.index = h2_retro.apply( + lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", + axis=1, + ) + + retro_w_new_i = h2_retro.index.intersection(h2_new.index) + h2_retro_w_new = h2_retro.loc[retro_w_new_i] + + retro_wo_new_i = h2_retro.index.difference(h2_new.index) + h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] + h2_retro_wo_new.index = h2_retro_wo_new.index_orig + + to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] + h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() + + else: + h2_total = h2_new.p_nom_opt + + link_widths_total = h2_total / linewidth_factor + + n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) + n.links = 
n.links.groupby(level=0).first() + link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) + link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + retro = n.links.p_nom_opt.where( + n.links.carrier == "H2 pipeline retrofitted", other=0.0 + ) + link_widths_retro = retro / linewidth_factor + link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 + + n.links.bus0 = n.links.bus0.str.replace(" H2", "") + n.links.bus1 = n.links.bus1.str.replace(" H2", "") + + regions = regions.to_crs(proj.proj4_init) + + fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) + + color_h2_pipe = "#b3f3f4" + color_retrofit = "#499a9c" + + bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} + + n.plot( + geomap=True, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + link_colors=color_h2_pipe, + link_widths=link_widths_total, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + n.plot( + geomap=True, + bus_sizes=0, + link_colors=color_retrofit, + link_widths=link_widths_retro, + branch_components=["Link"], + ax=ax, + **map_opts, + ) + + regions.plot( + ax=ax, + column="H2", + cmap="Blues", + linewidths=0, + legend=True, + vmax=6, + vmin=0, + legend_kwds={ + "label": "Hydrogen Storage [TWh]", + "shrink": 0.7, + "extend": "max", + }, + ) + + sizes = [50, 10] + labels = [f"{s} GW" for s in sizes] + sizes = [s / bus_size_factor * 1e3 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1), + labelspacing=0.8, + handletextpad=0, + frameon=False, + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [30, 10] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.23, 1), + frameon=False, + labelspacing=0.8, + handletextpad=1, + ) + + add_legend_lines( + ax, + sizes, + labels, + patch_kw=dict(color="lightgrey"), + legend_kw=legend_kw, + ) + + colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] + labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0, 1.13), + ncol=2, + frameon=False, + ) + + add_legend_patches(ax, colors, labels, legend_kw=legend_kw) + + ax.set_facecolor("white") + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_hydrogen_network", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_h2_map(n, regions) diff --git a/scripts/plot_network.py b/scripts/plot_network.py deleted file mode 100644 index 13736d01..00000000 --- a/scripts/plot_network.py +++ /dev/null @@ -1,1107 +0,0 @@ -# -*- coding: utf-8 -*- -# SPDX-FileCopyrightText: : 2020-2023 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: MIT -""" -Creates plots for optimised network topologies, including electricity, gas and -hydrogen networks, and regional generation, 
storage and conversion capacities -built. - -This rule plots a map of the network with technology capacities at the -nodes. -""" - -import logging - -import cartopy.crs as ccrs -import geopandas as gpd -import matplotlib.pyplot as plt -import pandas as pd -import pypsa -from make_summary import assign_carriers -from plot_summary import preferred_order, rename_techs -from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches - -logger = logging.getLogger(__name__) -plt.style.use(["ggplot"]) - - -def rename_techs_tyndp(tech): - tech = rename_techs(tech) - if "heat pump" in tech or "resistive heater" in tech: - return "power-to-heat" - elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: - return "power-to-gas" - elif tech == "H2": - return "H2 storage" - elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: - return "ammonia" - elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: - return "gas-to-power/heat" - # elif "solar" in tech: - # return "solar" - elif tech in ["Fischer-Tropsch", "methanolisation"]: - return "power-to-liquid" - elif "offshore wind" in tech: - return "offshore wind" - elif "CC" in tech or "sequestration" in tech: - return "CCS" - else: - return tech - - -def assign_location(n): - for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: - # these have already been assigned defaults - if i == -1: - continue - names = ifind.index[ifind == i] - c.df.loc[names, "location"] = names.str[:i] - - -def plot_map( - network, - components=["links", "stores", "storage_units", "generators"], - bus_size_factor=1.7e10, - transmission=False, - with_legend=True, -): - tech_colors = snakemake.params.plotting["tech_colors"] - - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - costs = pd.DataFrame(index=n.buses.index) - - for comp in components: - df_c = getattr(n, comp) - - if df_c.empty: - continue - - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" - - costs_c = ( - (df_c.capital_cost * df_c[attr]) - .groupby([df_c.location, df_c.nice_group]) - .sum() - .unstack() - .fillna(0.0) - ) - costs = pd.concat([costs, costs_c], axis=1) - - logger.debug(f"{comp}, {costs}") - - costs = costs.groupby(costs.columns, axis=1).sum() - - costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) - - new_columns = preferred_order.intersection(costs.columns).append( - costs.columns.difference(preferred_order) - ) - costs = costs[new_columns] - - for item in new_columns: - if item not in tech_colors: - logger.warning(f"{item} not in config/plotting/tech_colors") - - costs = costs.stack() # .sort_index() - - # hack because impossible to drop buses... 
- eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - logger.info(f"Dropping non-buses {to_drop.tolist()}") - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - threshold = 100e6 # 100 mEUR/a - carriers = costs.groupby(level=1).sum() - carriers = carriers.where(carriers > threshold).dropna() - carriers = list(carriers.index) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 4e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - title = "added grid" - - if snakemake.wildcards["ll"] == "v1.0": - # should be zero - line_widths = n.lines.s_nom_opt - n.lines.s_nom - link_widths = n.links.p_nom_opt - n.links.p_nom - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "current grid" - else: - line_widths = n.lines.s_nom_opt - n.lines.s_nom_min - link_widths = n.links.p_nom_opt - n.links.p_nom_min - if transmission: - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - title = "total grid" - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - fig, ax = plt.subplots(subplot_kw={"projection": proj}) - fig.set_size_inches(7, 6) - - n.plot( - bus_sizes=costs / bus_size_factor, - bus_colors=tech_colors, - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - if with_legend: - colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] - labels = carriers + ["HVAC line", "HVDC link"] - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig(snakemake.output.map, transparent=True, bbox_inches="tight") - - -def group_pipes(df, drop_direction=False): - """ - Group pipes which connect same buses and return overall capacity. 
- """ - if drop_direction: - positive_order = df.bus0 < df.bus1 - df_p = df[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - df_n = df[~positive_order].rename(columns=swap_buses) - df = pd.concat([df_p, df_n]) - - # there are pipes for each investment period rename to AC buses name for plotting - df.index = df.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"}) - - -def plot_h2_map(network, regions): - n = network.copy() - if "H2 pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - h2_storage = n.stores.query("carrier == 'H2'") - regions["H2"] = ( - h2_storage.rename(index=h2_storage.bus.map(n.buses.location)) - .e_nom_opt.groupby(level=0) - .sum() - .div(1e6) - ) # TWh - regions["H2"] = regions["H2"].where(regions["H2"] > 0.1) - - bus_size_factor = 1e5 - linewidth_factor = 7e3 - # MW below which not drawn - line_lower_threshold = 750 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - carriers = ["H2 Electrolysis", "H2 Fuel Cell"] - - elec = n.links[n.links.carrier.isin(carriers)].index - - bus_sizes = ( - n.links.loc[elec, "p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() - / bus_size_factor - ) - - # make a fake MultiIndex so that area is correct for legend - bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True) - # drop all links which are not H2 pipelines - n.links.drop( - n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True - ) - - h2_new = n.links[n.links.carrier == "H2 pipeline"] - h2_retro = n.links[n.links.carrier == "H2 pipeline retrofitted"] - - if snakemake.params.foresight == "myopic": - # sum capacitiy for pipelines from different investment periods - h2_new = group_pipes(h2_new) - - if not h2_retro.empty: - h2_retro = ( - group_pipes(h2_retro, drop_direction=True) - .reindex(h2_new.index) - .fillna(0) - ) - - if not h2_retro.empty: - positive_order = h2_retro.bus0 < h2_retro.bus1 - h2_retro_p = h2_retro[positive_order] - swap_buses = {"bus0": "bus1", "bus1": "bus0"} - h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses) - h2_retro = pd.concat([h2_retro_p, h2_retro_n]) - - h2_retro["index_orig"] = h2_retro.index - h2_retro.index = h2_retro.apply( - lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}", - axis=1, - ) - - retro_w_new_i = h2_retro.index.intersection(h2_new.index) - h2_retro_w_new = h2_retro.loc[retro_w_new_i] - - retro_wo_new_i = h2_retro.index.difference(h2_new.index) - h2_retro_wo_new = h2_retro.loc[retro_wo_new_i] - h2_retro_wo_new.index = h2_retro_wo_new.index_orig - - to_concat = [h2_new, h2_retro_w_new, h2_retro_wo_new] - h2_total = pd.concat(to_concat).p_nom_opt.groupby(level=0).sum() - - else: - h2_total = h2_new.p_nom_opt - - link_widths_total = h2_total / linewidth_factor - - n.links.rename(index=lambda x: x.split("-2")[0], inplace=True) - n.links = n.links.groupby(level=0).first() - link_widths_total = link_widths_total.reindex(n.links.index).fillna(0.0) - link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - retro = n.links.p_nom_opt.where( - n.links.carrier == "H2 pipeline retrofitted", other=0.0 - ) - link_widths_retro = retro / linewidth_factor - link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - n.links.bus0 = n.links.bus0.str.replace(" H2", "") - 
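# [editor's note: illustrative annotation, not part of the upstream patch]
# bus0 (renamed just above) and bus1 (renamed on the next line) of the
# hydrogen pipelines carry an " H2" suffix; stripping it re-attaches each
# pipeline to the coordinates of its parent AC bus, because every non-AC bus
# was dropped from n.buses earlier in plot_h2_map. A toy illustration with a
# made-up bus name (hypothetical, not taken from a real network):
#
#   >>> "DE1 0 H2".replace(" H2", "")
#   'DE1 0'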
n.links.bus1 = n.links.bus1.str.replace(" H2", "") - - regions = regions.to_crs(proj.proj4_init) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - color_h2_pipe = "#b3f3f4" - color_retrofit = "#499a9c" - - bus_colors = {"H2 Electrolysis": "#ff29d9", "H2 Fuel Cell": "#805394"} - - n.plot( - geomap=True, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=color_h2_pipe, - link_widths=link_widths_total, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - geomap=True, - bus_sizes=0, - link_colors=color_retrofit, - link_widths=link_widths_retro, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - regions.plot( - ax=ax, - column="H2", - cmap="Blues", - linewidths=0, - legend=True, - vmax=6, - vmin=0, - legend_kwds={ - "label": "Hydrogen Storage [TWh]", - "shrink": 0.7, - "extend": "max", - }, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - sizes = [s / bus_size_factor * 1e3 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1), - labelspacing=0.8, - handletextpad=0, - frameon=False, - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [30, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.23, 1), - frameon=False, - labelspacing=0.8, - handletextpad=1, - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = [bus_colors[c] for c in carriers] + [color_h2_pipe, color_retrofit] - labels = carriers + ["H2 pipeline (total)", "H2 pipeline (repurposed)"] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.13), - ncol=2, - frameon=False, - ) - - add_legend_patches(ax, colors, labels, legend_kw=legend_kw) - - ax.set_facecolor("white") - - fig.savefig( - snakemake.output.map.replace("-costs-all", "-h2_network"), bbox_inches="tight" - ) - - -def plot_ch4_map(network): - n = network.copy() - - if "gas pipeline" not in n.links.carrier.unique(): - return - - assign_location(n) - - bus_size_factor = 8e7 - linewidth_factor = 1e4 - # MW below which not drawn - line_lower_threshold = 1e3 - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fossil_gas_i = n.generators[n.generators.carrier == "gas"].index - fossil_gas = ( - n.generators_t.p.loc[:, fossil_gas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.generators.loc[fossil_gas_i, "bus"]) - .sum() - / bus_size_factor - ) - fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True) - fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0) - # make a fake MultiIndex so that area is correct for legend - fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]]) - - methanation_i = n.links.query("carrier == 'Sabatier'").index - methanation = ( - abs( - n.links_t.p1.loc[:, methanation_i].mul( - n.snapshot_weightings.generators, axis=0 - ) - ) - .sum() - .groupby(n.links.loc[methanation_i, "bus1"]) - .sum() - / bus_size_factor - ) - methanation = ( - methanation.groupby(methanation.index) - .sum() - .rename(index=lambda x: x.replace(" gas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]]) - - biogas_i = 
n.stores[n.stores.carrier == "biogas"].index - biogas = ( - n.stores_t.p.loc[:, biogas_i] - .mul(n.snapshot_weightings.generators, axis=0) - .sum() - .groupby(n.stores.loc[biogas_i, "bus"]) - .sum() - / bus_size_factor - ) - biogas = ( - biogas.groupby(biogas.index) - .sum() - .rename(index=lambda x: x.replace(" biogas", "")) - ) - # make a fake MultiIndex so that area is correct for legend - biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]]) - - bus_sizes = pd.concat([fossil_gas, methanation, biogas]) - bus_sizes.sort_index(inplace=True) - - to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")] - n.links.drop(to_remove, inplace=True) - - link_widths_rem = n.links.p_nom_opt / linewidth_factor - link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.0 - - link_widths_orig = n.links.p_nom / linewidth_factor - link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.0 - - max_usage = n.links_t.p0.abs().max(axis=0) - link_widths_used = max_usage / linewidth_factor - link_widths_used[max_usage < line_lower_threshold] = 0.0 - - tech_colors = snakemake.params.plotting["tech_colors"] - - pipe_colors = { - "gas pipeline": "#f08080", - "gas pipeline new": "#c46868", - "gas pipeline (in 2020)": "lightgrey", - "gas pipeline (available)": "#e8d1d1", - } - - link_color_used = n.links.carrier.map(pipe_colors) - - n.links.bus0 = n.links.bus0.str.replace(" gas", "") - n.links.bus1 = n.links.bus1.str.replace(" gas", "") - - bus_colors = { - "fossil gas": tech_colors["fossil gas"], - "methanation": tech_colors["methanation"], - "biogas": "seagreen", - } - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - n.plot( - bus_sizes=bus_sizes, - bus_colors=bus_colors, - link_colors=pipe_colors["gas pipeline (in 2020)"], - link_widths=link_widths_orig, - branch_components=["Link"], - ax=ax, - **map_opts, - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=pipe_colors["gas pipeline (available)"], - link_widths=link_widths_rem, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - n.plot( - ax=ax, - bus_sizes=0.0, - link_colors=link_color_used, - link_widths=link_widths_used, - branch_components=["Link"], - color_geomap=False, - boundaries=map_opts["boundaries"], - ) - - sizes = [100, 10] - labels = [f"{s} TWh" for s in sizes] - sizes = [s / bus_size_factor * 1e6 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.03), - labelspacing=0.8, - frameon=False, - handletextpad=1, - title="gas sources", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [50, 10] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.25, 1.03), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title="gas pipeline", - ) - - add_legend_lines( - ax, - sizes, - labels, - patch_kw=dict(color="lightgrey"), - legend_kw=legend_kw, - ) - - colors = list(pipe_colors.values()) + list(bus_colors.values()) - labels = list(pipe_colors.keys()) + list(bus_colors.keys()) - - # legend on the side - # legend_kw = dict( - # bbox_to_anchor=(1.47, 1.04), - # frameon=False, - # ) - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0, 1.24), - ncol=2, - frameon=False, - ) - - add_legend_patches( - ax, - colors, - labels, - legend_kw=legend_kw, - ) - - fig.savefig( - 
snakemake.output.map.replace("-costs-all", "-ch4_network"), bbox_inches="tight" - ) - - -def plot_map_without(network): - n = network.copy() - assign_location(n) - - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - - fig, ax = plt.subplots(figsize=(7, 6), subplot_kw={"projection": proj}) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 200.0 - line_upper_threshold = 1e4 - linewidth_factor = 3e3 - ac_color = "rosybrown" - dc_color = "darkseagreen" - - # hack because impossible to drop buses... - if "EU gas" in n.buses.index: - eu_location = snakemake.params.plotting.get( - "eu_node_location", dict(x=-5.5, y=46) - ) - n.buses.loc["EU gas", "x"] = eu_location["x"] - n.buses.loc["EU gas", "y"] = eu_location["y"] - - to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] - n.links.drop(to_drop, inplace=True) - - if snakemake.wildcards["ll"] == "v1.0": - line_widths = n.lines.s_nom - link_widths = n.links.p_nom - else: - line_widths = n.lines.s_nom_min - link_widths = n.links.p_nom_min - - line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) - link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) - - line_widths = line_widths.replace(line_lower_threshold, 0) - link_widths = link_widths.replace(line_lower_threshold, 0) - - n.plot( - bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - handles = [] - labels = [] - - for s in (10, 5): - handles.append( - plt.Line2D([0], [0], color=ac_color, linewidth=s * 1e3 / linewidth_factor) - ) - labels.append(f"{s} GW") - l1_1 = ax.legend( - handles, - labels, - loc="upper left", - bbox_to_anchor=(0.05, 1.01), - frameon=False, - labelspacing=0.8, - handletextpad=1.5, - title="Today's transmission", - ) - ax.add_artist(l1_1) - - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") - - -def plot_series(network, carrier="AC", name="test"): - n = network.copy() - assign_location(n) - assign_carriers(n) - - buses = n.buses.index[n.buses.carrier.str.contains(carrier)] - - supply = pd.DataFrame(index=n.snapshots) - for c in n.iterate_components(n.branch_components): - n_port = 4 if c.name == "Link" else 2 - for i in range(n_port): - supply = pd.concat( - ( - supply, - ( - -1 - * c.pnl[f"p{str(i)}"] - .loc[:, c.df.index[c.df[f"bus{str(i)}"].isin(buses)]] - .groupby(c.df.carrier, axis=1) - .sum() - ), - ), - axis=1, - ) - - for c in n.iterate_components(n.one_port_components): - comps = c.df.index[c.df.bus.isin(buses)] - supply = pd.concat( - ( - supply, - ((c.pnl["p"].loc[:, comps]).multiply(c.df.loc[comps, "sign"])) - .groupby(c.df.carrier, axis=1) - .sum(), - ), - axis=1, - ) - - supply = supply.groupby(rename_techs_tyndp, axis=1).sum() - - both = supply.columns[(supply < 0.0).any() & (supply > 0.0).any()] - - positive_supply = supply[both] - negative_supply = supply[both] - - positive_supply[positive_supply < 0.0] = 0.0 - negative_supply[negative_supply > 0.0] = 0.0 - - supply[both] = positive_supply - - suffix = " charging" - - negative_supply.columns = negative_supply.columns + suffix - - supply = pd.concat((supply, negative_supply), axis=1) - - # 14-21.2 for flaute - # 19-26.1 for flaute - - start = "2013-02-19" - stop = "2013-02-26" - - threshold = 10e3 - - to_drop = supply.columns[(abs(supply) < threshold).all()] - - if len(to_drop) != 
0: - logger.info(f"Dropping {to_drop.tolist()} from supply") - supply.drop(columns=to_drop, inplace=True) - - supply.index.name = None - - supply = supply / 1e3 - - supply.rename( - columns={"electricity": "electric demand", "heat": "heat demand"}, inplace=True - ) - supply.columns = supply.columns.str.replace("residential ", "") - supply.columns = supply.columns.str.replace("services ", "") - supply.columns = supply.columns.str.replace("urban decentral ", "decentral ") - - preferred_order = pd.Index( - [ - "electric demand", - "transmission lines", - "hydroelectricity", - "hydro reservoir", - "run of river", - "pumped hydro storage", - "CHP", - "onshore wind", - "offshore wind", - "solar PV", - "solar thermal", - "building retrofitting", - "ground heat pump", - "air heat pump", - "resistive heater", - "OCGT", - "gas boiler", - "gas", - "natural gas", - "methanation", - "hydrogen storage", - "battery storage", - "hot water storage", - ] - ) - - new_columns = preferred_order.intersection(supply.columns).append( - supply.columns.difference(preferred_order) - ) - - supply = supply.groupby(supply.columns, axis=1).sum() - fig, ax = plt.subplots() - fig.set_size_inches((8, 5)) - - ( - supply.loc[start:stop, new_columns].plot( - ax=ax, - kind="area", - stacked=True, - linewidth=0.0, - color=[ - snakemake.params.plotting["tech_colors"][i.replace(suffix, "")] - for i in new_columns - ], - ) - ) - - handles, labels = ax.get_legend_handles_labels() - - handles.reverse() - labels.reverse() - - new_handles = [] - new_labels = [] - - for i, item in enumerate(labels): - if "charging" not in item: - new_handles.append(handles[i]) - new_labels.append(labels[i]) - - ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) - ax.set_xlim([start, stop]) - ax.set_ylim([-1300, 1900]) - ax.grid(True) - ax.set_ylabel("Power [GW]") - fig.tight_layout() - - fig.savefig( - "results/{}maps/series-{}-{}-{}-{}.pdf".format( - snakemake.params.RDIR, - snakemake.wildcards["ll"], - carrier, - start, - stop, - ), - transparent=True, - ) - - -def plot_map_perfect( - network, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=1.7e10, -): - n = network.copy() - assign_location(n) - # Drop non-electric buses so they don't clutter the plot - n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - # investment periods - investments = n.snapshots.levels[0] - - costs = {} - for comp in components: - df_c = n.df(comp) - if df_c.empty: - continue - df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) - - attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" - - active = pd.concat( - [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], - axis=1, - ).astype(int) - capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost - capital_cost_t = ( - (active.mul(capital_cost, axis=0)) - .groupby([n.df(comp).location, n.df(comp).nice_group]) - .sum() - ) - - capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") - - costs[comp] = capital_cost_t - - costs = pd.concat(costs).groupby(level=[1, 2]).sum() - costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) - - new_columns = preferred_order.intersection(costs.index.levels[1]).append( - costs.index.levels[1].difference(preferred_order) - ) - costs = costs.reindex(new_columns, level=1) - - for item in new_columns: - if item not in snakemake.config["plotting"]["tech_colors"]: - print( - "Warning!", - item, - "not in config/plotting/tech_colors, assign random color", - ) - 
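# [editor's note: illustrative annotation, not part of the upstream patch]
# The fallback on the next line replaces the whole tech_colors dictionary with
# the string "pink" rather than assigning a colour to the single missing
# carrier; judging by the warning text, the intended statement is probably
#
#   snakemake.config["plotting"]["tech_colors"][item] = "pink"
#
# (a guess at the intent, not a confirmed upstream fix). The same statement is
# carried over unchanged into the new scripts/plot_power_network_perfect.py
# added further down in this patch.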
snakemake.config["plotting"]["tech_colors"] = "pink" - - n.links.drop( - n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], - inplace=True, - ) - - # drop non-bus - to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) - if len(to_drop) != 0: - print("dropping non-buses", to_drop) - costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") - - # make sure they are removed from index - costs.index = pd.MultiIndex.from_tuples(costs.index.values) - - # PDF has minimum width, so set these to zero - line_lower_threshold = 500.0 - line_upper_threshold = 1e4 - linewidth_factor = 2e3 - ac_color = "gray" - dc_color = "m" - - line_widths = n.lines.s_nom_opt - link_widths = n.links.p_nom_opt - linewidth_factor = 2e3 - line_lower_threshold = 0.0 - title = "Today's transmission" - - line_widths[line_widths < line_lower_threshold] = 0.0 - link_widths[link_widths < line_lower_threshold] = 0.0 - - line_widths[line_widths > line_upper_threshold] = line_upper_threshold - link_widths[link_widths > line_upper_threshold] = line_upper_threshold - - for year in costs.columns: - fig, ax = plt.subplots(subplot_kw={"projection": proj}) - fig.set_size_inches(7, 6) - fig.suptitle(year) - - n.plot( - bus_sizes=costs[year] / bus_size_factor, - bus_colors=snakemake.config["plotting"]["tech_colors"], - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, - **map_opts, - ) - - sizes = [20, 10, 5] - labels = [f"{s} bEUR/a" for s in sizes] - sizes = [s / bus_size_factor * 1e9 for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.01, 1.06), - labelspacing=0.8, - frameon=False, - handletextpad=0, - title="system cost", - ) - - add_legend_circles( - ax, - sizes, - labels, - srid=n.srid, - patch_kw=dict(facecolor="lightgrey"), - legend_kw=legend_kw, - ) - - sizes = [10, 5] - labels = [f"{s} GW" for s in sizes] - scale = 1e3 / linewidth_factor - sizes = [s * scale for s in sizes] - - legend_kw = dict( - loc="upper left", - bbox_to_anchor=(0.27, 1.06), - frameon=False, - labelspacing=0.8, - handletextpad=1, - title=title, - ) - - add_legend_lines( - ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw - ) - - legend_kw = dict( - bbox_to_anchor=(1.52, 1.04), - frameon=False, - ) - - fig.savefig( - snakemake.output[f"map_{year}"], transparent=True, bbox_inches="tight" - ) - - -if __name__ == "__main__": - if "snakemake" not in globals(): - from _helpers import mock_snakemake - - snakemake = mock_snakemake( - "plot_network", - simpl="", - opts="", - clusters="37", - ll="v1.0", - sector_opts="4380H-T-H-B-I-A-dist1", - ) - - logging.basicConfig(level=snakemake.config["logging"]["level"]) - - n = pypsa.Network(snakemake.input.network) - - regions = gpd.read_file(snakemake.input.regions).set_index("name") - - map_opts = snakemake.params.plotting["map"] - - if map_opts["boundaries"] is None: - map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] - - proj_kwargs = snakemake.params.plotting.get("projection", dict(name="EqualEarth")) - proj_func = getattr(ccrs, proj_kwargs.pop("name")) - proj = proj_func(**proj_kwargs) - - if snakemake.params["foresight"] == "perfect": - plot_map_perfect( - n, - components=["Link", "Store", "StorageUnit", "Generator"], - bus_size_factor=2e10, - ) - else: - plot_map( - n, - components=["generators", "links", "stores", "storage_units"], - bus_size_factor=2e10, - transmission=False, - ) - - plot_h2_map(n, 
regions) - plot_ch4_map(n) - plot_map_without(n) - - # plot_series(n, carrier="AC", name=suffix) - # plot_series(n, carrier="heat", name=suffix) diff --git a/scripts/plot_power_network.py b/scripts/plot_power_network.py new file mode 100644 index 00000000..271e638d --- /dev/null +++ b/scripts/plot_power_network.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built. +""" + +import logging + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_summary import preferred_order, rename_techs +from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches + +logger = logging.getLogger(__name__) + + +def rename_techs_tyndp(tech): + tech = rename_techs(tech) + if "heat pump" in tech or "resistive heater" in tech: + return "power-to-heat" + elif tech in ["H2 Electrolysis", "methanation", "H2 liquefaction"]: + return "power-to-gas" + elif tech == "H2": + return "H2 storage" + elif tech in ["NH3", "Haber-Bosch", "ammonia cracker", "ammonia store"]: + return "ammonia" + elif tech in ["OCGT", "CHP", "gas boiler", "H2 Fuel Cell"]: + return "gas-to-power/heat" + # elif "solar" in tech: + # return "solar" + elif tech in ["Fischer-Tropsch", "methanolisation"]: + return "power-to-liquid" + elif "offshore wind" in tech: + return "offshore wind" + elif "CC" in tech or "sequestration" in tech: + return "CCS" + else: + return tech + + +def assign_location(n): + for c in n.iterate_components(n.one_port_components | n.branch_components): + ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) + for i in ifind.value_counts().index: + # these have already been assigned defaults + if i == -1: + continue + names = ifind.index[ifind == i] + c.df.loc[names, "location"] = names.str[:i] + + +def load_projection(plotting_params): + proj_kwargs = plotting_params.get("projection", dict(name="EqualEarth")) + proj_func = getattr(ccrs, proj_kwargs.pop("name")) + return proj_func(**proj_kwargs) + + +def plot_map( + n, + components=["links", "stores", "storage_units", "generators"], + bus_size_factor=2e10, + transmission=False, + with_legend=True, +): + tech_colors = snakemake.params.plotting["tech_colors"] + + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + + costs = pd.DataFrame(index=n.buses.index) + + for comp in components: + df_c = getattr(n, comp) + + if df_c.empty: + continue + + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "stores" else "p_nom_opt" + + costs_c = ( + (df_c.capital_cost * df_c[attr]) + .groupby([df_c.location, df_c.nice_group]) + .sum() + .unstack() + .fillna(0.0) + ) + costs = pd.concat([costs, costs_c], axis=1) + + logger.debug(f"{comp}, {costs}") + + costs = costs.groupby(costs.columns, axis=1).sum() + + costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True) + + new_columns = preferred_order.intersection(costs.columns).append( + costs.columns.difference(preferred_order) + ) + costs = costs[new_columns] + + for item in new_columns: + if item not in tech_colors: + logger.warning(f"{item} not in config/plotting/tech_colors") + + costs = costs.stack() # .sort_index() + + # hack because 
impossible to drop buses... + eu_location = snakemake.params.plotting.get("eu_node_location", dict(x=-5.5, y=46)) + n.buses.loc["EU gas", "x"] = eu_location["x"] + n.buses.loc["EU gas", "y"] = eu_location["y"] + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + logger.info(f"Dropping non-buses {to_drop.tolist()}") + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + threshold = 100e6 # 100 mEUR/a + carriers = costs.groupby(level=1).sum() + carriers = carriers.where(carriers > threshold).dropna() + carriers = list(carriers.index) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 4e3 + ac_color = "rosybrown" + dc_color = "darkseagreen" + + title = "added grid" + + if snakemake.wildcards["ll"] == "v1.0": + # should be zero + line_widths = n.lines.s_nom_opt - n.lines.s_nom + link_widths = n.links.p_nom_opt - n.links.p_nom + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "current grid" + else: + line_widths = n.lines.s_nom_opt - n.lines.s_nom_min + link_widths = n.links.p_nom_opt - n.links.p_nom_min + if transmission: + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + title = "total grid" + + line_widths = line_widths.clip(line_lower_threshold, line_upper_threshold) + link_widths = link_widths.clip(line_lower_threshold, line_upper_threshold) + + line_widths = line_widths.replace(line_lower_threshold, 0) + link_widths = link_widths.replace(line_lower_threshold, 0) + + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + + n.plot( + bus_sizes=costs / bus_size_factor, + bus_colors=tech_colors, + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + if with_legend: + colors = [tech_colors[c] for c in carriers] + [ac_color, dc_color] + labels = carriers + ["HVAC line", "HVDC link"] + + add_legend_patches( + ax, + colors, + labels, + legend_kw=legend_kw, + ) + + fig.savefig(snakemake.output.map, bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network", + simpl="", + opts="", + clusters="37", + 
ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map(n) diff --git a/scripts/plot_power_network_clustered.py b/scripts/plot_power_network_clustered.py new file mode 100644 index 00000000..8217ac2e --- /dev/null +++ b/scripts/plot_power_network_clustered.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2023-2024 PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Plot clustered electricity transmission network. +""" + +import cartopy.crs as ccrs +import geopandas as gpd +import matplotlib.pyplot as plt +import pypsa +from matplotlib.lines import Line2D +from plot_power_network import load_projection +from pypsa.plot import add_legend_lines + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_clustered", + clusters=128, + configfiles=["../../config/config.test.yaml"], + ) + + lw_factor = 2e3 + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions_onshore).set_index("name") + + proj = load_projection(snakemake.params.plotting) + + fig, ax = plt.subplots(figsize=(8, 8), subplot_kw={"projection": proj}) + regions.to_crs(proj.proj4_init).plot( + ax=ax, facecolor="none", edgecolor="lightgray", linewidth=0.75 + ) + n.plot( + ax=ax, + margin=0.06, + line_widths=n.lines.s_nom / lw_factor, + link_colors=n.links.p_nom.apply( + lambda x: "darkseagreen" if x > 0 else "skyblue" + ), + link_widths=2.0, + ) + + sizes = [10, 20] + labels = [f"HVAC ({s} GW)" for s in sizes] + scale = 1e3 / lw_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc=[0.25, 0.9], + frameon=False, + labelspacing=0.5, + handletextpad=1, + fontsize=13, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="rosybrown"), legend_kw=legend_kw + ) + + handles = [ + Line2D([0], [0], color="darkseagreen", lw=2), + Line2D([0], [0], color="skyblue", lw=2), + ] + plt.legend( + handles, + ["HVDC existing", "HVDC planned"], + frameon=False, + loc=[0.0, 0.9], + fontsize=13, + ) + + plt.savefig(snakemake.output.map, bbox_inches="tight") diff --git a/scripts/plot_power_network_perfect.py b/scripts/plot_power_network_perfect.py new file mode 100644 index 00000000..ff576d33 --- /dev/null +++ b/scripts/plot_power_network_perfect.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: MIT +""" +Creates plots for optimised power network topologies and regional generation, +storage and conversion capacities built for the perfect foresight scenario. 
+""" + +import logging + +import geopandas as gpd +import matplotlib.pyplot as plt +import pandas as pd +import pypsa +from _helpers import configure_logging +from plot_power_network import assign_location, load_projection, rename_techs_tyndp +from plot_summary import preferred_order +from pypsa.plot import add_legend_circles, add_legend_lines + +logger = logging.getLogger(__name__) + + +def plot_map_perfect( + n, + components=["Link", "Store", "StorageUnit", "Generator"], + bus_size_factor=2e10, +): + assign_location(n) + # Drop non-electric buses so they don't clutter the plot + n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) + # investment periods + investments = n.snapshots.levels[0] + + costs = {} + for comp in components: + df_c = n.df(comp) + if df_c.empty: + continue + df_c["nice_group"] = df_c.carrier.map(rename_techs_tyndp) + + attr = "e_nom_opt" if comp == "Store" else "p_nom_opt" + + active = pd.concat( + [n.get_active_assets(comp, inv_p).rename(inv_p) for inv_p in investments], + axis=1, + ).astype(int) + capital_cost = n.df(comp)[attr] * n.df(comp).capital_cost + capital_cost_t = ( + (active.mul(capital_cost, axis=0)) + .groupby([n.df(comp).location, n.df(comp).nice_group]) + .sum() + ) + + capital_cost_t.drop("load", level=1, inplace=True, errors="ignore") + + costs[comp] = capital_cost_t + + costs = pd.concat(costs).groupby(level=[1, 2]).sum() + costs.drop(costs[costs.sum(axis=1) == 0].index, inplace=True) + + new_columns = preferred_order.intersection(costs.index.levels[1]).append( + costs.index.levels[1].difference(preferred_order) + ) + costs = costs.reindex(new_columns, level=1) + + for item in new_columns: + if item not in snakemake.config["plotting"]["tech_colors"]: + print( + "Warning!", + item, + "not in config/plotting/tech_colors, assign random color", + ) + snakemake.config["plotting"]["tech_colors"] = "pink" + + n.links.drop( + n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")], + inplace=True, + ) + + # drop non-bus + to_drop = costs.index.levels[0].symmetric_difference(n.buses.index) + if len(to_drop) != 0: + print("dropping non-buses", to_drop) + costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore") + + # make sure they are removed from index + costs.index = pd.MultiIndex.from_tuples(costs.index.values) + + # PDF has minimum width, so set these to zero + line_lower_threshold = 500.0 + line_upper_threshold = 1e4 + linewidth_factor = 2e3 + ac_color = "gray" + dc_color = "m" + + line_widths = n.lines.s_nom_opt + link_widths = n.links.p_nom_opt + linewidth_factor = 2e3 + line_lower_threshold = 0.0 + title = "Today's transmission" + + line_widths[line_widths < line_lower_threshold] = 0.0 + link_widths[link_widths < line_lower_threshold] = 0.0 + + line_widths[line_widths > line_upper_threshold] = line_upper_threshold + link_widths[link_widths > line_upper_threshold] = line_upper_threshold + + for year in costs.columns: + fig, ax = plt.subplots(subplot_kw={"projection": proj}) + fig.set_size_inches(7, 6) + fig.suptitle(year) + + n.plot( + bus_sizes=costs[year] / bus_size_factor, + bus_colors=snakemake.config["plotting"]["tech_colors"], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, + **map_opts, + ) + + sizes = [20, 10, 5] + labels = [f"{s} bEUR/a" for s in sizes] + sizes = [s / bus_size_factor * 1e9 for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.01, 1.06), + labelspacing=0.8, + 
frameon=False, + handletextpad=0, + title="system cost", + ) + + add_legend_circles( + ax, + sizes, + labels, + srid=n.srid, + patch_kw=dict(facecolor="lightgrey"), + legend_kw=legend_kw, + ) + + sizes = [10, 5] + labels = [f"{s} GW" for s in sizes] + scale = 1e3 / linewidth_factor + sizes = [s * scale for s in sizes] + + legend_kw = dict( + loc="upper left", + bbox_to_anchor=(0.27, 1.06), + frameon=False, + labelspacing=0.8, + handletextpad=1, + title=title, + ) + + add_legend_lines( + ax, sizes, labels, patch_kw=dict(color="lightgrey"), legend_kw=legend_kw + ) + + legend_kw = dict( + bbox_to_anchor=(1.52, 1.04), + frameon=False, + ) + + fig.savefig(snakemake.output[f"map_{year}"], bbox_inches="tight") + + +if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake( + "plot_power_network_perfect", + simpl="", + opts="", + clusters="37", + ll="v1.0", + sector_opts="4380H-T-H-B-I-A-dist1", + ) + + configure_logging(snakemake) + + n = pypsa.Network(snakemake.input.network) + + regions = gpd.read_file(snakemake.input.regions).set_index("name") + + map_opts = snakemake.params.plotting["map"] + + if map_opts["boundaries"] is None: + map_opts["boundaries"] = regions.total_bounds[[0, 2, 1, 3]] + [-1, 1, -1, 1] + + proj = load_projection(snakemake.params.plotting) + + plot_map_perfect(n) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 92bf726c..cfb32441 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -281,12 +281,14 @@ def plot_balances(): # remove trailing link ports df.index = [ - i[:-1] - if ( - (i not in ["co2", "NH3", "H2"]) - and (i[-1:] in ["0", "1", "2", "3", "4"]) + ( + i[:-1] + if ( + (i not in ["co2", "NH3", "H2"]) + and (i[-1:] in ["0", "1", "2", "3", "4"]) + ) + else i ) - else i for i in df.index ] diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index d24feb79..42def055 100755 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -18,7 +18,7 @@ import numpy as np import pandas as pd import pypsa import xarray as xr -from _helpers import generate_periodic_profiles, update_config_with_sector_opts +from _helpers import update_config_with_sector_opts from add_electricity import calculate_annuity, sanitize_carriers from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement @@ -458,9 +458,9 @@ def update_wind_solar_costs(n, costs): ) ) - n.generators.loc[ - n.generators.carrier == tech, "capital_cost" - ] = capital_cost.rename(index=lambda node: node + " " + tech) + n.generators.loc[n.generators.carrier == tech, "capital_cost"] = ( + capital_cost.rename(index=lambda node: node + " " + tech) + ) def add_carrier_buses(n, carrier, nodes=None): @@ -1639,40 +1639,25 @@ def add_land_transport(n, costs): def build_heat_demand(n): - # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile - daily_space_heat_demand = ( - xr.open_dataarray(snakemake.input.heat_demand_total) - .to_pandas() - .reindex(index=n.snapshots, method="ffill") + heat_demand_shape = ( + xr.open_dataset(snakemake.input.hourly_heat_demand_total) + .to_dataframe() + .unstack(level=1) ) - intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential", "services"] uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} for sector, use in product(sectors, uses): - weekday = 
list(intraday_profiles[f"{sector} {use} weekday"]) - weekend = list(intraday_profiles[f"{sector} {use} weekend"]) - weekly_profile = weekday * 5 + weekend * 2 - intraday_year_profile = generate_periodic_profiles( - daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=weekly_profile, - ) + name = f"{sector} {use}" - if use == "space": - heat_demand_shape = daily_space_heat_demand * intraday_year_profile - else: - heat_demand_shape = intraday_year_profile - - heat_demand[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + heat_demand[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6 - electric_heat_supply[f"{sector} {use}"] = ( - heat_demand_shape / heat_demand_shape.sum() + electric_heat_supply[name] = ( + heat_demand_shape[name] / heat_demand_shape[name].sum() ).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6 heat_demand = pd.concat(heat_demand, axis=1) @@ -1695,7 +1680,9 @@ def add_heat(n, costs): heat_demand = build_heat_demand(n) - nodes, dist_fraction, urban_fraction = create_nodes_for_heat_sector() + district_heat_info = pd.read_csv(snakemake.input.district_heat_share, index_col=0) + dist_fraction = district_heat_info["district fraction of node"] + urban_fraction = district_heat_info["urban fraction"] # NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) @@ -1735,12 +1722,17 @@ def add_heat(n, costs): for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" + if name == "urban central": + nodes = dist_fraction.index[dist_fraction > 0] + else: + nodes = pop_layout.index + n.add("Carrier", name + " heat") n.madd( "Bus", - nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat", + location=nodes, carrier=name + " heat", unit="MWh_th", ) @@ -1748,9 +1740,9 @@ def add_heat(n, costs): if name == "urban central" and options.get("central_heat_vent"): n.madd( "Generator", - nodes[name] + f" {name} heat vent", - bus=nodes[name] + f" {name} heat", - location=nodes[name], + nodes + f" {name} heat vent", + bus=nodes + f" {name} heat", + location=nodes, carrier=name + " heat vent", p_nom_extendable=True, p_max_pu=0, @@ -1763,11 +1755,11 @@ def add_heat(n, costs): for sector in sectors: # heat demand weighting if "rural" in name: - factor = 1 - urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes] elif "urban central" in name: - factor = dist_fraction[nodes[name]] + factor = dist_fraction[nodes] elif "urban decentral" in name: - factor = urban_fraction[nodes[name]] - dist_fraction[nodes[name]] + factor = urban_fraction[nodes] - dist_fraction[nodes] else: raise NotImplementedError( f" {name} not in " f"heat systems: {heat_systems}" @@ -1778,7 +1770,7 @@ def add_heat(n, costs): heat_demand[[sector + " water", sector + " space"]] .T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply(factor) ) @@ -1786,7 +1778,7 @@ def add_heat(n, costs): heat_load = ( heat_demand.T.groupby(level=1) .sum() - .T[nodes[name]] + .T[nodes] .multiply( factor * (1 + options["district_heating"]["district_heating_loss"]) ) @@ -1794,9 +1786,9 @@ def add_heat(n, costs): n.madd( "Load", - nodes[name], + nodes, suffix=f" {name} heat", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " heat", p_set=heat_load, ) @@ -1808,17 +1800,17 @@ def add_heat(n, costs): for heat_pump_type in 
heat_pump_types: costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" efficiency = ( - cop[heat_pump_type][nodes[name]] + cop[heat_pump_type][nodes] if options["time_dep_hp_cop"] else costs.at[costs_name, "efficiency"] ) n.madd( "Link", - nodes[name], + nodes, suffix=f" {name} {heat_pump_type} heat pump", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=f"{name} {heat_pump_type} heat pump", efficiency=efficiency, capital_cost=costs.at[costs_name, "efficiency"] @@ -1832,17 +1824,17 @@ def add_heat(n, costs): n.madd( "Bus", - nodes[name] + f" {name} water tanks", - location=nodes[name], + nodes + f" {name} water tanks", + location=nodes, carrier=name + " water tanks", unit="MWh_th", ) n.madd( "Link", - nodes[name] + f" {name} water tanks charger", - bus0=nodes[name] + f" {name} heat", - bus1=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks charger", + bus0=nodes + f" {name} heat", + bus1=nodes + f" {name} water tanks", efficiency=costs.at["water tank charger", "efficiency"], carrier=name + " water tanks charger", p_nom_extendable=True, @@ -1850,9 +1842,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} water tanks discharger", - bus0=nodes[name] + f" {name} water tanks", - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} water tanks discharger", + bus0=nodes + f" {name} water tanks", + bus1=nodes + f" {name} heat", carrier=name + " water tanks discharger", efficiency=costs.at["water tank discharger", "efficiency"], p_nom_extendable=True, @@ -1871,8 +1863,8 @@ def add_heat(n, costs): n.madd( "Store", - nodes[name] + f" {name} water tanks", - bus=nodes[name] + f" {name} water tanks", + nodes + f" {name} water tanks", + bus=nodes + f" {name} water tanks", e_cyclic=True, e_nom_extendable=True, carrier=name + " water tanks", @@ -1886,9 +1878,9 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} resistive heater", - bus0=nodes[name], - bus1=nodes[name] + f" {name} heat", + nodes + f" {name} resistive heater", + bus0=nodes, + bus1=nodes + f" {name} heat", carrier=name + " resistive heater", efficiency=costs.at[key, "efficiency"], capital_cost=costs.at[key, "efficiency"] * costs.at[key, "fixed"], @@ -1901,10 +1893,10 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + f" {name} gas boiler", + nodes + f" {name} gas boiler", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=name + " gas boiler", efficiency=costs.at[key, "efficiency"], @@ -1918,13 +1910,13 @@ def add_heat(n, costs): n.madd( "Generator", - nodes[name], + nodes, suffix=f" {name} solar thermal collector", - bus=nodes[name] + f" {name} heat", + bus=nodes + f" {name} heat", carrier=name + " solar thermal", p_nom_extendable=True, capital_cost=costs.at[name_type + " solar thermal", "fixed"], - p_max_pu=solar_thermal[nodes[name]], + p_max_pu=solar_thermal[nodes], lifetime=costs.at[name_type + " solar thermal", "lifetime"], ) @@ -1932,10 +1924,10 @@ def add_heat(n, costs): # add gas CHP; biomass CHP is added in biomass section n.madd( "Link", - nodes[name] + " urban central gas CHP", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban 
central heat", bus3="co2 atmosphere", carrier="urban central gas CHP", p_nom_extendable=True, @@ -1951,12 +1943,12 @@ def add_heat(n, costs): n.madd( "Link", - nodes[name] + " urban central gas CHP CC", - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", + nodes + " urban central gas CHP CC", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + " urban central heat", bus3="co2 atmosphere", - bus4=spatial.co2.df.loc[nodes[name], "nodes"].values, + bus4=spatial.co2.df.loc[nodes, "nodes"].values, carrier="urban central gas CHP CC", p_nom_extendable=True, capital_cost=costs.at["central gas CHP", "fixed"] @@ -1988,11 +1980,11 @@ def add_heat(n, costs): if options["chp"] and options["micro_chp"] and name != "urban central": n.madd( "Link", - nodes[name] + f" {name} micro gas CHP", + nodes + f" {name} micro gas CHP", p_nom_extendable=True, - bus0=spatial.gas.df.loc[nodes[name], "nodes"].values, - bus1=nodes[name], - bus2=nodes[name] + f" {name} heat", + bus0=spatial.gas.df.loc[nodes, "nodes"].values, + bus1=nodes, + bus2=nodes + f" {name} heat", bus3="co2 atmosphere", carrier=name + " micro gas CHP", efficiency=costs.at["micro CHP", "efficiency"], @@ -2123,50 +2115,6 @@ def add_heat(n, costs): ) -def create_nodes_for_heat_sector(): - # TODO pop_layout - - # rural are areas with low heating density and individual heating - # urban are areas with high heating density - # urban can be split into district heating (central) and individual heating (decentral) - - ct_urban = pop_layout.urban.groupby(pop_layout.ct).sum() - # distribution of urban population within a country - pop_layout["urban_ct_fraction"] = pop_layout.urban / pop_layout.ct.map(ct_urban.get) - - sectors = ["residential", "services"] - - nodes = {} - urban_fraction = pop_layout.urban / pop_layout[["rural", "urban"]].sum(axis=1) - - for sector in sectors: - nodes[sector + " rural"] = pop_layout.index - nodes[sector + " urban decentral"] = pop_layout.index - - district_heat_share = pop_weighted_energy_totals["district heat share"] - - # maximum potential of urban demand covered by district heating - central_fraction = options["district_heating"]["potential"] - # district heating share at each node - dist_fraction_node = ( - district_heat_share * pop_layout["urban_ct_fraction"] / pop_layout["fraction"] - ) - nodes["urban central"] = dist_fraction_node.index - # if district heating share larger than urban fraction -> set urban - # fraction to district heating share - urban_fraction = pd.concat([urban_fraction, dist_fraction_node], axis=1).max(axis=1) - # difference of max potential and today's share of district heating - diff = (urban_fraction * central_fraction) - dist_fraction_node - progress = get(options["district_heating"]["progress"], investment_year) - dist_fraction_node += diff * progress - logger.info( - f"Increase district heating share by a progress factor of {progress:.2%} " - f"resulting in new average share of {dist_fraction_node.mean():.2%}" - ) - - return nodes, dist_fraction_node, urban_fraction - - def add_biomass(n, costs): logger.info("Add biomass") @@ -2384,7 +2332,7 @@ def add_biomass(n, costs): if options["biomass_boiler"]: # TODO: Add surcharge for pellets - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", "services rural", @@ -2393,10 +2341,10 @@ def add_biomass(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} biomass boiler", + nodes + f" {name} biomass 
boiler", p_nom_extendable=True, - bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values, - bus1=nodes_heat[name] + f" {name} heat", + bus0=spatial.biomass.df.loc[nodes, "nodes"].values, + bus1=nodes + f" {name} heat", carrier=name + " biomass boiler", efficiency=costs.at["biomass boiler", "efficiency"], capital_cost=costs.at["biomass boiler", "efficiency"] @@ -2839,7 +2787,7 @@ def add_industry(n, costs): ) if options["oil_boilers"]: - nodes_heat = create_nodes_for_heat_sector()[0] + nodes = pop_layout.index for name in [ "residential rural", @@ -2849,10 +2797,10 @@ def add_industry(n, costs): ]: n.madd( "Link", - nodes_heat[name] + f" {name} oil boiler", + nodes + f" {name} oil boiler", p_nom_extendable=True, bus0=spatial.oil.nodes, - bus1=nodes_heat[name] + f" {name} heat", + bus1=nodes + f" {name} heat", bus2="co2 atmosphere", carrier=f"{name} oil boiler", efficiency=costs.at["decentral oil boiler", "efficiency"], @@ -2984,9 +2932,11 @@ def add_industry(n, costs): nodes, suffix=" low-temperature heat for industry", bus=[ - node + " urban central heat" - if node + " urban central heat" in n.buses.index - else node + " services urban decentral heat" + ( + node + " urban central heat" + if node + " urban central heat" in n.buses.index + else node + " services urban decentral heat" + ) for node in nodes ], carrier="low-temperature heat for industry", @@ -3581,10 +3531,9 @@ def lossy_bidirectional_links(n, carrier, efficiencies={}): ) n.links.loc[carrier_i, "p_min_pu"] = 0 - n.links.loc[ - carrier_i, "efficiency" - ] = efficiency_static * efficiency_per_1000km ** ( - n.links.loc[carrier_i, "length"] / 1e3 + n.links.loc[carrier_i, "efficiency"] = ( + efficiency_static + * efficiency_per_1000km ** (n.links.loc[carrier_i, "length"] / 1e3) ) rev_links = ( n.links.loc[carrier_i].copy().rename({"bus0": "bus1", "bus1": "bus0"}, axis=1) diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py index f8629ea7..31bc8f4e 100644 --- a/scripts/simplify_network.py +++ b/scripts/simplify_network.py @@ -468,9 +468,9 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None): dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index ) - dist[ - buses_i - ] = np.inf # bus in buses_i should not be assigned to different bus in buses_i + dist[buses_i] = ( + np.inf + ) # bus in buses_i should not be assigned to different bus in buses_i for c in n.buses.country.unique(): incountry_b = n.buses.country == c diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 23e9b0f8..a1d790a7 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -703,9 +703,11 @@ def add_lossy_bidirectional_link_constraints(n): def get_backward_i(forward_i): return pd.Index( [ - re.sub(r"-(\d{4})$", r"-reversed-\1", s) - if re.search(r"-\d{4}$", s) - else s + "-reversed" + ( + re.sub(r"-(\d{4})$", r"-reversed-\1", s) + if re.search(r"-\d{4}$", s) + else s + "-reversed" + ) for s in forward_i ] )
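Editor's note: as a concrete illustration of the final hunk above, the sketch below shows how the reformatted get_backward_i() helper maps forward link names onto the names of the reversed duplicates that lossy_bidirectional_links() adds in prepare_sector_network.py. The function body is copied from the diff; the example link names and the surrounding imports are assumptions made purely for illustration.

import re

import pandas as pd


def get_backward_i(forward_i):
    # names ending in a four-digit build year get "-reversed" spliced in
    # before the year; all other names get a plain "-reversed" suffix
    return pd.Index(
        [
            (
                re.sub(r"-(\d{4})$", r"-reversed-\1", s)
                if re.search(r"-\d{4}$", s)
                else s + "-reversed"
            )
            for s in forward_i
        ]
    )


forward = pd.Index(["H2 pipeline AA -> BB-2030", "gas pipeline CC -> DD"])
print(get_backward_i(forward))
# Index(['H2 pipeline AA -> BB-reversed-2030', 'gas pipeline CC -> DD-reversed'], dtype='object')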