diff --git a/.gitignore b/.gitignore index a55300e2..1401c0ad 100644 --- a/.gitignore +++ b/.gitignore @@ -2,9 +2,10 @@ .ipynb_checkpoints __pycache__ gurobi.log +.vscode /bak -/resources +/resources* /results /networks /benchmarks @@ -46,4 +47,4 @@ config.yaml doc/_build -*.xls \ No newline at end of file +*.xls diff --git a/Snakefile b/Snakefile index 9ab7d2f1..b91785d9 100644 --- a/Snakefile +++ b/Snakefile @@ -1,9 +1,9 @@ configfile: "config.yaml" + wildcard_constraints: lv="[a-z0-9\.]+", - network="[a-zA-Z0-9]*", simpl="[a-zA-Z0-9]*", clusters="[0-9]+m?", sectors="[+a-zA-Z0-9]+", @@ -11,27 +11,31 @@ wildcard_constraints: sector_opts="[-+a-zA-Z0-9\.\s]*" +SDIR = config['summary_dir'] + '/' + config['run'] +RDIR = config['results_dir'] + config['run'] +CDIR = config['costs_dir'] + subworkflow pypsaeur: workdir: "../pypsa-eur" snakefile: "../pypsa-eur/Snakefile" configfile: "../pypsa-eur/config.yaml" -rule all: - input: - config['summary_dir'] + '/' + config['run'] + '/graphs/costs.pdf' +rule all: + input: SDIR + '/graphs/costs.pdf' rule solve_all_networks: input: - expand(config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + expand(RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", **config['scenario']) + rule prepare_sector_networks: input: - expand(config['results_dir'] + config['run'] + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config['scenario']) + expand(RDIR + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config['scenario']) rule build_population_layouts: @@ -43,6 +47,8 @@ rule build_population_layouts: pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc" resources: mem_mb=20000 + benchmark: "benchmarks/build_population_layouts" + threads: 8 script: 
"scripts/build_population_layouts.py" @@ -55,6 +61,7 @@ rule build_clustered_population_layouts: output: clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv" resources: mem_mb=10000 + benchmark: "benchmarks/build_clustered_population_layouts/s{simpl}_{clusters}" script: "scripts/build_clustered_population_layouts.py" @@ -67,6 +74,7 @@ rule build_simplified_population_layouts: output: clustered_pop_layout="resources/pop_layout_elec_s{simpl}.csv" resources: mem_mb=10000 + benchmark: "benchmarks/build_clustered_population_layouts/s{simpl}" script: "scripts/build_clustered_population_layouts.py" @@ -81,8 +89,10 @@ rule build_heat_demands: heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc", heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 + benchmark: "benchmarks/build_heat_demands/s{simpl}_{clusters}" script: "scripts/build_heat_demand.py" + rule build_temperature_profiles: input: pop_layout_total="resources/pop_layout_total.nc", @@ -97,6 +107,7 @@ rule build_temperature_profiles: temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 + benchmark: "benchmarks/build_temperature_profiles/s{simpl}_{clusters}" script: "scripts/build_temperature_profiles.py" @@ -116,6 +127,7 @@ rule build_cop_profiles: cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc", cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 + benchmark: "benchmarks/build_cop_profiles/s{simpl}_{clusters}" script: "scripts/build_cop_profiles.py" @@ -130,21 +142,32 @@ rule build_solar_thermal_profiles: solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 + benchmark: 
"benchmarks/build_solar_thermal_profiles/s{simpl}_{clusters}" script: "scripts/build_solar_thermal_profiles.py" +def input_eurostat(w): + # 2016 includes BA, 2017 does not + report_year = config["energy"]["eurostat_report_year"] + return f"data/eurostat-energy_balances-june_{report_year}_edition" rule build_energy_totals: input: - nuts3_shapes=pypsaeur('resources/nuts3_shapes.geojson') + nuts3_shapes=pypsaeur('resources/nuts3_shapes.geojson'), + co2="data/eea/UNFCCC_v23.csv", + swiss="data/switzerland-sfoe/switzerland-new_format.csv", + idees="data/jrc-idees-2015", + eurostat=input_eurostat output: energy_name='resources/energy_totals.csv', - co2_name='resources/co2_totals.csv', - transport_name='resources/transport_data.csv' - threads: 1 + co2_name='resources/co2_totals.csv', + transport_name='resources/transport_data.csv' + threads: 16 resources: mem_mb=10000 + benchmark: "benchmarks/build_energy_totals" script: 'scripts/build_energy_totals.py' + rule build_biomass_potentials: input: jrc_potentials="data/biomass/JRC Biomass Potentials.xlsx" @@ -153,8 +176,10 @@ rule build_biomass_potentials: biomass_potentials='resources/biomass_potentials.csv' threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_biomass_potentials" script: 'scripts/build_biomass_potentials.py' + rule build_ammonia_production: input: usgs="data/myb1-2017-nitro.xls" @@ -162,26 +187,32 @@ rule build_ammonia_production: ammonia_production="resources/ammonia_production.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_ammonia_production" script: 'scripts/build_ammonia_production.py' rule build_industry_sector_ratios: input: - ammonia_production="resources/ammonia_production.csv" + ammonia_production="resources/ammonia_production.csv", + idees="data/jrc-idees-2015" output: industry_sector_ratios="resources/industry_sector_ratios.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_industry_sector_ratios" script: 'scripts/build_industry_sector_ratios.py' 
rule build_industrial_production_per_country: input: - ammonia_production="resources/ammonia_production.csv" + ammonia_production="resources/ammonia_production.csv", + jrc="data/jrc-idees-2015", + eurostat="data/eurostat-energy_balances-may_2018_edition", output: - industrial_production_per_country="resources/industrial_production_per_country.csv" - threads: 1 + industrial_production_per_country="resources/industrial_production_per_country.csv" + threads: 8 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_production_per_country" script: 'scripts/build_industrial_production_per_country.py' @@ -192,25 +223,23 @@ rule build_industrial_production_per_country_tomorrow: industrial_production_per_country_tomorrow="resources/industrial_production_per_country_tomorrow.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_production_per_country_tomorrow" script: 'scripts/build_industrial_production_per_country_tomorrow.py' - - rule build_industrial_distribution_key: input: + regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}_{clusters}.geojson'), clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", - europe_shape=pypsaeur('resources/europe_shape.geojson'), hotmaps_industrial_database="data/Industrial_Database.csv", - network=pypsaeur('networks/elec_s{simpl}_{clusters}.nc') output: industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_distribution_key/s{simpl}_{clusters}" script: 'scripts/build_industrial_distribution_key.py' - rule build_industrial_production_per_node: input: industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv", @@ -219,6 +248,7 @@ rule build_industrial_production_per_node: industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 + benchmark: 
"benchmarks/build_industrial_production_per_node/s{simpl}_{clusters}" script: 'scripts/build_industrial_production_per_node.py' @@ -231,17 +261,20 @@ rule build_industrial_energy_demand_per_node: industrial_energy_demand_per_node="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_energy_demand_per_node/s{simpl}_{clusters}" script: 'scripts/build_industrial_energy_demand_per_node.py' rule build_industrial_energy_demand_per_country_today: input: + jrc="data/jrc-idees-2015", ammonia_production="resources/ammonia_production.csv", industrial_production_per_country="resources/industrial_production_per_country.csv" output: industrial_energy_demand_per_country_today="resources/industrial_energy_demand_per_country_today.csv" - threads: 1 + threads: 8 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_energy_demand_per_country_today" script: 'scripts/build_industrial_energy_demand_per_country_today.py' @@ -253,64 +286,49 @@ rule build_industrial_energy_demand_per_node_today: industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 + benchmark: "benchmarks/build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}" script: 'scripts/build_industrial_energy_demand_per_node_today.py' - -rule build_industrial_energy_demand_per_country: - input: - industry_sector_ratios="resources/industry_sector_ratios.csv", - industrial_production_per_country="resources/industrial_production_per_country_tomorrow.csv" - output: - industrial_energy_demand_per_country="resources/industrial_energy_demand_per_country.csv" - threads: 1 - resources: mem_mb=1000 - script: 'scripts/build_industrial_energy_demand_per_country.py' - - -rule build_industrial_demand: - input: - clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", - 
industrial_demand_per_country="resources/industrial_energy_demand_per_country.csv" - output: - industrial_demand="resources/industrial_demand_elec_s{simpl}_{clusters}.csv" - threads: 1 - resources: mem_mb=1000 - script: 'scripts/build_industrial_demand.py' - -rule build_retro_cost: - input: - building_stock="data/retro/data_building_stock.csv", - data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", - air_temperature = "resources/temp_air_total_elec_s{simpl}_{clusters}.nc", - u_values_PL="data/retro/u_values_poland.csv", - tax_w="data/retro/electricity_taxes_eu.csv", - construction_index="data/retro/comparative_level_investment.csv", - floor_area_missing="data/retro/floor_area_missing.csv", - clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", - cost_germany="data/retro/retro_cost_germany.csv", - window_assumptions="data/retro/window_assumptions.csv", - output: - retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv", - floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv" - resources: mem_mb=1000 - script: "scripts/build_retro_cost.py" +if config["sector"]["retrofitting"]["retro_endogen"]: + rule build_retro_cost: + input: + building_stock="data/retro/data_building_stock.csv", + data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", + air_temperature = "resources/temp_air_total_elec_s{simpl}_{clusters}.nc", + u_values_PL="data/retro/u_values_poland.csv", + tax_w="data/retro/electricity_taxes_eu.csv", + construction_index="data/retro/comparative_level_investment.csv", + floor_area_missing="data/retro/floor_area_missing.csv", + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", + cost_germany="data/retro/retro_cost_germany.csv", + window_assumptions="data/retro/window_assumptions.csv", + output: + retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv", + floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv" + resources: mem_mb=1000 + benchmark: 
"benchmarks/build_retro_cost/s{simpl}_{clusters}" + script: "scripts/build_retro_cost.py" + build_retro_cost_output = rules.build_retro_cost.output +else: + build_retro_cost_output = {} rule prepare_sector_network: input: + overrides="data/override_component_attrs", network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'), energy_totals_name='resources/energy_totals.csv', co2_totals_name='resources/co2_totals.csv', transport_name='resources/transport_data.csv', - traffic_data = "data/emobility/", + traffic_data_KFZ = "data/emobility/KFZ__count", + traffic_data_Pkw = "data/emobility/Pkw__count", biomass_potentials='resources/biomass_potentials.csv', - timezone_mappings='data/timezone_mappings.csv', heat_profile="data/heat_load_profile_BDEW.csv", - costs=config['costs_dir'] + "costs_{planning_horizons}.csv", - h2_cavern = "data/hydrogen_salt_cavern_potentials.csv", + costs=CDIR + "costs_{planning_horizons}.csv", profile_offwind_ac=pypsaeur("resources/profile_offwind-ac.nc"), profile_offwind_dc=pypsaeur("resources/profile_offwind-dc.nc"), + h2_cavern="data/hydrogen_salt_cavern_potentials.csv", busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"), busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"), clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", @@ -334,97 +352,101 @@ rule prepare_sector_network: solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc", solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc", - retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv", - floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv" - output: config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' + **build_retro_cost_output + output: RDIR + 
'/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' threads: 1 resources: mem_mb=2000 - benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + benchmark: RDIR + "/benchmarks/prepare_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" script: "scripts/prepare_sector_network.py" - rule plot_network: input: - network=config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + overrides="data/override_component_attrs", + network=RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" output: - map=config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - today=config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}-today.pdf" + map=RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + today=RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}-today.pdf" threads: 2 resources: mem_mb=10000 + benchmark: RDIR + "/benchmarks/plot_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" script: "scripts/plot_network.py" rule copy_config: - output: - config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml' + output: SDIR + '/configs/config.yaml' threads: 1 resources: mem_mb=1000 - script: - 'scripts/copy_config.py' + benchmark: SDIR + "/benchmarks/copy_config" + script: "scripts/copy_config.py" rule make_summary: input: - networks=expand(config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", - **config['scenario']), - costs=config['costs_dir'] + 
"costs_{}.csv".format(config['scenario']['planning_horizons'][0]), - plots=expand(config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", - **config['scenario']) - #heat_demand_name='data/heating/daily_heat_demand.h5' + overrides="data/override_component_attrs", + networks=expand( + RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + **config['scenario'] + ), + costs=CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]), + plots=expand( + RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf", + **config['scenario'] + ) output: - nodal_costs=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_costs.csv', - nodal_capacities=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_capacities.csv', - nodal_cfs=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_cfs.csv', - cfs=config['summary_dir'] + '/' + config['run'] + '/csvs/cfs.csv', - costs=config['summary_dir'] + '/' + config['run'] + '/csvs/costs.csv', - capacities=config['summary_dir'] + '/' + config['run'] + '/csvs/capacities.csv', - curtailment=config['summary_dir'] + '/' + config['run'] + '/csvs/curtailment.csv', - energy=config['summary_dir'] + '/' + config['run'] + '/csvs/energy.csv', - supply=config['summary_dir'] + '/' + config['run'] + '/csvs/supply.csv', - supply_energy=config['summary_dir'] + '/' + config['run'] + '/csvs/supply_energy.csv', - prices=config['summary_dir'] + '/' + config['run'] + '/csvs/prices.csv', - weighted_prices=config['summary_dir'] + '/' + config['run'] + '/csvs/weighted_prices.csv', - market_values=config['summary_dir'] + '/' + config['run'] + '/csvs/market_values.csv', - price_statistics=config['summary_dir'] + '/' + config['run'] + '/csvs/price_statistics.csv', - metrics=config['summary_dir'] + '/' + config['run'] + '/csvs/metrics.csv' + nodal_costs=SDIR + 
'/csvs/nodal_costs.csv', + nodal_capacities=SDIR + '/csvs/nodal_capacities.csv', + nodal_cfs=SDIR + '/csvs/nodal_cfs.csv', + cfs=SDIR + '/csvs/cfs.csv', + costs=SDIR + '/csvs/costs.csv', + capacities=SDIR + '/csvs/capacities.csv', + curtailment=SDIR + '/csvs/curtailment.csv', + energy=SDIR + '/csvs/energy.csv', + supply=SDIR + '/csvs/supply.csv', + supply_energy=SDIR + '/csvs/supply_energy.csv', + prices=SDIR + '/csvs/prices.csv', + weighted_prices=SDIR + '/csvs/weighted_prices.csv', + market_values=SDIR + '/csvs/market_values.csv', + price_statistics=SDIR + '/csvs/price_statistics.csv', + metrics=SDIR + '/csvs/metrics.csv' threads: 2 resources: mem_mb=10000 - script: - 'scripts/make_summary.py' + benchmark: SDIR + "/benchmarks/make_summary" + script: "scripts/make_summary.py" rule plot_summary: input: - costs=config['summary_dir'] + '/' + config['run'] + '/csvs/costs.csv', - energy=config['summary_dir'] + '/' + config['run'] + '/csvs/energy.csv', - balances=config['summary_dir'] + '/' + config['run'] + '/csvs/supply_energy.csv' + costs=SDIR + '/csvs/costs.csv', + energy=SDIR + '/csvs/energy.csv', + balances=SDIR + '/csvs/supply_energy.csv' output: - costs=config['summary_dir'] + '/' + config['run'] + '/graphs/costs.pdf', - energy=config['summary_dir'] + '/' + config['run'] + '/graphs/energy.pdf', - balances=config['summary_dir'] + '/' + config['run'] + '/graphs/balances-energy.pdf' + costs=SDIR + '/graphs/costs.pdf', + energy=SDIR + '/graphs/energy.pdf', + balances=SDIR + '/graphs/balances-energy.pdf' threads: 2 resources: mem_mb=10000 - script: - 'scripts/plot_summary.py' + benchmark: SDIR + "/benchmarks/plot_summary" + script: "scripts/plot_summary.py" + if config["foresight"] == "overnight": rule solve_network: input: - network=config['results_dir'] + config['run'] + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", - costs=config['costs_dir'] + "costs_{planning_horizons}.csv", - config=config['summary_dir'] + '/' + 
config['run'] + '/configs/config.yaml' - output: config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + overrides="data/override_component_attrs", + network=RDIR + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + costs=CDIR + "costs_{planning_horizons}.csv", + config=SDIR + '/configs/config.yaml' + output: RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" shadow: "shallow" log: - solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" - benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + solver=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + python=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", + memory=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" threads: 4 resources: mem_mb=config['solving']['mem'] - # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain + benchmark: RDIR + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" script: "scripts/solve_network.py" @@ -432,53 +454,67 @@ if config["foresight"] == "myopic": rule add_existing_baseyear: input: - network=config['results_dir'] + config['run'] + 
'/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', + overrides="data/override_component_attrs", + network=RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', powerplants=pypsaeur('resources/powerplants.csv'), busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"), busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"), clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", - costs=config['costs_dir'] + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]), + costs=CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]), cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc" - output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + existing_heating='data/existing_infrastructure/existing_heating_raw.csv', + country_codes='data/Country_codes.csv', + existing_solar='data/existing_infrastructure/solar_capacity_IRENA.csv', + existing_onwind='data/existing_infrastructure/onwind_capacity_IRENA.csv', + existing_offwind='data/existing_infrastructure/offwind_capacity_IRENA.csv', + output: RDIR + '/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' wildcard_constraints: planning_horizons=config['scenario']['planning_horizons'][0] #only applies to baseyear threads: 1 resources: mem_mb=2000 + benchmark: RDIR + '/benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}' script: "scripts/add_existing_baseyear.py" - def process_input(wildcards): - i = config["scenario"]["planning_horizons"].index(int(wildcards.planning_horizons)) - return config['results_dir'] + config['run'] + 
"/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + str(config["scenario"]["planning_horizons"][i-1]) + ".nc" + + def solved_previous_horizon(wildcards): + planning_horizons = config["scenario"]["planning_horizons"] + i = planning_horizons.index(int(wildcards.planning_horizons)) + planning_horizon_p = str(planning_horizons[i-1]) + return RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + planning_horizon_p + ".nc" rule add_brownfield: input: - network=config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', - network_p=process_input, #solved network at previous time step - costs=config['costs_dir'] + "costs_{planning_horizons}.csv", + overrides="data/override_component_attrs", + network=RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', + network_p=solved_previous_horizon, #solved network at previous time step + costs=CDIR + "costs_{planning_horizons}.csv", cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc" - - output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + output: RDIR + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" threads: 4 resources: mem_mb=10000 + benchmark: RDIR + '/benchmarks/add_brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}' script: "scripts/add_brownfield.py" + ruleorder: add_existing_baseyear > add_brownfield + rule solve_network_myopic: input: - network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", - costs=config['costs_dir'] + "costs_{planning_horizons}.csv", - config=config['summary_dir'] + '/' + 
config['run'] + '/configs/config.yaml' - output: config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + overrides="data/override_component_attrs", + network=RDIR + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + costs=CDIR + "costs_{planning_horizons}.csv", + config=SDIR + '/configs/config.yaml' + output: RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" shadow: "shallow" log: - solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" - benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + solver=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + python=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", + memory=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" threads: 4 resources: mem_mb=config['solving']['mem'] + benchmark: RDIR + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" script: "scripts/solve_network.py" diff --git a/config.default.yaml b/config.default.yaml index 617e716a..676450d7 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -2,20 +2,26 @@ version: 0.5.0 logging_level: INFO -results_dir: 'results/' +results_dir: results/ summary_dir: results -costs_dir: '../technology-data/outputs/' -run: 
'your-run-name' # use this to keep track of runs with different settings -foresight: 'overnight' # options are overnight, myopic, perfect (perfect is not yet implemented) +costs_dir: ../technology-data/outputs/ +run: your-run-name # use this to keep track of runs with different settings +foresight: overnight # options are overnight, myopic, perfect (perfect is not yet implemented) # if you use myopic or perfect foresight, set the investment years in "planning_horizons" below scenario: - sectors: [E] # ignore this legacy setting - simpl: [''] # only relevant for PyPSA-Eur - lv: [1.0,1.5] # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt" - clusters: [45,50] # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred - opts: [''] # only relevant for PyPSA-Eur - sector_opts: [Co2L0-3H-T-H-B-I-solar+p3-dist1] # this is where the main scenario settings are + simpl: # only relevant for PyPSA-Eur + - '' + lv: # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt" + - 1.0 + - 1.5 + clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred + - 45 + - 50 + opts: # only relevant for PyPSA-Eur + - '' + sector_opts: # this is where the main scenario settings are + - Co2L0-3H-T-H-B-I-solar+p3-dist1 # to really understand the options here, look in scripts/prepare_sector_network.py # Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%); # Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions @@ -30,7 +36,8 @@ scenario: # planning_horizons), be:beta decay; ex:exponential decay # cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential # decay with initial growth rate 0 - planning_horizons : [2030] # investment years for myopic and perfect; or costs year for overnight + planning_horizons: # investment years for myopic and perfect; or costs year for 
overnight + - 2030 # for example, set to [2020, 2030, 2040, 2050] for myopic foresight # CO2 budget as a fraction of 1990 emissions @@ -50,11 +57,10 @@ snapshots: # arguments to pd.date_range start: "2013-01-01" end: "2014-01-01" - closed: 'left' # end is not inclusive + closed: left # end is not inclusive atlite: - cutout_dir: '../pypsa-eur/cutouts' - cutout_name: "europe-2013-era5" + cutout: ../pypsa-eur/cutouts/europe-2013-era5.nc # this information is NOT used but needed as an argument for # pypsa-eur/scripts/add_electricity.py/load_costs in make_summary.py @@ -67,102 +73,174 @@ electricity: # some technologies are removed because they are implemented differently # or have different year-dependent costs in PyPSA-Eur-Sec pypsa_eur: - "Bus": ["AC"] - "Link": ["DC"] - "Generator": ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"] - "StorageUnit": ["PHS","hydro"] - "Store": [] + Bus: + - AC + Link: + - DC + Generator: + - onwind + - offwind-ac + - offwind-dc + - solar + - ror + StorageUnit: + - PHS + - hydro + Store: [] + + +energy: + energy_totals_year: 2011 + base_emissions_year: 1990 + eurostat_report_year: 2016 + emissions: CO2 # "CO2" or "All greenhouse gases - (CO2 equivalent)" biomass: year: 2030 - scenario: "Med" + scenario: Med classes: - solid biomass: ['Primary agricultural residues', 'Forestry energy residue', 'Secondary forestry residues', 'Secondary Forestry residues sawdust', 'Forestry residues from landscape care biomass', 'Municipal waste'] - not included: ['Bioethanol sugar beet biomass', 'Rapeseeds for biodiesel', 'sunflower and soya for Biodiesel', 'Starchy crops biomass', 'Grassy crops biomass', 'Willow biomass', 'Poplar biomass potential', 'Roundwood fuelwood', 'Roundwood Chips & Pellets'] - biogas: ['Manure biomass potential', 'Sludge biomass'] + solid biomass: + - Primary agricultural residues + - Forestry energy residue + - Secondary forestry residues + - Secondary Forestry residues sawdust + - Forestry residues from landscape care 
biomass + - Municipal waste + not included: + - Bioethanol sugar beet biomass + - Rapeseeds for biodiesel + - sunflower and soya for Biodiesel + - Starchy crops biomass + - Grassy crops biomass + - Willow biomass + - Poplar biomass potential + - Roundwood fuelwood + - Roundwood Chips & Pellets + biogas: + - Manure biomass potential + - Sludge biomass + + +solar_thermal: + clearsky_model: simple # should be "simple" or "enhanced"? + orientation: + slope: 45. + azimuth: 180. # only relevant for foresight = myopic or perfect existing_capacities: grouping_years: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] threshold_capacity: 10 - conventional_carriers: ['lignite', 'coal', 'oil', 'uranium'] + conventional_carriers: + - lignite + - coal + - oil + - uranium + sector: - 'central' : True - 'central_fraction' : 0.6 - 'bev_dsm_restriction_value' : 0.75 #Set to 0 for no restriction on BEV DSM - 'bev_dsm_restriction_time' : 7 #Time at which SOC of BEV has to be dsm_restriction_value - 'transport_heating_deadband_upper' : 20. - 'transport_heating_deadband_lower' : 15. - 'ICE_lower_degree_factor' : 0.375 #in per cent increase in fuel consumption per degree above deadband - 'ICE_upper_degree_factor' : 1.6 - 'EV_lower_degree_factor' : 0.98 - 'EV_upper_degree_factor' : 0.63 - 'district_heating_loss' : 0.15 - 'bev_dsm' : True #turns on EV battery - 'bev_availability' : 0.5 #How many cars do smart charging - 'v2g' : True #allows feed-in to grid from EV battery + central: true + central_fraction: 0.6 + bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM + bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value + transport_heating_deadband_upper: 20. + transport_heating_deadband_lower: 15. 
+ ICE_lower_degree_factor: 0.375 #in per cent increase in fuel consumption per degree above deadband + ICE_upper_degree_factor: 1.6 + EV_lower_degree_factor: 0.98 + EV_upper_degree_factor: 0.63 + district_heating_loss: 0.15 + bev_dsm: true #turns on EV battery + bev_availability: 0.5 #How many cars do smart charging + bev_energy: 0.05 #average battery size in MWh + bev_charge_efficiency: 0.9 #BEV (dis-)charging efficiency + bev_plug_to_wheel_efficiency: 0.2 #kWh/km from EPA https://www.fueleconomy.gov/feg/ for Tesla Model S + bev_charge_rate: 0.011 #3-phase charger with 11 kW + bev_avail_max: 0.95 + bev_avail_mean: 0.8 + v2g: true #allows feed-in to grid from EV battery #what is not EV or FCEV is oil-fuelled ICE - 'land_transport_fuel_cell_share': # 1 means all FCEVs + land_transport_fuel_cell_share: # 1 means all FCEVs 2020: 0 2030: 0.05 2040: 0.1 2050: 0.15 - 'land_transport_electric_share': # 1 means all EVs + land_transport_electric_share: # 1 means all EVs 2020: 0 2030: 0.25 2040: 0.6 2050: 0.85 - 'transport_fuel_cell_efficiency': 0.5 - 'transport_internal_combustion_efficiency': 0.3 - 'shipping_average_efficiency' : 0.4 #For conversion of fuel oil to propulsion in 2011 - 'time_dep_hp_cop' : True #time dependent heat pump coefficient of performance - 'heat_pump_sink_T' : 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py + transport_fuel_cell_efficiency: 0.5 + transport_internal_combustion_efficiency: 0.3 + shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011 + time_dep_hp_cop: true #time dependent heat pump coefficient of performance + heat_pump_sink_T: 55. 
# Celsius, based on DTU / large area radiators; used in build_cop_profiles.py # conservatively high to cover hot water and space heating in poorly-insulated buildings - 'reduce_space_heat_exogenously': True # reduces space heat demand by a given factor (applied before losses in DH) + reduce_space_heat_exogenously: true # reduces space heat demand by a given factor (applied before losses in DH) # this can represent e.g. building renovation, building demolition, or if # the factor is negative: increasing floor area, increased thermal comfort, population growth - 'reduce_space_heat_exogenously_factor': # per unit reduction in space heat demand + reduce_space_heat_exogenously_factor: # per unit reduction in space heat demand # the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221 - 2020: 0.10 # this results in a space heat demand reduction of 10% - 2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita - 2030: 0.09 - 2035: 0.11 - 2040: 0.16 - 2045: 0.21 - 2050: 0.29 - 'retrofitting' : # co-optimises building renovation to reduce space heat demand - 'retro_endogen': False # co-optimise space heat savings - 'cost_factor' : 1.0 # weight costs for building renovation - 'interest_rate': 0.04 # for investment in building components - 'annualise_cost': True # annualise the investment costs - 'tax_weighting': False # weight costs depending on taxes in countries - 'construction_index': True # weight costs depending on labour/material costs per country - 'tes' : True - 'tes_tau' : 3. - 'boilers' : True - 'oil_boilers': False - 'chp' : True - 'micro_chp' : False - 'solar_thermal' : True - 'solar_cf_correction': 0.788457 # = >>> 1/1.2683 - 'marginal_cost_storage' : 0. 
#1e-4 - 'methanation' : True - 'helmeth' : True - 'dac' : True - 'co2_vent' : True - 'SMR' : True - 'co2_sequestration_potential' : 200 #MtCO2/a sequestration potential for Europe - 'co2_sequestration_cost' : 20 #EUR/tCO2 for transport and sequestration of CO2 - 'cc_fraction' : 0.9 # default fraction of CO2 captured with post-combustion capture - 'hydrogen_underground_storage' : True - 'use_fischer_tropsch_waste_heat' : True - 'use_fuel_cell_waste_heat' : True - 'electricity_distribution_grid' : False - 'electricity_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv - 'electricity_grid_connection' : True # only applies to onshore wind and utility PV - 'gas_distribution_grid' : True - 'gas_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv + 2020: 0.10 # this results in a space heat demand reduction of 10% + 2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita + 2030: 0.09 + 2035: 0.11 + 2040: 0.16 + 2045: 0.21 + 2050: 0.29 + retrofitting : # co-optimises building renovation to reduce space heat demand + retro_endogen: false # co-optimise space heat savings + cost_factor: 1.0 # weight costs for building renovation + interest_rate: 0.04 # for investment in building components + annualise_cost: true # annualise the investment costs + tax_weighting: false # weight costs depending on taxes in countries + construction_index: true # weight costs depending on labour/material costs per country + tes: true + tes_tau: # 180 day time constant for centralised, 3 day for decentralised + decentral: 3 + central: 180 + boilers: true + oil_boilers: false + chp: true + micro_chp: false + solar_thermal: true + solar_cf_correction: 0.788457 # = >>> 1/1.2683 + marginal_cost_storage: 0. 
#1e-4 + methanation: true + helmeth: true + dac: true + co2_vent: true + SMR: true + co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe + co2_sequestration_cost: 20 #EUR/tCO2 for transport and sequestration of CO2 + cc_fraction: 0.9 # default fraction of CO2 captured with post-combustion capture + hydrogen_underground_storage: true + use_fischer_tropsch_waste_heat: true + use_fuel_cell_waste_heat: true + electricity_distribution_grid: false + electricity_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv + electricity_grid_connection: true # only applies to onshore wind and utility PV + gas_distribution_grid: true + gas_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv + conventional_generation: # generator : carrier + OCGT: gas + + +industry: + St_primary_fraction: 0.3 # fraction of steel produced via primary route (DRI + EAF) versus secondary route (EAF); today fraction is 0.6 + H2_DRI: 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from 51kgH2/tSt in Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279 + elec_DRI: 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf + Al_primary_fraction: 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4 + MWh_CH4_per_tNH3_SMR: 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf + MWh_elec_per_tNH3_SMR: 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3 + MWh_H2_per_tNH3_electrolysis: 6.5 # from https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tHN3 (>3/17 since some H2 lost and used for energy) + MWh_elec_per_tNH3_electrolysis: 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB) + NH3_process_emissions: 24.5 # in MtCO2/a from SMR for 
H2 production for NH3 from UNFCCC for 2015 for EU28 + petrochemical_process_emissions: 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28 + HVC_primary_fraction: 1.0 #fraction of current non-ammonia basic chemicals produced via primary route + hotmaps_locate_missing: false + reference_year: 2015 + costs: lifetime: 25 #default lifetime @@ -173,8 +251,8 @@ costs: # Marginal and capital costs can be overwritten # capital_cost: - # Wind: Bla - marginal_cost: # + # onwind: 500 + marginal_cost: solar: 0.01 onwind: 0.015 offwind: 0.015 @@ -196,17 +274,17 @@ solving: clip_p_max_pu: 1.e-2 load_shedding: false noisy_costs: true - - min_iterations: 1 - max_iterations: 1 - # nhours: 1 + skip_iterations: true + track_iterations: false + min_iterations: 4 + max_iterations: 6 solver: name: gurobi threads: 4 method: 2 # barrier crossover: 0 - BarConvTol: 1.e-5 + BarConvTol: 1.e-6 Seed: 123 AggFill: 0 PreDual: 0 @@ -221,182 +299,175 @@ solving: #feasopt_tolerance: 1.e-6 mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2 -industry: - 'St_primary_fraction' : 0.3 # fraction of steel produced via primary route (DRI + EAF) versus secondary route (EAF); today fraction is 0.6 - 'H2_DRI' : 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from 51kgH2/tSt in Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279 - 'elec_DRI' : 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf - 'Al_primary_fraction' : 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4 - 'MWh_CH4_per_tNH3_SMR' : 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf - 'MWh_elec_per_tNH3_SMR' : 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3 - 'MWh_H2_per_tNH3_electrolysis' : 6.5 # from 
https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tHN3 (>3/17 since some H2 lost and used for energy) - 'MWh_elec_per_tNH3_electrolysis' : 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB) - 'NH3_process_emissions' : 24.5 # in MtCO2/a from SMR for H2 production for NH3 from UNFCCC for 2015 for EU28 - 'petrochemical_process_emissions' : 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28 - 'HVC_primary_fraction' : 1.0 #fraction of current non-ammonia basic chemicals produced via primary route plotting: map: - figsize: [7, 7] - boundaries: [-10.2, 29, 35, 72] - p_nom: - bus_size_factor: 5.e+4 - linewidth_factor: 3.e+3 # 1.e+3 #3.e+3 - - costs_max: 1200 + boundaries: [-11, 30, 34, 71] + color_geomap: + ocean: white + land: whitesmoke + costs_max: 1000 costs_threshold: 1 - - - energy_max: 20000. - energy_min: -15000. - energy_threshold: 50. - - - vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"] - renewable_storage_techs: ["PHS","hydro"] - conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"] - storage_techs: ["hydro+PHS", "battery", "H2"] - # store_techs: ["Li ion", "water tanks"] - load_carriers: ["AC load"] #, "heat load", "Li ion load"] - AC_carriers: ["AC line", "AC transformer"] - link_carriers: ["DC line", "Converter AC-DC"] - heat_links: ["heat pump", "resistive heater", "CHP heat", "CHP electric", - "gas boiler", "central heat pump", "central resistive heater", "central CHP heat", - "central CHP electric", "central gas boiler"] - heat_generators: ["gas boiler", "central gas boiler", "solar thermal collector", "central solar thermal collector"] + energy_max: 20000 + energy_min: -20000 + energy_threshold: 50 + vre_techs: + - onwind + - offwind-ac + - offwind-dc + - solar + - ror + renewable_storage_techs: + - PHS + - hydro + conv_techs: + - OCGT + - CCGT + - Nuclear + - Coal + storage_techs: + - hydro+PHS + - battery + - H2 + load_carriers: + - AC load + AC_carriers: + - AC line 
+ - AC transformer + link_carriers: + - DC line + - Converter AC-DC + heat_links: + - heat pump + - resistive heater + - CHP heat + - CHP electric + - gas boiler + - central heat pump + - central resistive heater + - central CHP heat + - central CHP electric + - central gas boiler + heat_generators: + - gas boiler + - central gas boiler + - solar thermal collector + - central solar thermal collector tech_colors: - "onwind" : "b" - "onshore wind" : "b" - 'offwind' : "c" - 'offshore wind' : "c" - 'offwind-ac' : "c" - 'offshore wind (AC)' : "c" - 'offwind-dc' : "#009999" - 'offshore wind (DC)' : "#009999" - 'wave' : "#004444" - "hydro" : "#3B5323" - "hydro reservoir" : "#3B5323" - "ror" : "#78AB46" - "run of river" : "#78AB46" - 'hydroelectricity' : '#006400' - 'solar' : "y" - 'solar PV' : "y" - 'solar thermal' : 'coral' - 'solar rooftop' : '#e6b800' - "OCGT" : "wheat" - "OCGT marginal" : "sandybrown" - "OCGT-heat" : "orange" - "gas boiler" : "orange" - "gas boilers" : "orange" - "gas boiler marginal" : "orange" - "gas-to-power/heat" : "orange" - "gas" : "brown" - "natural gas" : "brown" - "SMR" : "#4F4F2F" - "oil" : "#B5A642" - "oil boiler" : "#B5A677" - "lines" : "k" - "transmission lines" : "k" - "H2" : "m" - "hydrogen storage" : "m" - "battery" : "slategray" - "battery storage" : "slategray" - "home battery" : "#614700" - "home battery storage" : "#614700" - "Nuclear" : "r" - "Nuclear marginal" : "r" - "nuclear" : "r" - "uranium" : "r" - "Coal" : "k" - "coal" : "k" - "Coal marginal" : "k" - "Lignite" : "grey" - "lignite" : "grey" - "Lignite marginal" : "grey" - "CCGT" : "orange" - "CCGT marginal" : "orange" - "heat pumps" : "#76EE00" - "heat pump" : "#76EE00" - "air heat pump" : "#76EE00" - "ground heat pump" : "#40AA00" - "power-to-heat" : "#40AA00" - "resistive heater" : "pink" - "Sabatier" : "#FF1493" - "methanation" : "#FF1493" - "power-to-gas" : "#FF1493" - "power-to-liquid" : "#FFAAE9" - "helmeth" : "#7D0552" - "helmeth" : "#7D0552" - "DAC" : "#E74C3C" - 
"co2 stored" : "#123456" - "CO2 sequestration" : "#123456" - "CC" : "k" - "co2" : "#123456" - "co2 vent" : "#654321" - "solid biomass for industry co2 from atmosphere" : "#654321" - "solid biomass for industry co2 to stored": "#654321" - "gas for industry co2 to atmosphere": "#654321" - "gas for industry co2 to stored": "#654321" - "Fischer-Tropsch" : "#44DD33" - "kerosene for aviation": "#44BB11" - "naphtha for industry" : "#44FF55" - "land transport oil" : "#44DD33" - "water tanks" : "#BBBBBB" - "hot water storage" : "#BBBBBB" - "hot water charging" : "#BBBBBB" - "hot water discharging" : "#999999" - "CHP" : "r" - "CHP heat" : "r" - "CHP electric" : "r" - "PHS" : "g" - "Ambient" : "k" - "Electric load" : "b" - "Heat load" : "r" - "heat" : "darkred" - "rural heat" : "#880000" - "central heat" : "#b22222" - "decentral heat" : "#800000" - "low-temperature heat for industry" : "#991111" - "process heat" : "#FF3333" - "heat demand" : "darkred" - "electric demand" : "k" - "Li ion" : "grey" - "district heating" : "#CC4E5C" - "retrofitting" : "purple" - "building retrofitting" : "purple" - "BEV charger" : "grey" - "V2G" : "grey" - "land transport EV" : "grey" - "electricity" : "k" - "gas for industry" : "#333333" - "solid biomass for industry" : "#555555" - "industry electricity" : "#222222" - "industry new electricity" : "#222222" - "process emissions to stored" : "#444444" - "process emissions to atmosphere" : "#888888" - "process emissions" : "#222222" - "oil emissions" : "#666666" - "land transport oil emissions" : "#666666" - "land transport fuel cell" : "#AAAAAA" - "biogas" : "#800000" - "solid biomass" : "#DAA520" - "today" : "#D2691E" - "shipping" : "#6495ED" - "electricity distribution grid" : "#333333" - nice_names: - # OCGT: "Gas" - # OCGT marginal: "Gas (marginal)" - offwind: "offshore wind" - onwind: "onshore wind" - battery: "Battery storage" - lines: "Transmission lines" - AC line: "AC lines" - AC-AC: "DC lines" - ror: "Run of river" - nice_names_n: - 
offwind: "offshore\nwind" - onwind: "onshore\nwind" - # OCGT: "Gas" - H2: "Hydrogen\nstorage" - # OCGT marginal: "Gas (marginal)" - lines: "transmission\nlines" - ror: "run of river" + onwind: "#235ebc" + onshore wind: "#235ebc" + offwind: "#6895dd" + offshore wind: "#6895dd" + offwind-ac: "#6895dd" + offshore wind (AC): "#6895dd" + offwind-dc: "#74c6f2" + offshore wind (DC): "#74c6f2" + wave: '#004444' + hydro: '#3B5323' + hydro reservoir: '#3B5323' + ror: '#78AB46' + run of river: '#78AB46' + hydroelectricity: '#006400' + solar: "#f9d002" + solar PV: "#f9d002" + solar thermal: coral + solar rooftop: '#ffef60' + OCGT: wheat + OCGT marginal: sandybrown + OCGT-heat: '#ee8340' + gas boiler: '#ee8340' + gas boilers: '#ee8340' + gas boiler marginal: '#ee8340' + gas-to-power/heat: '#ee8340' + gas: brown + natural gas: brown + SMR: '#4F4F2F' + oil: '#B5A642' + oil boiler: '#B5A677' + lines: k + transmission lines: k + H2: m + hydrogen storage: m + battery: slategray + battery storage: slategray + home battery: '#614700' + home battery storage: '#614700' + Nuclear: r + Nuclear marginal: r + nuclear: r + uranium: r + Coal: k + coal: k + Coal marginal: k + Lignite: grey + lignite: grey + Lignite marginal: grey + CCGT: '#ee8340' + CCGT marginal: '#ee8340' + heat pumps: '#76EE00' + heat pump: '#76EE00' + air heat pump: '#76EE00' + ground heat pump: '#40AA00' + power-to-heat: '#40AA00' + resistive heater: pink + Sabatier: '#FF1493' + methanation: '#FF1493' + power-to-gas: '#FF1493' + power-to-liquid: '#FFAAE9' + helmeth: '#7D0552' + DAC: '#E74C3C' + co2 stored: '#123456' + CO2 sequestration: '#123456' + CC: k + co2: '#123456' + co2 vent: '#654321' + solid biomass for industry co2 from atmosphere: '#654321' + solid biomass for industry co2 to stored: '#654321' + gas for industry co2 to atmosphere: '#654321' + gas for industry co2 to stored: '#654321' + Fischer-Tropsch: '#44DD33' + kerosene for aviation: '#44BB11' + naphtha for industry: '#44FF55' + land transport oil: '#44DD33' 
+ water tanks: '#BBBBBB' + hot water storage: '#BBBBBB' + hot water charging: '#BBBBBB' + hot water discharging: '#999999' + CHP: r + CHP heat: r + CHP electric: r + PHS: g + Ambient: k + Electric load: b + Heat load: r + heat: darkred + rural heat: '#880000' + central heat: '#b22222' + decentral heat: '#800000' + low-temperature heat for industry: '#991111' + process heat: '#FF3333' + heat demand: darkred + electric demand: k + Li ion: grey + district heating: '#CC4E5C' + retrofitting: purple + building retrofitting: purple + BEV charger: grey + V2G: grey + land transport EV: grey + electricity: k + gas for industry: '#333333' + solid biomass for industry: '#555555' + industry electricity: '#222222' + industry new electricity: '#222222' + process emissions to stored: '#444444' + process emissions to atmosphere: '#888888' + process emissions: '#222222' + oil emissions: '#666666' + land transport oil emissions: '#666666' + land transport fuel cell: '#AAAAAA' + biogas: '#800000' + solid biomass: '#DAA520' + today: '#D2691E' + shipping: '#6495ED' + electricity distribution grid: '#333333' diff --git a/data/override_component_attrs/buses.csv b/data/override_component_attrs/buses.csv new file mode 100644 index 00000000..95e276f9 --- /dev/null +++ b/data/override_component_attrs/buses.csv @@ -0,0 +1,3 @@ +attribute,type,unit,default,description,status +location,string,n/a,n/a,Reference to original electricity bus,Input (optional) +unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional) \ No newline at end of file diff --git a/data/override_component_attrs/generators.csv b/data/override_component_attrs/generators.csv new file mode 100644 index 00000000..bd3925fc --- /dev/null +++ b/data/override_component_attrs/generators.csv @@ -0,0 +1,3 @@ +attribute,type,unit,default,description,status +build_year,integer,year,n/a,build year,Input (optional) +lifetime,float,years,n/a,lifetime,Input (optional) diff --git a/data/override_component_attrs/links.csv 
b/data/override_component_attrs/links.csv new file mode 100644 index 00000000..709a9211 --- /dev/null +++ b/data/override_component_attrs/links.csv @@ -0,0 +1,13 @@ +attribute,type,unit,default,description,status +bus2,string,n/a,n/a,2nd bus,Input (optional) +bus3,string,n/a,n/a,3rd bus,Input (optional) +bus4,string,n/a,n/a,4th bus,Input (optional) +efficiency2,static or series,per unit,1.,2nd bus efficiency,Input (optional) +efficiency3,static or series,per unit,1.,3rd bus efficiency,Input (optional) +efficiency4,static or series,per unit,1.,4th bus efficiency,Input (optional) +p2,series,MW,0.,2nd bus output,Output +p3,series,MW,0.,3rd bus output,Output +p4,series,MW,0.,4th bus output,Output +build_year,integer,year,n/a,build year,Input (optional) +lifetime,float,years,n/a,lifetime,Input (optional) +carrier,string,n/a,n/a,carrier,Input (optional) diff --git a/data/override_component_attrs/loads.csv b/data/override_component_attrs/loads.csv new file mode 100644 index 00000000..16290e7c --- /dev/null +++ b/data/override_component_attrs/loads.csv @@ -0,0 +1,2 @@ +attribute,type,unit,default,description,status +carrier,string,n/a,n/a,carrier,Input (optional) \ No newline at end of file diff --git a/data/override_component_attrs/stores.csv b/data/override_component_attrs/stores.csv new file mode 100644 index 00000000..1228fea9 --- /dev/null +++ b/data/override_component_attrs/stores.csv @@ -0,0 +1,4 @@ +attribute,type,unit,default,description,status +build_year,integer,year,n/a,build year,Input (optional) +lifetime,float,years,n/a,lifetime,Input (optional) +carrier,string,n/a,n/a,carrier,Input (optional) diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 65398e33..3f2077ec 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -5,7 +5,59 @@ Release Notes Future release ============== -* Include new features here. +.. note:: + This unreleased version currently requires the master branches of PyPSA, PyPSA-Eur, and the technology-data repository. 
+ +* Extended use of ``multiprocessing`` for much better performance + (from up to 20 minutes to less than one minute). +* Compatibility with ``atlite>=0.2``. Older versions of ``atlite`` will no longer work. +* Handle most input files (or base directories) via ``snakemake.input``. +* Use of ``mock_snakemake`` from PyPSA-Eur. +* Update ``solve_network`` rule to match implementation in PyPSA-Eur by using ``n.ilopf()`` and remove outdated code using ``pyomo``. + Allows the new setting to skip iterated impedance updates with ``solving: options: skip_iterations: true``. +* The component attributes that are to be overridden are now stored in the folder + ``data/override_component_attrs`` analogous to ``pypsa/component_attrs``. + This reduces verbosity and also allows circumventing the ``n.madd()`` hack + for individual components with non-default attributes. + This data is also tracked in the Snakefile. + + A function ``helper.override_component_attrs`` was added that loads this data + and can pass the overridden component attributes into ``pypsa.Network()``: + + >>> from helper import override_component_attrs + >>> overrides = override_component_attrs(snakemake.input.overrides) + >>> n = pypsa.Network("mynetwork.nc", override_component_attrs=overrides) + +* Add various parameters to ``config.default.yaml`` which were previously hardcoded inside the scripts + (e.g. energy reference years, BEV settings, solar thermal collector models, geomap colours). +* Removed stale industry demand rules ``build_industrial_energy_demand_per_country`` + and ``build_industrial_demand``. These are superseded with more regionally resolved rules. +* Use simpler and shorter ``gdf.sjoin()`` function to allocate industrial sites + from the Hotmaps database to onshore regions. This change also fixes a bug: + The previous version allocated sites to the closest bus, + but at country borders (where Voronoi cells are distorted by the borders), + this had resulted in e.g. 
+ a Spanish site close to the French border + being wrongly allocated to the French bus if the bus center was closer. +* Bugfix: Corrected calculation of "gas for industry" carbon capture efficiency. +* Retrofitting rule is now only triggered if endogenously optimised. +* Show progress in build rules with ``tqdm`` progress bars. +* Reduced verbosity of ``Snakefile`` through directory prefixes. +* Improve legibility of ``config.default.yaml`` and remove unused options. +* Add optional function to use ``geopy`` to locate entries of the Hotmaps database of industrial sites + with missing location based on city and country, which reduces missing entries by half. It can be + activated by setting ``industry: hotmaps_locate_missing: true``, takes a few minutes longer, + and should only be used if spatial resolution is coarser than city level. +* Use the country-specific time zone mappings from ``pytz`` rather than a manual mapping. +* A function ``add_carrier_buses()`` was added to the ``prepare_network`` rule to reduce code duplication. +* In the ``prepare_network`` rule the cost and potential adjustment was moved into an + own function ``maybe_adjust_costs_and_potentials()``. +* Use ``matplotlibrc`` to set the default plotting style and backend. +* Added benchmark files for each rule. +* Implements changes to ``n.snapshot_weightings`` in upcoming PyPSA version (cf. `PyPSA/#227 `_). +* New dependencies: ``tqdm``, ``atlite>=0.2.4``, ``pytz`` and ``geopy`` (optional). + These are included in the environment specifications of PyPSA-Eur. +* Consistent use of ``__main__`` block and further unspecific code cleaning.
+ PyPSA-Eur-Sec 0.5.0 (21st May 2021) diff --git a/matplotlibrc b/matplotlibrc new file mode 100644 index 00000000..db5e7ce8 --- /dev/null +++ b/matplotlibrc @@ -0,0 +1,4 @@ +backend: Agg +font.family: sans-serif +font.sans-serif: Ubuntu, DejaVu Sans +image.cmap: viridis \ No newline at end of file diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index 20677498..0952c752 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -2,43 +2,16 @@ import logging logger = logging.getLogger(__name__) + import pandas as pd idx = pd.IndexSlice -import numpy as np -import scipy as sp -import xarray as xr -import re, os - -from six import iteritems, string_types - import pypsa - import yaml -import pytz - from add_existing_baseyear import add_build_year_to_new_assets +from helper import override_component_attrs -#First tell PyPSA that links can have multiple outputs by -#overriding the component_attrs. This can be done for -#as many buses as you need with format busi for i = 2,3,4,5,.... 
-#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs - -override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) -override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"] -override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"] -override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"] - -override_component_attrs["Link"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Link"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] -override_component_attrs["Generator"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Generator"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] -override_component_attrs["Store"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] def add_brownfield(n, n_p, year): @@ -48,72 +21,85 @@ def add_brownfield(n, n_p, year): attr = "e" if c.name == "Store" else "p" - #first, remove generators, links and stores that track CO2 or global EU values - #since these are already in n - n_p.mremove(c.name, - c.df.index[c.df.lifetime.isna()]) + # first, remove generators, links and stores that track + # CO2 or global EU values since these are already in n + n_p.mremove( + c.name, + c.df.index[c.df.lifetime.isna()] 
+ ) - #remove assets whose build_year + lifetime < year - n_p.mremove(c.name, - c.df.index[c.df.build_year + c.df.lifetime < year]) + # remove assets whose build_year + lifetime < year + n_p.mremove( + c.name, + c.df.index[c.df.build_year + c.df.lifetime < year] + ) - #remove assets if their optimized nominal capacity is lower than a threshold - #since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible - chp_heat = c.df.index[c.df[attr + "_nom_extendable"] & c.df.index.str.contains("urban central") & c.df.index.str.contains("CHP") & c.df.index.str.contains("heat")] + # remove assets if their optimized nominal capacity is lower than a threshold + # since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible + chp_heat = c.df.index[( + c.df[attr + "_nom_extendable"] + & c.df.index.str.contains("urban central") + & c.df.index.str.contains("CHP") + & c.df.index.str.contains("heat") + )] + + threshold = snakemake.config['existing_capacities']['threshold_capacity'] + if not chp_heat.empty: - n_p.mremove(c.name, - chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < snakemake.config['existing_capacities']['threshold_capacity']*c.df.efficiency[chp_heat.str.replace("heat","electric")].values*c.df.p_nom_ratio[chp_heat.str.replace("heat","electric")].values/c.df.efficiency[chp_heat].values]) - n_p.mremove(c.name, - c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < snakemake.config['existing_capacities']['threshold_capacity'])]) + threshold_chp_heat = (threshold + * c.df.efficiency[chp_heat.str.replace("heat", "electric")].values + * c.df.p_nom_ratio[chp_heat.str.replace("heat", "electric")].values + / c.df.efficiency[chp_heat].values + ) + n_p.mremove( + c.name, + chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat] + ) + + n_p.mremove( + c.name, + c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < 
threshold)] + ) - #copy over assets but fix their capacity + # copy over assets but fix their capacity c.df[attr + "_nom"] = c.df[attr + "_nom_opt"] c.df[attr + "_nom_extendable"] = False - n.import_components_from_dataframe(c.df, - c.name) + n.import_components_from_dataframe(c.df, c.name) - #copy time-dependent - for tattr in n.component_attrs[c.name].index[(n.component_attrs[c.name].type.str.contains("series") & - n.component_attrs[c.name].status.str.contains("Input"))]: - n.import_series_from_dataframe(c.pnl[tattr], - c.name, - tattr) + # copy time-dependent + selection = ( + n.component_attrs[c.name].type.str.contains("series") + & n.component_attrs[c.name].status.str.contains("Input") + ) + for tattr in n.component_attrs[c.name].index[selection]: + n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr) if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils.snakemake import MockSnakemake - snakemake = MockSnakemake( - wildcards=dict(network='elec', simpl='', clusters='37', lv='1.0', - sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', - co2_budget_name='go', - planning_horizons='2030'), - input=dict(network='pypsa-eur-sec/results/test/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc', - network_p='pypsa-eur-sec/results/test/postnetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc', - costs='pypsa-eur-sec/data/costs/costs_{planning_horizons}.csv', - cop_air_total="pypsa-eur-sec/resources/cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_elec_s{simpl}_{clusters}.nc"), - output=['pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] + from helper import mock_snakemake + snakemake = mock_snakemake( + 'add_brownfield', + simpl='', + clusters=48, + lv=1.0, + 
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', + planning_horizons=2030, ) - import yaml - with open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) print(snakemake.input.network_p) logging.basicConfig(level=snakemake.config['logging_level']) - year=int(snakemake.wildcards.planning_horizons) + year = int(snakemake.wildcards.planning_horizons) - n = pypsa.Network(snakemake.input.network, - override_component_attrs=override_component_attrs) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) add_build_year_to_new_assets(n, year) - n_p = pypsa.Network(snakemake.input.network_p, - override_component_attrs=override_component_attrs) -#%% + n_p = pypsa.Network(snakemake.input.network_p, override_component_attrs=overrides) + add_brownfield(n, n_p, year) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 1b5451c4..47c31c0e 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -2,259 +2,244 @@ import logging logger = logging.getLogger(__name__) + import pandas as pd idx = pd.IndexSlice import numpy as np -import scipy as sp import xarray as xr -import re, os - -from six import iteritems, string_types import pypsa - import yaml -import pytz - -from vresutils.costdata import annuity - from prepare_sector_network import prepare_costs - -#First tell PyPSA that links can have multiple outputs by -#overriding the component_attrs. This can be done for -#as many buses as you need with format busi for i = 2,3,4,5,.... 
-#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs - -override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) -override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"] -override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"] -override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"] - -override_component_attrs["Link"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Link"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] -override_component_attrs["Generator"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Generator"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] -override_component_attrs["Store"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"] +from helper import override_component_attrs def add_build_year_to_new_assets(n, baseyear): - """ - Parameters ---------- - n : network - - baseyear: year in which optimized assets are built + n : pypsa.Network + baseyear : int + year in which optimized assets are built """ - #Give assets with lifetimes and no build year the build year baseyear + # Give assets with lifetimes and no build year the build year baseyear for c in n.iterate_components(["Link", "Generator", 
"Store"]): assets = c.df.index[~c.df.lifetime.isna() & c.df.build_year.isna()] c.df.loc[assets, "build_year"] = baseyear - #add -baseyear to name + # add -baseyear to name rename = pd.Series(c.df.index, c.df.index) rename[assets] += "-" + str(baseyear) c.df.rename(index=rename, inplace=True) - #rename time-dependent - for attr in n.component_attrs[c.name].index[(n.component_attrs[c.name].type.str.contains("series") & - n.component_attrs[c.name].status.str.contains("Input"))]: + # rename time-dependent + selection = ( + n.component_attrs[c.name].type.str.contains("series") + & n.component_attrs[c.name].status.str.contains("Input") + ) + for attr in n.component_attrs[c.name].index[selection]: c.pnl[attr].rename(columns=rename, inplace=True) + def add_existing_renewables(df_agg): """ Append existing renewables to the df_agg pd.DataFrame with the conventional power plants. """ - cc = pd.read_csv('data/Country_codes.csv', - index_col=0) + cc = pd.read_csv(snakemake.input.country_codes, index_col=0) - carriers = {"solar" : "solar", - "onwind" : "onwind", - "offwind" : "offwind-ac"} + carriers = { + "solar": "solar", + "onwind": "onwind", + "offwind": "offwind-ac" + } for tech in ['solar', 'onwind', 'offwind']: + carrier = carriers[tech] - df = pd.read_csv('data/existing_infrastructure/{}_capacity_IRENA.csv'.format(tech), - index_col=0) - df = df.fillna(0.) + + df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.) 
df.columns = df.columns.astype(int) - df.rename(index={'Czechia':'Czech Republic', - 'UK':'United Kingdom', - 'Bosnia Herzg':'Bosnia Herzegovina', - 'North Macedonia': 'Macedonia'}, inplace=True) + rename_countries = { + 'Czechia': 'Czech Republic', + 'UK': 'United Kingdom', + 'Bosnia Herzg': 'Bosnia Herzegovina', + 'North Macedonia': 'Macedonia' + } + + df.rename(index=rename_countries, inplace=True) df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True) # calculate yearly differences df.insert(loc=0, value=.0, column='1999') - df = df.diff(axis=1).drop('1999', axis=1) - df = df.clip(lower=0) + df = df.diff(axis=1).drop('1999', axis=1).clip(lower=0) - - #distribute capacities among nodes according to capacity factor - #weighting with nodal_fraction + # distribute capacities among nodes according to capacity factor + # weighting with nodal_fraction elec_buses = n.buses.index[n.buses.carrier == "AC"].union(n.buses.index[n.buses.carrier == "DC"]) - nodal_fraction = pd.Series(0.,elec_buses) + nodal_fraction = pd.Series(0., elec_buses) - for country in n.buses.loc[elec_buses,"country"].unique(): + for country in n.buses.loc[elec_buses, "country"].unique(): gens = n.generators.index[(n.generators.index.str[:2] == country) & (n.generators.carrier == carrier)] cfs = n.generators_t.p_max_pu[gens].mean() - cfs_key = cfs/cfs.sum() - nodal_fraction.loc[n.generators.loc[gens,"bus"]] = cfs_key.values + cfs_key = cfs / cfs.sum() + nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values - nodal_df = df.loc[n.buses.loc[elec_buses,"country"]] + nodal_df = df.loc[n.buses.loc[elec_buses, "country"]] nodal_df.index = elec_buses - nodal_df = nodal_df.multiply(nodal_fraction,axis=0) + nodal_df = nodal_df.multiply(nodal_fraction, axis=0) for year in nodal_df.columns: for node in nodal_df.index: name = f"{node}-{tech}-{year}" - capacity = nodal_df.loc[node,year] + capacity = nodal_df.loc[node, year] if capacity > 0.: - df_agg.at[name,"Fueltype"] = tech - 
df_agg.at[name,"Capacity"] = capacity - df_agg.at[name,"DateIn"] = year - df_agg.at[name,"cluster_bus"] = node + df_agg.at[name, "Fueltype"] = tech + df_agg.at[name, "Capacity"] = capacity + df_agg.at[name, "DateIn"] = year + df_agg.at[name, "cluster_bus"] = node + def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear): """ - Parameters ---------- - n : network - - grouping_years : intervals to group existing capacities - - costs : to read lifetime to estimate YearDecomissioning - - + n : pypsa.Network + grouping_years : + intervals to group existing capacities + costs : + to read lifetime to estimate YearDecomissioning + baseyear : int """ - print("adding power capacities installed before baseyear") + print("adding power capacities installed before baseyear from powerplants.csv") - - ### add conventional capacities using 'powerplants.csv' df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0) - rename_fuel = {'Hard Coal':'coal', - 'Lignite':'lignite', - 'Nuclear':'nuclear', - 'Oil':'oil', - 'OCGT':'OCGT', - 'CCGT':'CCGT', - 'Natural Gas':'gas',} - fueltype_to_drop = ['Hydro', - 'Wind', - 'Solar', - 'Geothermal', - 'Bioenergy', - 'Waste', - 'Other', - 'CCGT, Thermal'] - technology_to_drop = ['Pv', - 'Storage Technologies'] + rename_fuel = { + 'Hard Coal': 'coal', + 'Lignite': 'lignite', + 'Nuclear': 'nuclear', + 'Oil': 'oil', + 'OCGT': 'OCGT', + 'CCGT': 'CCGT', + 'Natural Gas': 'gas' + } - df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)],inplace=True) - df_agg.drop(df_agg.index[df_agg.Technology.isin(technology_to_drop)],inplace=True) + fueltype_to_drop = [ + 'Hydro', + 'Wind', + 'Solar', + 'Geothermal', + 'Bioenergy', + 'Waste', + 'Other', + 'CCGT, Thermal' + ] + + technology_to_drop = [ + 'Pv', + 'Storage Technologies' + ] + + df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True) + df_agg.drop(df_agg.index[df_agg.Technology.isin(technology_to_drop)], inplace=True) df_agg.Fueltype = 
df_agg.Fueltype.map(rename_fuel) - #assign clustered bus - busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze() - busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze() + # assign clustered bus + busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0, squeeze=True) + busmap = pd.read_csv(snakemake.input.busmap, index_col=0, squeeze=True) clustermaps = busmap_s.map(busmap) clustermaps.index = clustermaps.index.astype(int) df_agg["cluster_bus"] = df_agg.bus.map(clustermaps) - - #include renewables in df_agg + # include renewables in df_agg add_existing_renewables(df_agg) - df_agg["grouping_year"] = np.take(grouping_years, - np.digitize(df_agg.DateIn, - grouping_years, - right=True)) + df_agg["grouping_year"] = np.take( + grouping_years, + np.digitize(df_agg.DateIn, grouping_years, right=True) + ) - df = df_agg.pivot_table(index=["grouping_year",'Fueltype'], columns='cluster_bus', - values='Capacity', aggfunc='sum') + df = df_agg.pivot_table( + index=["grouping_year", 'Fueltype'], + columns='cluster_bus', + values='Capacity', + aggfunc='sum' + ) - carrier = {"OCGT" : "gas", - "CCGT" : "gas", - "coal" : "coal", - "oil" : "oil", - "lignite" : "lignite", - "nuclear" : "uranium"} + carrier = { + "OCGT": "gas", + "CCGT": "gas", + "coal": "coal", + "oil": "oil", + "lignite": "lignite", + "nuclear": "uranium" + } for grouping_year, generator in df.index: - #capacity is the capacity in MW at each node for this + + # capacity is the capacity in MW at each node for this capacity = df.loc[grouping_year, generator] capacity = capacity[~capacity.isna()] capacity = capacity[capacity > snakemake.config['existing_capacities']['threshold_capacity']] if generator in ['solar', 'onwind', 'offwind']: - if generator =='offwind': - p_max_pu=n.generators_t.p_max_pu[capacity.index + ' offwind-ac' + '-' + str(baseyear)] - else: - p_max_pu=n.generators_t.p_max_pu[capacity.index + ' ' + generator + '-' + str(baseyear)] + + rename = {"offwind": "offwind-ac"} 
+ p_max_pu=n.generators_t.p_max_pu[capacity.index + ' ' + rename.get(generator, generator) + '-' + str(baseyear)] n.madd("Generator", - capacity.index, - suffix=' ' + generator +"-"+ str(grouping_year), - bus=capacity.index, - carrier=generator, - p_nom=capacity, - marginal_cost=costs.at[generator,'VOM'], - capital_cost=costs.at[generator,'fixed'], - efficiency=costs.at[generator, 'efficiency'], - p_max_pu=p_max_pu.rename(columns=n.generators.bus), - build_year=grouping_year, - lifetime=costs.at[generator,'lifetime']) + capacity.index, + suffix=' ' + generator +"-"+ str(grouping_year), + bus=capacity.index, + carrier=generator, + p_nom=capacity, + marginal_cost=costs.at[generator, 'VOM'], + capital_cost=costs.at[generator, 'fixed'], + efficiency=costs.at[generator, 'efficiency'], + p_max_pu=p_max_pu.rename(columns=n.generators.bus), + build_year=grouping_year, + lifetime=costs.at[generator, 'lifetime'] + ) + else: + n.madd("Link", - capacity.index, - suffix= " " + generator +"-" + str(grouping_year), - bus0="EU " + carrier[generator], - bus1=capacity.index, - bus2="co2 atmosphere", - carrier=generator, - marginal_cost=costs.at[generator,'efficiency']*costs.at[generator,'VOM'], #NB: VOM is per MWel - capital_cost=costs.at[generator,'efficiency']*costs.at[generator,'fixed'], #NB: fixed cost is per MWel - p_nom=capacity/costs.at[generator,'efficiency'], - efficiency=costs.at[generator,'efficiency'], - efficiency2=costs.at[carrier[generator],'CO2 intensity'], - build_year=grouping_year, - lifetime=costs.at[generator,'lifetime']) + capacity.index, + suffix= " " + generator +"-" + str(grouping_year), + bus0="EU " + carrier[generator], + bus1=capacity.index, + bus2="co2 atmosphere", + carrier=generator, + marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel + capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel + p_nom=capacity / costs.at[generator, 'efficiency'], + 
efficiency=costs.at[generator, 'efficiency'], + efficiency2=costs.at[carrier[generator], 'CO2 intensity'], + build_year=grouping_year, + lifetime=costs.at[generator, 'lifetime'] + ) def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime): - """ - Parameters ---------- - n : network - - baseyear: last year covered in the existing capacities database - + n : pypsa.Network + baseyear : last year covered in the existing capacities database grouping_years : intervals to group existing capacities - - linear decommissioning of heating capacities from 2020 to 2045 is - currently assumed - - heating capacities split between residential and services proportional - to heating load in both - 50% capacities in rural busess 50% in urban buses + linear decommissioning of heating capacities from 2020 to 2045 is + currently assumed heating capacities split between residential and + services proportional to heating load in both 50% capacities + in rural busess 50% in urban buses """ print("adding heating capacities installed before baseyear") @@ -263,43 +248,42 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years # heating/cooling fuel deployment (fossil/renewables) " # https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1 # file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv". 
+ # TODO start from original file # retrieve existing heating capacities - techs = ['gas boiler', - 'oil boiler', - 'resistive heater', - 'air heat pump', - 'ground heat pump'] - df = pd.read_csv('data/existing_infrastructure/existing_heating_raw.csv', - index_col=0, - header=0) - # data for Albania, Montenegro and Macedonia not included in database - df.loc['Albania']=np.nan - df.loc['Montenegro']=np.nan - df.loc['Macedonia']=np.nan - df.fillna(0, inplace=True) - df *= 1e3 # GW to MW + techs = [ + 'gas boiler', + 'oil boiler', + 'resistive heater', + 'air heat pump', + 'ground heat pump' + ] + df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0) - cc = pd.read_csv('data/Country_codes.csv', - index_col=0) + # data for Albania, Montenegro and Macedonia not included in database + df.loc['Albania'] = np.nan + df.loc['Montenegro'] = np.nan + df.loc['Macedonia'] = np.nan + + df.fillna(0., inplace=True) + + # convert GW to MW + df *= 1e3 + + cc = pd.read_csv(snakemake.input.country_codes, index_col=0) df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True) # coal and oil boilers are assimilated to oil boilers - df['oil boiler'] =df['oil boiler'] + df['coal boiler'] + df['oil boiler'] = df['oil boiler'] + df['coal boiler'] df.drop(['coal boiler'], axis=1, inplace=True) # distribute technologies to nodes by population - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, - index_col=0) - pop_layout["ct"] = pop_layout.index.str[:2] - ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() - pop_layout["ct_total"] = pop_layout["ct"].map(ct_total.get) - pop_layout["fraction"] = pop_layout["total"]/pop_layout["ct_total"] + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) nodal_df = df.loc[pop_layout.ct] nodal_df.index = pop_layout.index - nodal_df = nodal_df.multiply(pop_layout.fraction,axis=0) + nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0) # split existing capacities between residential and 
services # proportional to energy demand @@ -309,122 +293,126 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years for node in nodal_df.index], index=nodal_df.index) for tech in techs: - nodal_df['residential ' + tech] = nodal_df[tech]*ratio_residential - nodal_df['services ' + tech] = nodal_df[tech]*(1-ratio_residential) + nodal_df['residential ' + tech] = nodal_df[tech] * ratio_residential + nodal_df['services ' + tech] = nodal_df[tech] * (1 - ratio_residential) - nodes={} - p_nom={} - for name in ["residential rural", - "services rural", - "residential urban decentral", - "services urban decentral", - "urban central"]: + names = [ + "residential rural", + "services rural", + "residential urban decentral", + "services urban decentral", + "urban central" + ] + + nodes = {} + p_nom = {} + for name in names: name_type = "central" if name == "urban central" else "decentral" - nodes[name] = pd.Index([n.buses.at[index,"location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]]) + nodes[name] = pd.Index([n.buses.at[index, "location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]]) heat_pump_type = "air" if "urban" in name else "ground" heat_type= "residential" if "residential" in name else "services" if name == "urban central": - p_nom[name]=nodal_df['air heat pump'][nodes[name]] + p_nom[name] = nodal_df['air heat pump'][nodes[name]] else: - p_nom[name] = nodal_df['{} {} heat pump'.format(heat_type, heat_pump_type)][nodes[name]] + p_nom[name] = nodal_df[f'{heat_type} {heat_pump_type} heat pump'][nodes[name]] # Add heat pumps - costs_name = "{} {}-sourced heat pump".format("decentral", heat_pump_type) + costs_name = f"decentral {heat_pump_type}-sourced heat pump" + cop = {"air": ashp_cop, "ground": gshp_cop} + + if time_dep_hp_cop: + efficiency = cop[heat_pump_type][nodes[name]] + else: + efficiency = costs.at[costs_name, 'efficiency'] + + 
for i, grouping_year in enumerate(grouping_years): - cop = {"air" : ashp_cop, "ground" : gshp_cop} - efficiency = cop[heat_pump_type][nodes[name]] if time_dep_hp_cop else costs.at[costs_name,'efficiency'] - for i,grouping_year in enumerate(grouping_years): if int(grouping_year) + default_lifetime <= int(baseyear): - ratio=0 + ratio = 0 else: - #installation is assumed to be linear for the past 25 years (default lifetime) - ratio = (int(grouping_year)-int(grouping_years[i-1]))/default_lifetime + # installation is assumed to be linear for the past 25 years (default lifetime) + ratio = (int(grouping_year) - int(grouping_years[i-1])) / default_lifetime n.madd("Link", - nodes[name], - suffix=" {} {} heat pump-{}".format(name,heat_pump_type, grouping_year), - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", - carrier="{} {} heat pump".format(name,heat_pump_type), - efficiency=efficiency, - capital_cost=costs.at[costs_name,'efficiency']*costs.at[costs_name,'fixed'], - p_nom=p_nom[name]*ratio/costs.at[costs_name,'efficiency'], - build_year=int(grouping_year), - lifetime=costs.at[costs_name,'lifetime']) + nodes[name], + suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}", + bus0=nodes[name], + bus1=nodes[name] + " " + name + " heat", + carrier=f"{name} {heat_pump_type} heat pump", + efficiency=efficiency, + capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'], + p_nom=p_nom[name] * ratio / costs.at[costs_name, 'efficiency'], + build_year=int(grouping_year), + lifetime=costs.at[costs_name, 'lifetime'] + ) # add resistive heater, gas boilers and oil boilers # (50% capacities to rural buses, 50% to urban buses) n.madd("Link", - nodes[name], - suffix= " " + name + " resistive heater-{}".format(grouping_year), - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", - carrier=name + " resistive heater", - efficiency=costs.at[name_type + ' resistive heater','efficiency'], - capital_cost=costs.at[name_type + ' resistive 
heater','efficiency']*costs.at[name_type + ' resistive heater','fixed'], - p_nom=0.5*nodal_df['{} resistive heater'.format(heat_type)][nodes[name]]*ratio/costs.at[name_type + ' resistive heater','efficiency'], - build_year=int(grouping_year), - lifetime=costs.at[costs_name,'lifetime']) + nodes[name], + suffix=f" {name} resistive heater-{grouping_year}", + bus0=nodes[name], + bus1=nodes[name] + " " + name + " heat", + carrier=name + " resistive heater", + efficiency=costs.at[name_type + ' resistive heater', 'efficiency'], + capital_cost=costs.at[name_type + ' resistive heater', 'efficiency'] * costs.at[name_type + ' resistive heater', 'fixed'], + p_nom=0.5 * nodal_df[f'{heat_type} resistive heater'][nodes[name]] * ratio / costs.at[name_type + ' resistive heater', 'efficiency'], + build_year=int(grouping_year), + lifetime=costs.at[costs_name, 'lifetime'] + ) n.madd("Link", - nodes[name], - suffix= " " + name + " gas boiler-{}".format(grouping_year), - bus0=["EU gas"]*len(nodes[name]), - bus1=nodes[name] + " " + name + " heat", - bus2="co2 atmosphere", - carrier=name + " gas boiler", - efficiency=costs.at[name_type + ' gas boiler','efficiency'], - efficiency2=costs.at['gas','CO2 intensity'], - capital_cost=costs.at[name_type + ' gas boiler','efficiency']*costs.at[name_type + ' gas boiler','fixed'], - p_nom=0.5*nodal_df['{} gas boiler'.format(heat_type)][nodes[name]]*ratio/costs.at[name_type + ' gas boiler','efficiency'], - build_year=int(grouping_year), - lifetime=costs.at[name_type + ' gas boiler','lifetime']) + nodes[name], + suffix= f" {name} gas boiler-{grouping_year}", + bus0="EU gas", + bus1=nodes[name] + " " + name + " heat", + bus2="co2 atmosphere", + carrier=name + " gas boiler", + efficiency=costs.at[name_type + ' gas boiler', 'efficiency'], + efficiency2=costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at[name_type + ' gas boiler', 'efficiency'] * costs.at[name_type + ' gas boiler', 'fixed'], + p_nom=0.5*nodal_df[f'{heat_type} gas 
boiler'][nodes[name]] * ratio / costs.at[name_type + ' gas boiler', 'efficiency'], + build_year=int(grouping_year), + lifetime=costs.at[name_type + ' gas boiler', 'lifetime'] + ) + n.madd("Link", - nodes[name], - suffix=" " + name + " oil boiler-{}".format(grouping_year), - bus0=["EU oil"]*len(nodes[name]), - bus1=nodes[name] + " " + name + " heat", - bus2="co2 atmosphere", - carrier=name + " oil boiler", - efficiency=costs.at['decentral oil boiler','efficiency'], - efficiency2=costs.at['oil','CO2 intensity'], - capital_cost=costs.at['decentral oil boiler','efficiency']*costs.at['decentral oil boiler','fixed'], - p_nom=0.5*nodal_df['{} oil boiler'.format(heat_type)][nodes[name]]*ratio/costs.at['decentral oil boiler','efficiency'], - build_year=int(grouping_year), - lifetime=costs.at[name_type + ' gas boiler','lifetime']) + nodes[name], + suffix=f" {name} oil boiler-{grouping_year}", + bus0="EU oil", + bus1=nodes[name] + " " + name + " heat", + bus2="co2 atmosphere", + carrier=name + " oil boiler", + efficiency=costs.at['decentral oil boiler', 'efficiency'], + efficiency2=costs.at['oil', 'CO2 intensity'], + capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'], + p_nom=0.5 * nodal_df[f'{heat_type} oil boiler'][nodes[name]] * ratio / costs.at['decentral oil boiler', 'efficiency'], + build_year=int(grouping_year), + lifetime=costs.at[name_type + ' gas boiler', 'lifetime'] + ) # delete links with p_nom=nan corresponding to extra nodes in country n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and np.isnan(n.links.p_nom[index])]) # delete links if their lifetime is over and p_nom=0 - n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and n.links.p_nom[index] 26% is non-electric + # Fix Norway space and water heating fractions + # http://www.ssb.no/en/energi-og-industri/statistikker/husenergi/hvert-3-aar/2014-07-14 + # The main heating 
source for about 73 per cent of the households is based on electricity + # => 26% is non-electric elec_fraction = 0.73 - without_norway = clean_df.drop("NO") + no_norway = df.drop("NO") - for sector in ["residential","services"]: + for sector in ["residential", "services"]: - #assume non-electric is heating - total_heating = (clean_df.loc["NO","{} {}".format("total",sector)]-clean_df.loc["NO","{} {}".format("electricity",sector)])/(1-elec_fraction) + # assume non-electric is heating + nonelectric = df.loc["NO", f"total {sector}"] - df.loc["NO", f"electricity {sector}"] + total_heating = nonelectric / (1 - elec_fraction) for use in uses: - fraction = ((without_norway["{} {} {}".format("total",sector,use)]-without_norway["{} {} {}".format("electricity",sector,use)])/ - (without_norway["{} {}".format("total",sector)]-without_norway["{} {}".format("electricity",sector)])).mean() - clean_df.loc["NO","{} {} {}".format("total",sector,use)] = total_heating*fraction - clean_df.loc["NO","{} {} {}".format("electricity",sector,use)] = total_heating*fraction*elec_fraction + nonelectric_use = no_norway[f"total {sector} {use}"] - no_norway[f"electricity {sector} {use}"] + nonelectric = no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"] + fraction = nonelectric_use.div(nonelectric).mean() + df.loc["NO", f"total {sector} {use}"] = total_heating * fraction + df.loc["NO", f"electricity {sector} {use}"] = total_heating * fraction * elec_fraction - #Missing aviation - print("Aviation") - clean_df.loc[missing_in_eurostat,"total domestic aviation"] = eurostat.loc[idx[missing_in_eurostat,:,:,"Domestic aviation"],"Total all products"].groupby(level=0).sum() - clean_df.loc[missing_in_eurostat,"total international aviation"] = eurostat.loc[idx[missing_in_eurostat,:,:,"International aviation"],"Total all products"].groupby(level=0).sum() + # Missing aviation - print("Domestic navigation") - clean_df.loc[missing_in_eurostat,"total domestic navigation"] = 
eurostat.loc[idx[missing_in_eurostat,:,:,"Domestic Navigation"],"Total all products"].groupby(level=0).sum() + slicer = idx[to_fill, :, :, "Domestic aviation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + df.loc[to_fill, "total domestic aviation"] = fill_values + slicer = idx[to_fill, :, :, "International aviation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + df.loc[to_fill, "total international aviation"] = fill_values - #split road traffic for non-IDEES - missing = clean_df.index[clean_df["total passenger cars"].isnull()] - for fuel in ["total","electricity"]: - selection = [fuel+" passenger cars",fuel+" other road passenger",fuel+" light duty road freight"] + # missing domestic navigation + + slicer = idx[to_fill, :, :, "Domestic Navigation"] + fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum() + df.loc[to_fill, "total domestic navigation"] = fill_values + + # split road traffic for non-IDEES + missing = df.index[df["total passenger cars"].isna()] + for fuel in ["total", "electricity"]: + selection = [ + f"{fuel} passenger cars", + f"{fuel} other road passenger", + f"{fuel} light duty road freight", + ] if fuel == "total": - selection = [fuel+" two-wheel"] + selection + [fuel+" heavy duty road freight"] - road = clean_df[selection].sum() - road_fraction = road/road.sum() - for i in road_fraction.index: - clean_df.loc[missing,i] = road_fraction[i]*clean_df.loc[missing,fuel+" road"] + selection.extend([ + f"{fuel} two-wheel", + f"{fuel} heavy duty road freight" + ]) + road = df[selection].sum() + road_fraction = road / road.sum() + fill_values = cartesian(df.loc[missing, f"{fuel} road"], road_fraction) + df.loc[missing, road_fraction.index] = fill_values + # split rail traffic for non-IDEES + missing = df.index[df["total rail passenger"].isna()] + for fuel in ["total", "electricity"]: + selection = [f"{fuel} rail passenger", f"{fuel} rail freight"] + rail = 
df[selection].sum() + rail_fraction = rail / rail.sum() + fill_values = cartesian(df.loc[missing, f"{fuel} rail"], rail_fraction) + df.loc[missing, rail_fraction.index] = fill_values - #split rail traffic for non-IDEES - missing = clean_df.index[clean_df["total rail passenger"].isnull()] - for fuel in ["total","electricity"]: - selection = [fuel+" rail passenger",fuel+" rail freight"] - rail = clean_df[selection].sum() - rail_fraction = rail/rail.sum() - for i in rail_fraction.index: - clean_df.loc[missing,i] = rail_fraction[i]*clean_df.loc[missing,fuel+" rail"].values + # split aviation traffic for non-IDEES + missing = df.index[df["total domestic aviation passenger"].isna()] + for destination in ["domestic", "international"]: + selection = [ + f"total {destination} aviation passenger", + f"total {destination} aviation freight", + ] + aviation = df[selection].sum() + aviation_fraction = aviation / aviation.sum() + fill_values = cartesian(df.loc[missing, f"total {destination} aviation"], aviation_fraction) + df.loc[missing, aviation_fraction.index] = fill_values + for purpose in ["passenger", "freight"]: + attrs = [f"total domestic aviation {purpose}", f"total international aviation {purpose}"] + df.loc[missing, f"total aviation {purpose}"] = df.loc[missing, attrs].sum(axis=1) - #split aviation traffic for non-IDEES - missing = clean_df.index[clean_df["total domestic aviation passenger"].isnull()] - for destination in ["domestic","international"]: - selection = ["total " + destination+" aviation passenger","total " + destination+" aviation freight"] - aviation = clean_df[selection].sum() - aviation_fraction = aviation/aviation.sum() - for i in aviation_fraction.index: - clean_df.loc[missing,i] = aviation_fraction[i]*clean_df.loc[missing,"total "+ destination + " aviation"].values - clean_df.loc[missing,"total aviation passenger"] = clean_df.loc[missing,["total domestic aviation passenger","total international aviation passenger"]].sum(axis=1) - 
clean_df.loc[missing,"total aviation freight"] = clean_df.loc[missing,["total domestic aviation freight","total international aviation freight"]].sum(axis=1) + if "BA" in df.index: + # fill missing data for BA (services and road energy data) + # proportional to RS with ratio of total residential demand + missing = df.loc["BA"] == 0.0 + ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"] + df.loc['BA', missing] = ratio * df.loc["RS", missing] - if "BA" in clean_df.index: - #fix missing data for BA (services and road energy data) - missing = (clean_df.loc["BA"] == 0.) - - #add back in proportional to RS with ratio of total residential demand - clean_df.loc["BA",missing] = clean_df.loc["BA","total residential"]/clean_df.loc["RS","total residential"]*clean_df.loc["RS",missing] - - clean_df.to_csv(snakemake.output.energy_name) - - return clean_df + return df def build_eea_co2(year=1990): - # see ../notebooks/compute_1990_Europe_emissions_for_targets.ipynb - #https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 - #downloaded 201228 (modified by EEA last on 201221) - fn = "data/eea/UNFCCC_v23.csv" - df = pd.read_csv(fn, encoding="latin-1") - df.loc[df["Year"] == "1985-1987","Year"] = 1986 - df["Year"] = df["Year"].astype(int) - df = df.set_index(['Country_code', 'Pollutant_name', 'Year', 'Sector_name']).sort_index() + # https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16 + # downloaded 201228 (modified by EEA last on 201221) + df = pd.read_csv(snakemake.input.co2, encoding="latin-1") - e = pd.Series() - e["electricity"] = '1.A.1.a - Public Electricity and Heat Production' - e['residential non-elec'] = '1.A.4.b - Residential' - e['services non-elec'] = '1.A.4.a - Commercial/Institutional' - e['rail non-elec'] = "1.A.3.c - Railways" - e["road non-elec"] = '1.A.3.b - Road 
Transportation' - e["domestic navigation"] = "1.A.3.d - Domestic Navigation" - e['international navigation'] = '1.D.1.b - International Navigation' - e["domestic aviation"] = '1.A.3.a - Domestic Aviation' - e["international aviation"] = '1.D.1.a - International Aviation' - e['total energy'] = '1 - Energy' - e['industrial processes'] = '2 - Industrial Processes and Product Use' - e['agriculture'] = '3 - Agriculture' - e['LULUCF'] = '4 - Land Use, Land-Use Change and Forestry' - e['waste management'] = '5 - Waste management' - e['other'] = '6 - Other Sector' - e['indirect'] = 'ind_CO2 - Indirect CO2' - e["total wL"] = "Total (with LULUCF)" - e["total woL"] = "Total (without LULUCF)" + df.replace(dict(Year="1985-1987"), 1986, inplace=True) + df.Year = df.Year.astype(int) + index_col = ["Country_code", "Pollutant_name", "Year", "Sector_name"] + df = df.set_index(index_col).sort_index() + + emissions_scope = snakemake.config["energy"]["emissions"] + + cts = ["CH", "EUA", "NO"] + eu28_eea + + slicer = idx[cts, emissions_scope, year, to_ipcc.values()] + emissions = ( + df.loc[slicer, "emissions"] + .unstack("Sector_name") + .rename(columns=reverse(to_ipcc)) + .droplevel([1,2]) + ) + + emissions.rename(index={"EUA": "EU28", "UK": "GB"}, inplace=True) + + to_subtract = [ + "electricity", + "services non-elec", + "residential non-elec", + "road non-elec", + "rail non-elec", + "domestic aviation", + "international aviation", + "domestic navigation", + "international navigation", + ] + emissions["industrial non-elec"] = emissions["total energy"] - emissions[to_subtract].sum(axis=1) + + to_drop = ["total energy", "total wL", "total woL"] + emissions.drop(columns=to_drop, inplace=True) + + # convert from Gg to Mt + return emissions / 1e3 - pol = "CO2" #["All greenhouse gases - (CO2 equivalent)","CO2"] +def build_eurostat_co2(countries, year=1990): - cts = ["CH","EUA","NO"] + eu28_eea + eurostat = build_eurostat(countries, year) - emissions = 
df.loc[idx[cts,pol,year,e.values],"emissions"].unstack("Sector_name").rename(columns=pd.Series(e.index,e.values)).rename(index={"All greenhouse gases - (CO2 equivalent)" : "GHG"},level=1) + specific_emissions = pd.Series(index=eurostat.columns, dtype=float) - #only take level 0, since level 1 (pol) and level 2 (year) are trivial - emissions = emissions.groupby(level=0,axis=0).sum() + # emissions in tCO2_equiv per MWh_th + specific_emissions["Solid fuels"] = 0.36 # Approximates coal + specific_emissions["Oil (total)"] = 0.285 # Average of distillate and residue + specific_emissions["Gas"] = 0.2 # For natural gas - emissions.rename(index={"EUA" : "EU28", "UK" : "GB"},inplace=True) + # oil values from https://www.eia.gov/tools/faqs/faq.cfm?id=74&t=11 + # Distillate oil (No. 2) 0.276 + # Residual oil (No. 6) 0.298 + # https://www.eia.gov/electricity/annual/html/epa_a_03.html - emissions['industrial non-elec'] = emissions['total energy'] - emissions[['electricity', 'services non-elec','residential non-elec', 'road non-elec', - 'rail non-elec', 'domestic aviation', 'international aviation', 'domestic navigation', - 'international navigation']].sum(axis=1) - - emissions.drop(columns=["total energy", "total wL", "total woL"],inplace=True) - - return emissions/1e3 + return eurostat.multiply(specific_emissions).sum(axis=1) -def build_eurostat_co2(year=1990): +def build_co2_totals(countries, eea_co2, eurostat_co2): - eurostat_for_co2 = build_eurostat(year) + co2 = eea_co2.reindex(countries) - se = pd.Series(index=eurostat_for_co2.columns,dtype=float) + for ct in countries.intersection(["BA", "RS", "AL", "ME", "MK"]): - #emissions in tCO2_equiv per MWh_th - se["Solid fuels"] = 0.36 #Approximates coal - se["Oil (total)"] = 0.285 #Average of distillate and residue - se["Gas"] = 0.2 #For natural gas + mappings = { + "electricity": (ct, "+", "Conventional Thermal Power Stations", "of which From Coal"), + "residential non-elec": (ct, "+", "+", "Residential"), + "services non-elec": 
(ct, "+", "+", "Services"), + "road non-elec": (ct, "+", "+", "Road"), + "rail non-elec": (ct, "+", "+", "Rail"), + "domestic navigation": (ct, "+", "+", "Domestic Navigation"), + "international navigation": (ct, "-", "Bunkers"), + "domestic aviation": (ct, "+", "+", "Domestic aviation"), + "international aviation": (ct, "+", "+", "International aviation"), + # does not include industrial process emissions or fuel processing/refining + "industrial non-elec": (ct, "+", "Industry"), + # does not include non-energy emissions + "agriculture": (ct, "+", "+", "Agriculture / Forestry"), + } - #oil values from https://www.eia.gov/tools/faqs/faq.cfm?id=74&t=11 - #Distillate oil (No. 2) 0.276 - #Residual oil (No. 6) 0.298 - #https://www.eia.gov/electricity/annual/html/epa_a_03.html - - eurostat_co2 = eurostat_for_co2.multiply(se).sum(axis=1) - - return eurostat_co2 - - -def build_co2_totals(eea_co2, eurostat_co2): - - co2 = eea_co2.reindex(["EU28","NO","CH","BA","RS","AL","ME","MK"] + eu28) - - for ct in ["BA","RS","AL","ME","MK"]: - co2.loc[ct,"electricity"] = eurostat_co2[ct,"+","Conventional Thermal Power Stations","of which From Coal"].sum() - co2.loc[ct,"residential non-elec"] = eurostat_co2[ct,"+","+","Residential"].sum() - co2.loc[ct,"services non-elec"] = eurostat_co2[ct,"+","+","Services"].sum() - co2.loc[ct,"road non-elec"] = eurostat_co2[ct,"+","+","Road"].sum() - co2.loc[ct,"rail non-elec"] = eurostat_co2[ct,"+","+","Rail"].sum() - co2.loc[ct,"domestic navigation"] = eurostat_co2[ct,"+","+","Domestic Navigation"].sum() - co2.loc[ct,'international navigation'] = eurostat_co2[ct,"-","Bunkers"].sum() - co2.loc[ct,"domestic aviation"] = eurostat_co2[ct,"+","+","Domestic aviation"].sum() - co2.loc[ct,"international aviation"] = eurostat_co2[ct,"+","+","International aviation"].sum() - #doesn't include industrial process emissions or fuel processing/refining - co2.loc[ct,'industrial non-elec'] = eurostat_co2[ct,"+","Industry"].sum() - #doesn't include non-energy 
emissions - co2.loc[ct,'agriculture'] = eurostat_co2[ct,"+","+","Agriculture / Forestry"].sum() + for i, mi in mappings.items(): + co2.at[ct, i] = eurostat_co2.loc[mi].sum() return co2 -def build_transport_data(): +def build_transport_data(countries, population, idees): - transport_data = pd.DataFrame(columns=["number cars","average fuel efficiency"], - index=population.index) + transport_data = pd.DataFrame(index=countries) - ## collect number of cars + # collect number of cars transport_data["number cars"] = idees["passenger cars"] - #CH from http://ec.europa.eu/eurostat/statistics-explained/index.php/Passenger_cars_in_the_EU#Luxembourg_has_the_highest_number_of_passenger_cars_per_inhabitant - transport_data.loc["CH","number cars"] = 4.136e6 + # CH from http://ec.europa.eu/eurostat/statistics-explained/index.php/Passenger_cars_in_the_EU#Luxembourg_has_the_highest_number_of_passenger_cars_per_inhabitant + transport_data.at["CH", "number cars"] = 4.136e6 - missing = transport_data.index[transport_data["number cars"].isnull()] + missing = transport_data.index[transport_data["number cars"].isna()] + print(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.") - print("Missing data on cars from:") + cars_pp = transport_data["number cars"] / population + transport_data.loc[missing, "number cars"] = cars_pp.mean() * population - print(missing) - - cars_pp = transport_data["number cars"]/population - - transport_data.loc[missing,"number cars"] = cars_pp.mean()*population - - - ## collect average fuel efficiency in kWh/km + # collect average fuel efficiency in kWh/km transport_data["average fuel efficiency"] = idees["passenger car efficiency"] - missing = transport_data.index[transport_data["average fuel efficiency"].isnull()] + missing = transport_data.index[transport_data["average fuel efficiency"].isna()] + print(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.") - print("Missing data on fuel 
efficiency from:") - - print(missing) - - transport_data.loc[missing,"average fuel efficiency"] = transport_data["average fuel efficiency"].mean() - - transport_data.to_csv(snakemake.output.transport_name) + fill_values = transport_data["average fuel efficiency"].mean() + transport_data.loc[missing, "average fuel efficiency"] = fill_values return transport_data - if __name__ == "__main__": - - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils import Dict - snakemake = Dict() - snakemake.output = Dict() - snakemake.output['energy_name'] = "data/energy_totals.csv" - snakemake.output['co2_name'] = "data/co2_totals.csv" - snakemake.output['transport_name'] = "data/transport_data.csv" + from helper import mock_snakemake + snakemake = mock_snakemake('build_energy_totals') - snakemake.input = Dict() - snakemake.input['nuts3_shapes'] = '../pypsa-eur/resources/nuts3_shapes.geojson' + config = snakemake.config["energy"] - nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') - population = nuts3['pop'].groupby(nuts3.country).sum() + nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index") + population = nuts3["pop"].groupby(nuts3.country).sum() - data_year = 2011 - eurostat = build_eurostat(data_year) + countries = population.index + idees_countries = countries.intersection(eu28) + + data_year = config["energy_totals_year"] + eurostat = build_eurostat(countries, data_year) swiss = build_swiss(data_year) - idees = build_idees(data_year) + idees = build_idees(idees_countries, data_year) - build_energy_totals(eurostat, swiss, idees) + energy = build_energy_totals(countries, eurostat, swiss, idees) + energy.to_csv(snakemake.output.energy_name) - - base_year_emissions = 1990 + base_year_emissions = config["base_emissions_year"] eea_co2 = build_eea_co2(base_year_emissions) - eurostat_co2 = build_eurostat_co2(base_year_emissions) - - co2 = build_co2_totals(eea_co2, eurostat_co2) + 
eurostat_co2 = build_eurostat_co2(countries, base_year_emissions) + + co2 = build_co2_totals(countries, eea_co2, eurostat_co2) co2.to_csv(snakemake.output.co2_name) - - build_transport_data() + + transport = build_transport_data(countries, population, idees) + transport.to_csv(snakemake.output.transport_name) diff --git a/scripts/build_heat_demand.py b/scripts/build_heat_demand.py index 169e81f4..ed8a10b9 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_heat_demand.py @@ -1,42 +1,46 @@ +"""Build heat demand time series.""" import geopandas as gpd import atlite import pandas as pd import xarray as xr -import scipy as sp -import helper +import numpy as np -if 'snakemake' not in globals(): - from vresutils import Dict - import yaml - snakemake = Dict() - with open('config.yaml') as f: - snakemake.config = yaml.safe_load(f) - snakemake.input = Dict() - snakemake.output = Dict() +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake( + 'build_heat_demands', + simpl='', + clusters=48, + ) -time = pd.date_range(freq='m', **snakemake.config['snapshots']) -params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]])) + if 'snakemake' not in globals(): + from vresutils import Dict + import yaml + snakemake = Dict() + with open('config.yaml') as f: + snakemake.config = yaml.safe_load(f) + snakemake.input = Dict() + snakemake.output = Dict() -cutout = atlite.Cutout(snakemake.config['atlite']['cutout_name'], - cutout_dir=snakemake.config['atlite']['cutout_dir'], - **params) + time = pd.date_range(freq='h', **snakemake.config['snapshots']) + cutout_config = snakemake.config['atlite']['cutout'] + cutout = atlite.Cutout(cutout_config).sel(time=time) -clustered_busregions_as_geopd = gpd.read_file(snakemake.input.regions_onshore).set_index('name', drop=True) + clustered_regions = gpd.read_file( + snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze() 
-clustered_busregions = pd.Series(clustered_busregions_as_geopd.geometry, index=clustered_busregions_as_geopd.index) + I = cutout.indicatormatrix(clustered_regions) -helper.clean_invalid_geometries(clustered_busregions) + for area in ["rural", "urban", "total"]: -I = cutout.indicatormatrix(clustered_busregions) + pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{area}']) + stacked_pop = pop_layout.stack(spatial=('y', 'x')) + M = I.T.dot(np.diag(I.dot(stacked_pop))) -for item in ["rural","urban","total"]: + heat_demand = cutout.heat_demand( + matrix=M.T, index=clustered_regions.index) - pop_layout = xr.open_dataarray(snakemake.input['pop_layout_'+item]) - - M = I.T.dot(sp.diag(I.dot(pop_layout.stack(spatial=('y', 'x'))))) - - heat_demand = cutout.heat_demand(matrix=M.T,index=clustered_busregions.index) - - heat_demand.to_netcdf(snakemake.output["heat_demand_"+item]) + heat_demand.to_netcdf(snakemake.output[f"heat_demand_{area}"]) diff --git a/scripts/build_industrial_demand.py b/scripts/build_industrial_demand.py deleted file mode 100644 index 829125f7..00000000 --- a/scripts/build_industrial_demand.py +++ /dev/null @@ -1,39 +0,0 @@ - -import pandas as pd - -idx = pd.IndexSlice - -def build_industrial_demand(): - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout,index_col=0) - pop_layout["ct"] = pop_layout.index.str[:2] - ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() - pop_layout["ct_total"] = pop_layout["ct"].map(ct_total) - pop_layout["fraction"] = pop_layout["total"]/pop_layout["ct_total"] - - industrial_demand_per_country = pd.read_csv(snakemake.input.industrial_demand_per_country,index_col=0) - - industrial_demand = industrial_demand_per_country.loc[pop_layout.ct].fillna(0.) 
- industrial_demand.index = pop_layout.index - industrial_demand = industrial_demand.multiply(pop_layout.fraction,axis=0) - - - industrial_demand.to_csv(snakemake.output.industrial_demand) - - - -if __name__ == "__main__": - - # Detect running outside of snakemake and mock snakemake for testing - if 'snakemake' not in globals(): - from vresutils import Dict - import yaml - snakemake = Dict() - snakemake.input = Dict() - snakemake.input['clustered_pop_layout'] = "resources/pop_layout_elec_s_128.csv" - snakemake.input['industrial_demand_per_country']="resources/industrial_demand_per_country.csv" - snakemake.output = Dict() - snakemake.output['industrial_demand'] = "resources/industrial_demand_elec_s_128.csv" - with open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) - - build_industrial_demand() diff --git a/scripts/build_industrial_distribution_key.py b/scripts/build_industrial_distribution_key.py index c5d55a2c..ce5a59ed 100644 --- a/scripts/build_industrial_distribution_key.py +++ b/scripts/build_industrial_distribution_key.py @@ -1,153 +1,131 @@ +"""Build industrial distribution keys from hotmaps database.""" -import pypsa +import uuid import pandas as pd import geopandas as gpd -from shapely import wkt, prepared -from scipy.spatial import cKDTree as KDTree +from itertools import product -def prepare_hotmaps_database(): +def locate_missing_industrial_sites(df): + """ + Locate industrial sites without valid locations based on + city and countries. Should only be used if the model's + spatial resolution is coarser than individual cities. + """ - df = pd.read_csv(snakemake.input.hotmaps_industrial_database, - sep=";", - index_col=0) + try: + from geopy.geocoders import Nominatim + from geopy.extra.rate_limiter import RateLimiter + except: + raise ModuleNotFoundError("Optional dependency 'geopy' not found." 
+ "Install via 'conda install -c conda-forge geopy'" + "or set 'industry: hotmaps_locate_missing: false'.") - #remove those sites without valid geometries - df.drop(df.index[df.geom.isna()], - inplace=True) + locator = Nominatim(user_agent=str(uuid.uuid4())) + geocode = RateLimiter(locator.geocode, min_delay_seconds=2) - #parse geometry - #https://geopandas.org/gallery/create_geopandas_from_pandas.html?highlight=parse#from-wkt-format - df["Coordinates"] = df.geom.apply(lambda x : wkt.loads(x[x.find(";POINT")+1:])) + def locate_missing(s): - gdf = gpd.GeoDataFrame(df, geometry='Coordinates') + if pd.isna(s.City) or s.City == "CONFIDENTIAL": + return None - europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry'] - europe_shape_prepped = prepared.prep(europe_shape) - not_in_europe = gdf.index[~gdf.geometry.apply(europe_shape_prepped.contains)] - print("Removing the following industrial facilities since they are not in European area:") - print(gdf.loc[not_in_europe]) - gdf.drop(not_in_europe, - inplace=True) + loc = geocode([s.City, s.Country], geometry='wkt') + if loc is not None: + print(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n") + return f"POINT({loc.longitude} {loc.latitude})" + else: + return None - country_to_code = { - 'Belgium' : 'BE', - 'Bulgaria' : 'BG', - 'Czech Republic' : 'CZ', - 'Denmark' : 'DK', - 'Germany' : 'DE', - 'Estonia' : 'EE', - 'Ireland' : 'IE', - 'Greece' : 'GR', - 'Spain' : 'ES', - 'France' : 'FR', - 'Croatia' : 'HR', - 'Italy' : 'IT', - 'Cyprus' : 'CY', - 'Latvia' : 'LV', - 'Lithuania' : 'LT', - 'Luxembourg' : 'LU', - 'Hungary' : 'HU', - 'Malta' : 'MA', - 'Netherland' : 'NL', - 'Austria' : 'AT', - 'Poland' : 'PL', - 'Portugal' : 'PT', - 'Romania' : 'RO', - 'Slovenia' : 'SI', - 'Slovakia' : 'SK', - 'Finland' : 'FI', - 'Sweden' : 'SE', - 'United Kingdom' : 'GB', - 'Iceland' : 'IS', - 'Norway' : 'NO', - 'Montenegro' : 'ME', - 'FYR of Macedonia' : 'MK', - 'Albania' : 'AL', - 'Serbia' : 'RS', - 'Turkey' : 'TU', - 
'Bosnia and Herzegovina' : 'BA', - 'Switzerland' : 'CH', - 'Liechtenstein' : 'AT', - } - gdf["country_code"] = gdf.Country.map(country_to_code) + missing = df.index[df.geom.isna()] + df.loc[missing, 'coordinates'] = df.loc[missing].apply(locate_missing, axis=1) - if gdf["country_code"].isna().any(): - print("Warning, some countries not assigned an ISO code") + # report stats + num_still_missing = df.coordinates.isna().sum() + num_found = len(missing) - num_still_missing + share_missing = len(missing) / len(df) * 100 + share_still_missing = num_still_missing / len(df) * 100 + print(f"Found {num_found} missing locations.", + f"Share of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.") - gdf["x"] = gdf.geometry.x - gdf["y"] = gdf.geometry.y + return df + + +def prepare_hotmaps_database(regions): + """ + Load hotmaps database of industrial sites and map onto bus regions. + """ + + df = pd.read_csv(snakemake.input.hotmaps_industrial_database, sep=";", index_col=0) + + df[["srid", "coordinates"]] = df.geom.str.split(';', expand=True) + + if snakemake.config['industry'].get('hotmaps_locate_missing', False): + df = locate_missing_industrial_sites(df) + + # remove those sites without valid locations + df.drop(df.index[df.coordinates.isna()], inplace=True) + + df['coordinates'] = gpd.GeoSeries.from_wkt(df['coordinates']) + + gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326") + + gdf = gpd.sjoin(gdf, regions, how="inner", op='within') + + gdf.rename(columns={"index_right": "bus"}, inplace=True) + gdf["country"] = gdf.bus.str[:2] return gdf -def assign_buses(gdf): +def build_nodal_distribution_key(hotmaps, regions): + """Build nodal distribution keys for each sector.""" - gdf["bus"] = "" + sectors = hotmaps.Subsector.unique() + countries = regions.index.str[:2].unique() - for c in n.buses.country.unique(): - buses_i = n.buses.index[n.buses.country == c] - kdtree = KDTree(n.buses.loc[buses_i, ['x','y']].values) + keys = 
pd.DataFrame(index=regions.index, columns=sectors, dtype=float) - industry_i = gdf.index[(gdf.country_code == c)] + pop = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + pop['country'] = pop.index.str[:2] + ct_total = pop.total.groupby(pop['country']).sum() + keys['population'] = pop.total / pop.country.map(ct_total) - if industry_i.empty: - print("Skipping country with no industry:",c) - else: - tree_i = kdtree.query(gdf.loc[industry_i, ['x','y']].values)[1] - gdf.loc[industry_i, 'bus'] = buses_i[tree_i] + for sector, country in product(sectors, countries): - if (gdf.bus == "").any(): - print("Some industrial facilities have empty buses") - if gdf.bus.isna().any(): - print("Some industrial facilities have NaN buses") + regions_ct = regions.index[regions.index.str.contains(country)] + facilities = hotmaps.query("country == @country and Subsector == @sector") -def build_nodal_distribution_key(gdf): - - sectors = ['Iron and steel','Chemical industry','Cement','Non-metallic mineral products','Glass','Paper and printing','Non-ferrous metals'] - - distribution_keys = pd.DataFrame(index=n.buses.index, - columns=sectors, - dtype=float) - - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout,index_col=0) - pop_layout["ct"] = pop_layout.index.str[:2] - ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() - pop_layout["ct_total"] = pop_layout["ct"].map(ct_total) - distribution_keys["population"] = pop_layout["total"]/pop_layout["ct_total"] - - for c in n.buses.country.unique(): - buses = n.buses.index[n.buses.country == c] - for sector in sectors: - facilities = gdf.index[(gdf.country_code == c) & (gdf.Subsector == sector)] - if not facilities.empty: - emissions = gdf.loc[facilities,"Emissions_ETS_2014"] - if emissions.sum() == 0: - distribution_key = pd.Series(1/len(facilities), - facilities) - else: - #BEWARE: this is a strong assumption - emissions = emissions.fillna(emissions.mean()) - distribution_key = emissions/emissions.sum() - 
distribution_key = distribution_key.groupby(gdf.loc[facilities,"bus"]).sum().reindex(buses,fill_value=0.) + if not facilities.empty: + emissions = facilities["Emissions_ETS_2014"] + if emissions.sum() == 0: + key = pd.Series(1 / len(facilities), facilities.index) else: - distribution_key = distribution_keys.loc[buses,"population"] + #BEWARE: this is a strong assumption + emissions = emissions.fillna(emissions.mean()) + key = emissions / emissions.sum() + key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.) + else: + key = keys.loc[regions_ct, 'population'] - if abs(distribution_key.sum() - 1) > 1e-4: - print(c,sector,distribution_key) + keys.loc[regions_ct, sector] = key - distribution_keys.loc[buses,sector] = distribution_key + return keys - distribution_keys.to_csv(snakemake.output.industrial_distribution_key) if __name__ == "__main__": + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake( + 'build_industrial_distribution_key', + simpl='', + clusters=48, + ) + regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name') - n = pypsa.Network(snakemake.input.network) + hotmaps = prepare_hotmaps_database(regions) - hotmaps_database = prepare_hotmaps_database() + keys = build_nodal_distribution_key(hotmaps, regions) - assign_buses(hotmaps_database) - - build_nodal_distribution_key(hotmaps_database) + keys.to_csv(snakemake.output.industrial_distribution_key) diff --git a/scripts/build_industrial_energy_demand_per_country.py b/scripts/build_industrial_energy_demand_per_country.py deleted file mode 100644 index 6ee67f4f..00000000 --- a/scripts/build_industrial_energy_demand_per_country.py +++ /dev/null @@ -1,83 +0,0 @@ - -import pandas as pd -import numpy as np - - -tj_to_ktoe = 0.0238845 -ktoe_to_twh = 0.01163 - -eb_base_dir = "data/eurostat-energy_balances-may_2018_edition" -jrc_base_dir = "data/jrc-idees-2015" - -# import EU ratios df as csv 
-industry_sector_ratios=pd.read_csv(snakemake.input.industry_sector_ratios, - index_col=0) - -#material demand per country and industry (kton/a) -countries_production = pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0) - -#Annual energy consumption in Switzerland by sector in 2015 (in TJ) -#From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat -#http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775 - -dic_Switzerland ={'Iron and steel': 7889., - 'Chemicals Industry': 26871., - 'Non-metallic mineral products': 15513.+3820., - 'Pulp, paper and printing': 12004., - 'Food, beverages and tobacco': 17728., - 'Non Ferrous Metals': 3037., - 'Transport Equipment': 14993., - 'Machinery Equipment': 4724., - 'Textiles and leather': 1742., - 'Wood and wood products': 0., - 'Other Industrial Sectors': 10825., - 'current electricity': 53760.} - - -eb_names={'NO':'Norway', 'AL':'Albania', 'BA':'Bosnia and Herzegovina', - 'MK':'FYR of Macedonia', 'GE':'Georgia', 'IS':'Iceland', - 'KO':'Kosovo', 'MD':'Moldova', 'ME':'Montenegro', 'RS':'Serbia', - 'UA':'Ukraine', 'TR':'Turkey', } - -jrc_names = {"GR" : "EL", - "GB" : "UK"} - -#final energy consumption per country and industry (TWh/a) -countries_df = countries_production.dot(industry_sector_ratios.T) -countries_df*= 0.001 #GWh -> TWh (ktCO2 -> MtCO2) - - - -non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL'] - - -# save current electricity consumption -for country in countries_df.index: - if country in non_EU: - if country == 'CH': - countries_df.loc[country, 'current electricity']=dic_Switzerland['current electricity']*tj_to_ktoe*ktoe_to_twh - else: - excel_balances = pd.read_excel('{}/{}.XLSX'.format(eb_base_dir,eb_names[country]), - sheet_name='2016', index_col=1,header=0, skiprows=1 ,squeeze=True) - - countries_df.loc[country, 'current electricity'] = excel_balances.loc['Industry', 'Electricity']*ktoe_to_twh - - else: - - excel_out = 
pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(jrc_base_dir,jrc_names.get(country,country)), - sheet_name='Ind_Summary',index_col=0,header=0,squeeze=True) # the summary sheet - - s_out = excel_out.iloc[27:48,-1] - countries_df.loc[country, 'current electricity'] = s_out['Electricity']*ktoe_to_twh - - -rename_sectors = {'elec':'electricity', - 'biomass':'solid biomass', - 'heat':'low-temperature heat'} - -countries_df.rename(columns=rename_sectors,inplace=True) - -countries_df.index.name = "TWh/a (MtCO2/a)" - -countries_df.to_csv(snakemake.output.industrial_energy_demand_per_country, - float_format='%.2f') diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index 4de5d2c1..1d906b24 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -1,140 +1,165 @@ +"""Build industrial energy demand per country.""" import pandas as pd - -# sub-sectors as used in PyPSA-Eur-Sec and listed in JRC-IDEES industry sheets -sub_sectors = {'Iron and steel' : ['Integrated steelworks','Electric arc'], - 'Non-ferrous metals' : ['Alumina production','Aluminium - primary production','Aluminium - secondary production','Other non-ferrous metals'], - 'Chemicals' : ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.', 'Basic chemicals feedstock'], - 'Non-metalic mineral' : ['Cement','Ceramics & other NMM','Glass production'], - 'Printing' : ['Pulp production','Paper production','Printing and media reproduction'], - 'Food' : ['Food, beverages and tobacco'], - 'Transport equipment' : ['Transport Equipment'], - 'Machinery equipment' : ['Machinery Equipment'], - 'Textiles and leather' : ['Textiles and leather'], - 'Wood and wood products' : ['Wood and wood products'], - 'Other Industrial Sectors' : ['Other Industrial Sectors'], -} - - -# name in JRC-IDEES Energy Balances -eb_sheet_name = {'Integrated 
steelworks' : 'cisb', - 'Electric arc' : 'cise', - 'Alumina production' : 'cnfa', - 'Aluminium - primary production' : 'cnfp', - 'Aluminium - secondary production' : 'cnfs', - 'Other non-ferrous metals' : 'cnfo', - 'Basic chemicals' : 'cbch', - 'Other chemicals' : 'coch', - 'Pharmaceutical products etc.' : 'cpha', - 'Basic chemicals feedstock' : 'cpch', - 'Cement' : 'ccem', - 'Ceramics & other NMM' : 'ccer', - 'Glass production' : 'cgla', - 'Pulp production' : 'cpul', - 'Paper production' : 'cpap', - 'Printing and media reproduction' : 'cprp', - 'Food, beverages and tobacco' : 'cfbt', - 'Transport Equipment' : 'ctre', - 'Machinery Equipment' : 'cmae', - 'Textiles and leather' : 'ctel', - 'Wood and wood products' : 'cwwp', - 'Mining and quarrying' : 'cmiq', - 'Construction' : 'ccon', - 'Non-specified': 'cnsi', -} - - - -fuels = {'all' : ['All Products'], - 'solid' : ['Solid Fuels'], - 'liquid' : ['Total petroleum products (without biofuels)'], - 'gas' : ['Gases'], - 'heat' : ['Nuclear heat','Derived heat'], - 'biomass' : ['Biomass and Renewable wastes'], - 'waste' : ['Wastes (non-renewable)'], - 'electricity' : ['Electricity'], -} +import multiprocessing as mp +from tqdm import tqdm ktoe_to_twh = 0.011630 +# name in JRC-IDEES Energy Balances +sector_sheets = {'Integrated steelworks': 'cisb', + 'Electric arc': 'cise', + 'Alumina production': 'cnfa', + 'Aluminium - primary production': 'cnfp', + 'Aluminium - secondary production': 'cnfs', + 'Other non-ferrous metals': 'cnfo', + 'Basic chemicals': 'cbch', + 'Other chemicals': 'coch', + 'Pharmaceutical products etc.': 'cpha', + 'Basic chemicals feedstock': 'cpch', + 'Cement': 'ccem', + 'Ceramics & other NMM': 'ccer', + 'Glass production': 'cgla', + 'Pulp production': 'cpul', + 'Paper production': 'cpap', + 'Printing and media reproduction': 'cprp', + 'Food, beverages and tobacco': 'cfbt', + 'Transport Equipment': 'ctre', + 'Machinery Equipment': 'cmae', + 'Textiles and leather': 'ctel', + 'Wood and wood products': 
'cwwp', + 'Mining and quarrying': 'cmiq', + 'Construction': 'ccon', + 'Non-specified': 'cnsi', + } + + +fuels = {'All Products': 'all', + 'Solid Fuels': 'solid', + 'Total petroleum products (without biofuels)': 'liquid', + 'Gases': 'gas', + 'Nuclear heat': 'heat', + 'Derived heat': 'heat', + 'Biomass and Renewable wastes': 'biomass', + 'Wastes (non-renewable)': 'waste', + 'Electricity': 'electricity' + } + eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', 'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', 'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] -jrc_names = {"GR" : "EL", - "GB" : "UK"} - -year = 2015 -summaries = {} - -#for some reason the Energy Balances list Other Industrial Sectors separately -ois_subs = ['Mining and quarrying','Construction','Non-specified'] +jrc_names = {"GR": "EL", "GB": "UK"} -#MtNH3/a -ammonia = pd.read_csv(snakemake.input.ammonia_production, - index_col=0)/1e3 +def industrial_energy_demand_per_country(country): + + jrc_dir = snakemake.input.jrc + jrc_country = jrc_names.get(country, country) + fn = f'{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx' + + sheets = list(sector_sheets.values()) + df_dict = pd.read_excel(fn, sheet_name=sheets, index_col=0) + + def get_subsector_data(sheet): + + df = df_dict[sheet][year].groupby(fuels).sum() + + df['other'] = df['all'] - df.loc[df.index != 'all'].sum() + + return df + + df = pd.concat({sub: get_subsector_data(sheet) + for sub, sheet in sector_sheets.items()}, axis=1) + + sel = ['Mining and quarrying', 'Construction', 'Non-specified'] + df['Other Industrial Sectors'] = df[sel].sum(axis=1) + df['Basic chemicals'] += df['Basic chemicals feedstock'] + + df.drop(columns=sel+['Basic chemicals feedstock'], index='all', inplace=True) + + df *= ktoe_to_twh + + return df +def add_ammonia_energy_demand(demand): -for ct in eu28: - print(ct) - filename = 'data/jrc-idees-2015/JRC-IDEES-2015_EnergyBalance_{}.xlsx'.format(jrc_names.get(ct,ct)) + # MtNH3/a + fn = 
snakemake.input.ammonia_production + ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3 - summary = pd.DataFrame(index=list(fuels.keys()) + ['other']) + def ammonia_by_fuel(x): - for sector in sub_sectors: - if sector == 'Other Industrial Sectors': - subs = ois_subs - else: - subs = sub_sectors[sector] + fuels = {'gas': config['MWh_CH4_per_tNH3_SMR'], + 'electricity': config['MWh_elec_per_tNH3_SMR']} - for sub in subs: - df = pd.read_excel(filename, - sheet_name=eb_sheet_name[sub], - index_col=0) + return pd.Series({k: x*v for k,v in fuels.items()}) - s = df[year].astype(float) + ammonia = ammonia.apply(ammonia_by_fuel).T - for fuel in fuels: - summary.at[fuel,sub] = s[fuels[fuel]].sum() - summary.at['other',sub] = summary.at['all',sub] - summary.loc[summary.index.symmetric_difference(['all','other']),sub].sum() + demand['Ammonia'] = ammonia.unstack().reindex(index=demand.index, fill_value=0.) - summary['Other Industrial Sectors'] = summary[ois_subs].sum(axis=1) - summary.drop(columns=ois_subs,inplace=True) + demand['Basic chemicals (without ammonia)'] = demand["Basic chemicals"] - demand["Ammonia"] - summary.drop(index=['all'],inplace=True) + demand['Basic chemicals (without ammonia)'].clip(lower=0, inplace=True) + demand.drop(columns='Basic chemicals', inplace=True) - summary *= ktoe_to_twh - - summary['Basic chemicals'] += summary['Basic chemicals feedstock'] - summary.drop(columns=['Basic chemicals feedstock'], inplace=True) - - summary['Ammonia'] = 0. - summary.at['gas','Ammonia'] = snakemake.config['industry']['MWh_CH4_per_tNH3_SMR']*ammonia[str(year)].get(ct,0.) - summary.at['electricity','Ammonia'] = snakemake.config['industry']['MWh_elec_per_tNH3_SMR']*ammonia[str(year)].get(ct,0.) - summary['Basic chemicals (without ammonia)'] = summary['Basic chemicals'] - summary['Ammonia'] - summary.loc[summary['Basic chemicals (without ammonia)'] < 0, 'Basic chemicals (without ammonia)'] = 0. 
- summary.drop(columns=['Basic chemicals'], inplace=True) - - summaries[ct] = summary - -final_summary = pd.concat(summaries,axis=1) - -# add in the non-EU28 based on their output (which is derived from their energy too) -# output in MtMaterial/a -output = pd.read_csv(snakemake.input.industrial_production_per_country, - index_col=0)/1e3 - -eu28_averages = final_summary.groupby(level=1,axis=1).sum().divide(output.loc[eu28].sum(),axis=1) - -non_eu28 = output.index.symmetric_difference(eu28) - -for ct in non_eu28: - print(ct) - final_summary = pd.concat((final_summary,pd.concat({ct : eu28_averages.multiply(output.loc[ct],axis=1)},axis=1)),axis=1) + return demand -final_summary.index.name = 'TWh/a' +def add_non_eu28_industrial_energy_demand(demand): -final_summary.to_csv(snakemake.output.industrial_energy_demand_per_country_today) + # output in MtMaterial/a + fn = snakemake.input.industrial_production_per_country + production = pd.read_csv(fn, index_col=0) / 1e3 + + eu28_production = production.loc[eu28].sum() + eu28_energy = demand.groupby(level=1).sum() + eu28_averages = eu28_energy / eu28_production + + non_eu28 = production.index.symmetric_difference(eu28) + + demand_non_eu28 = pd.concat({k: v * eu28_averages + for k, v in production.loc[non_eu28].iterrows()}) + + return pd.concat([demand, demand_non_eu28]) + + +def industrial_energy_demand(countries): + + nprocesses = snakemake.threads + func = industrial_energy_demand_per_country + tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries), + desc="Build industrial energy demand") + with mp.Pool(processes=nprocesses) as pool: + demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) + + demand = pd.concat(demand_l, keys=countries) + + return demand + + +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake('build_industrial_energy_demand_per_country_today') + + config = snakemake.config['industry'] + year = 
config.get('reference_year', 2015) + + demand = industrial_energy_demand(eu28) + + demand = add_ammonia_energy_demand(demand) + + demand = add_non_eu28_industrial_energy_demand(demand) + + # for format compatibility + demand = demand.stack(dropna=False).unstack(level=[0,2]) + + # style and annotation + demand.index.name = 'TWh/a' + demand.sort_index(axis=1, inplace=True) + + fn = snakemake.output.industrial_energy_demand_per_country_today + demand.to_csv(fn) diff --git a/scripts/build_industrial_energy_demand_per_node.py b/scripts/build_industrial_energy_demand_per_node.py index 0c2300d1..cb085ad1 100644 --- a/scripts/build_industrial_energy_demand_per_node.py +++ b/scripts/build_industrial_energy_demand_per_node.py @@ -1,33 +1,44 @@ +"""Build industrial energy demand per node.""" import pandas as pd -import numpy as np -# import EU ratios df as csv -industry_sector_ratios=pd.read_csv(snakemake.input.industry_sector_ratios, - index_col=0) +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake( + 'build_industrial_energy_demand_per_node', + simpl='', + clusters=48, + ) + + # import EU ratios df as csv + fn = snakemake.input.industry_sector_ratios + industry_sector_ratios = pd.read_csv(fn, index_col=0) -#material demand per node and industry (kton/a) -nodal_production = pd.read_csv(snakemake.input.industrial_production_per_node, - index_col=0) + # material demand per node and industry (kton/a) + fn = snakemake.input.industrial_production_per_node + nodal_production = pd.read_csv(fn, index_col=0) -#energy demand today to get current electricity -nodal_today = pd.read_csv(snakemake.input.industrial_energy_demand_per_node_today, - index_col=0) + # energy demand today to get current electricity + fn = snakemake.input.industrial_energy_demand_per_node_today + nodal_today = pd.read_csv(fn, index_col=0) -#final energy consumption per node and industry (TWh/a) -nodal_df = 
nodal_production.dot(industry_sector_ratios.T) -nodal_df*= 0.001 #GWh -> TWh (ktCO2 -> MtCO2) + # final energy consumption per node and industry (TWh/a) + nodal_df = nodal_production.dot(industry_sector_ratios.T) + + # convert GWh to TWh and ktCO2 to MtCO2 + nodal_df *= 0.001 + rename_sectors = { + 'elec': 'electricity', + 'biomass': 'solid biomass', + 'heat': 'low-temperature heat' + } + nodal_df.rename(columns=rename_sectors, inplace=True) -rename_sectors = {'elec':'electricity', - 'biomass':'solid biomass', - 'heat':'low-temperature heat'} + nodal_df["current electricity"] = nodal_today["electricity"] -nodal_df.rename(columns=rename_sectors,inplace=True) + nodal_df.index.name = "TWh/a (MtCO2/a)" -nodal_df["current electricity"] = nodal_today["electricity"] - -nodal_df.index.name = "TWh/a (MtCO2/a)" - -nodal_df.to_csv(snakemake.output.industrial_energy_demand_per_node, - float_format='%.2f') + fn = snakemake.output.industrial_energy_demand_per_node + nodal_df.to_csv(fn, float_format='%.2f') diff --git a/scripts/build_industrial_energy_demand_per_node_today.py b/scripts/build_industrial_energy_demand_per_node_today.py index 6caf1f58..366e3a95 100644 --- a/scripts/build_industrial_energy_demand_per_node_today.py +++ b/scripts/build_industrial_energy_demand_per_node_today.py @@ -1,54 +1,73 @@ +"""Build industrial energy demand per node.""" import pandas as pd import numpy as np +from itertools import product -def build_nodal_demand(): +# map JRC/our sectors to hotmaps sector, where mapping exist +sector_mapping = { + 'Electric arc': 'Iron and steel', + 'Integrated steelworks': 'Iron and steel', + 'DRI + Electric arc': 'Iron and steel', + 'Ammonia': 'Chemical industry', + 'Basic chemicals (without ammonia)': 'Chemical industry', + 'Other chemicals': 'Chemical industry', + 'Pharmaceutical products etc.': 'Chemical industry', + 'Cement': 'Cement', + 'Ceramics & other NMM': 'Non-metallic mineral products', + 'Glass production': 'Glass', + 'Pulp production': 'Paper and 
printing', + 'Paper production': 'Paper and printing', + 'Printing and media reproduction': 'Paper and printing', + 'Alumina production': 'Non-ferrous metals', + 'Aluminium - primary production': 'Non-ferrous metals', + 'Aluminium - secondary production': 'Non-ferrous metals', + 'Other non-ferrous metals': 'Non-ferrous metals', +} - industrial_demand = pd.read_csv(snakemake.input.industrial_energy_demand_per_country_today, - header=[0,1], - index_col=0) - distribution_keys = pd.read_csv(snakemake.input.industrial_distribution_key, - index_col=0) - distribution_keys["country"] = distribution_keys.index.str[:2] +def build_nodal_industrial_energy_demand(): - nodal_demand = pd.DataFrame(0., - index=distribution_keys.index, - columns=industrial_demand.index, - dtype=float) + fn = snakemake.input.industrial_energy_demand_per_country_today + industrial_demand = pd.read_csv(fn, header=[0, 1], index_col=0) - #map JRC/our sectors to hotmaps sector, where mapping exist - sector_mapping = {'Electric arc' : 'Iron and steel', - 'Integrated steelworks' : 'Iron and steel', - 'DRI + Electric arc' : 'Iron and steel', - 'Ammonia' : 'Chemical industry', - 'Basic chemicals (without ammonia)' : 'Chemical industry', - 'Other chemicals' : 'Chemical industry', - 'Pharmaceutical products etc.' 
: 'Chemical industry', - 'Cement' : 'Cement', - 'Ceramics & other NMM' : 'Non-metallic mineral products', - 'Glass production' : 'Glass', - 'Pulp production' : 'Paper and printing', - 'Paper production' : 'Paper and printing', - 'Printing and media reproduction' : 'Paper and printing', - 'Alumina production' : 'Non-ferrous metals', - 'Aluminium - primary production' : 'Non-ferrous metals', - 'Aluminium - secondary production' : 'Non-ferrous metals', - 'Other non-ferrous metals' : 'Non-ferrous metals', - } + fn = snakemake.input.industrial_distribution_key + keys = pd.read_csv(fn, index_col=0) + keys["country"] = keys.index.str[:2] - for c in distribution_keys.country.unique(): - buses = distribution_keys.index[distribution_keys.country == c] - for sector in industrial_demand.columns.levels[1]: - distribution_key = distribution_keys.loc[buses,sector_mapping.get(sector,"population")] - demand = industrial_demand[c,sector] - outer = pd.DataFrame(np.outer(distribution_key,demand),index=distribution_key.index,columns=demand.index) - nodal_demand.loc[buses] += outer + nodal_demand = pd.DataFrame(0., dtype=float, + index=keys.index, + columns=industrial_demand.index) + + countries = keys.country.unique() + sectors = industrial_demand.columns.levels[1] + + for country, sector in product(countries, sectors): + + buses = keys.index[keys.country == country] + mapping = sector_mapping.get(sector, 'population') + + key = keys.loc[buses, mapping] + demand = industrial_demand[country, sector] + + outer = pd.DataFrame(np.outer(key, demand), + index=key.index, + columns=demand.index) + + nodal_demand.loc[buses] += outer nodal_demand.index.name = "TWh/a" nodal_demand.to_csv(snakemake.output.industrial_energy_demand_per_node_today) -if __name__ == "__main__": - build_nodal_demand() +if __name__ == "__main__": + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake( + 'build_industrial_energy_demand_per_node_today', + simpl='', + 
clusters=48, + ) + + build_nodal_industrial_energy_demand() diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index eed59d29..1754752a 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -1,218 +1,222 @@ +"""Build industrial production per country.""" import pandas as pd import numpy as np +import multiprocessing as mp +from tqdm import tqdm tj_to_ktoe = 0.0238845 ktoe_to_twh = 0.01163 -jrc_base_dir = "data/jrc-idees-2015" -eb_base_dir = "data/eurostat-energy_balances-may_2018_edition" - -# year for which data is retrieved -raw_year = 2015 -year = raw_year-2016 - -sub_sheet_name_dict = { 'Iron and steel':'ISI', - 'Chemicals Industry':'CHI', - 'Non-metallic mineral products': 'NMM', - 'Pulp, paper and printing': 'PPA', - 'Food, beverages and tobacco': 'FBT', - 'Non Ferrous Metals' : 'NFM', - 'Transport Equipment': 'TRE', - 'Machinery Equipment': 'MAE', - 'Textiles and leather':'TEL', - 'Wood and wood products': 'WWP', - 'Other Industrial Sectors': 'OIS'} - -index = ['elec','biomass','methane','hydrogen','heat','naphtha','process emission','process emission from feedstock'] +sub_sheet_name_dict = {'Iron and steel': 'ISI', + 'Chemicals Industry': 'CHI', + 'Non-metallic mineral products': 'NMM', + 'Pulp, paper and printing': 'PPA', + 'Food, beverages and tobacco': 'FBT', + 'Non Ferrous Metals': 'NFM', + 'Transport Equipment': 'TRE', + 'Machinery Equipment': 'MAE', + 'Textiles and leather': 'TEL', + 'Wood and wood products': 'WWP', + 'Other Industrial Sectors': 'OIS'} non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL'] -jrc_names = {"GR" : "EL", - "GB" : "UK"} +jrc_names = {"GR": "EL", "GB": "UK"} eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', 'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', 'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] - -countries = non_EU + eu28 - - -sectors = ['Iron and 
steel','Chemicals Industry','Non-metallic mineral products', - 'Pulp, paper and printing', 'Food, beverages and tobacco', 'Non Ferrous Metals', - 'Transport Equipment', 'Machinery Equipment', 'Textiles and leather', - 'Wood and wood products', 'Other Industrial Sectors'] - -sect2sub = {'Iron and steel':['Electric arc','Integrated steelworks'], +sect2sub = {'Iron and steel': ['Electric arc', 'Integrated steelworks'], 'Chemicals Industry': ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.'], - 'Non-metallic mineral products': ['Cement','Ceramics & other NMM','Glass production'], - 'Pulp, paper and printing': ['Pulp production','Paper production','Printing and media reproduction'], + 'Non-metallic mineral products': ['Cement', 'Ceramics & other NMM', 'Glass production'], + 'Pulp, paper and printing': ['Pulp production', 'Paper production', 'Printing and media reproduction'], 'Food, beverages and tobacco': ['Food, beverages and tobacco'], 'Non Ferrous Metals': ['Alumina production', 'Aluminium - primary production', 'Aluminium - secondary production', 'Other non-ferrous metals'], 'Transport Equipment': ['Transport Equipment'], 'Machinery Equipment': ['Machinery Equipment'], 'Textiles and leather': ['Textiles and leather'], - 'Wood and wood products' :['Wood and wood products'], - 'Other Industrial Sectors':['Other Industrial Sectors']} + 'Wood and wood products': ['Wood and wood products'], + 'Other Industrial Sectors': ['Other Industrial Sectors']} -subsectors = [ss for s in sectors for ss in sect2sub[s]] +sub2sect = {v: k for k, vv in sect2sub.items() for v in vv} -#material demand per country and industry (kton/a) -countries_demand = pd.DataFrame(index=countries, - columns=subsectors, - dtype=float) - - -out_dic ={'Electric arc': 'Electric arc', +fields = {'Electric arc': 'Electric arc', 'Integrated steelworks': 'Integrated steelworks', 'Basic chemicals': 'Basic chemicals (kt ethylene eq.)', - 'Other chemicals':'Other chemicals (kt ethylene eq.)', 
- 'Pharmaceutical products etc.':'Pharmaceutical products etc. (kt ethylene eq.)', - 'Cement':'Cement (kt)', - 'Ceramics & other NMM':'Ceramics & other NMM (kt bricks eq.)', - 'Glass production':'Glass production (kt)', - 'Pulp production':'Pulp production (kt)', - 'Paper production':'Paper production (kt)', - 'Printing and media reproduction':'Printing and media reproduction (kt paper eq.)', + 'Other chemicals': 'Other chemicals (kt ethylene eq.)', + 'Pharmaceutical products etc.': 'Pharmaceutical products etc. (kt ethylene eq.)', + 'Cement': 'Cement (kt)', + 'Ceramics & other NMM': 'Ceramics & other NMM (kt bricks eq.)', + 'Glass production': 'Glass production (kt)', + 'Pulp production': 'Pulp production (kt)', + 'Paper production': 'Paper production (kt)', + 'Printing and media reproduction': 'Printing and media reproduction (kt paper eq.)', 'Food, beverages and tobacco': 'Physical output (index)', - 'Alumina production':'Alumina production (kt)', + 'Alumina production': 'Alumina production (kt)', 'Aluminium - primary production': 'Aluminium - primary production', 'Aluminium - secondary production': 'Aluminium - secondary production', - 'Other non-ferrous metals' : 'Other non-ferrous metals (kt lead eq.)', + 'Other non-ferrous metals': 'Other non-ferrous metals (kt lead eq.)', 'Transport Equipment': 'Physical output (index)', 'Machinery Equipment': 'Physical output (index)', 'Textiles and leather': 'Physical output (index)', 'Wood and wood products': 'Physical output (index)', 'Other Industrial Sectors': 'Physical output (index)'} -loc_dic={'Iron and steel':[5,8], - 'Chemicals Industry': [7,11], - 'Non-metallic mineral products': [6,10], - 'Pulp, paper and printing': [7,11], - 'Food, beverages and tobacco': [2,6], - 'Non Ferrous Metals': [9,14], - 'Transport Equipment': [3,5], - 'Machinery Equipment': [3,5], - 'Textiles and leather': [3,5], - 'Wood and wood products': [3,5], - 'Other Industrial Sectors': [3,5]} +eb_names = {'NO': 'Norway', 'AL': 'Albania', 'BA': 
'Bosnia and Herzegovina', + 'MK': 'FYR of Macedonia', 'GE': 'Georgia', 'IS': 'Iceland', + 'KO': 'Kosovo', 'MD': 'Moldova', 'ME': 'Montenegro', 'RS': 'Serbia', + 'UA': 'Ukraine', 'TR': 'Turkey', } -# In the summary sheet (IDEES database) some names include a white space -dic_sec_summary = {'Iron and steel': 'Iron and steel', - 'Chemicals Industry': 'Chemicals Industry', - 'Non-metallic mineral products': 'Non-metallic mineral products', - 'Pulp, paper and printing': 'Pulp, paper and printing', - 'Food, beverages and tobacco': ' Food, beverages and tobacco', - 'Non Ferrous Metals': 'Non Ferrous Metals', - 'Transport Equipment': ' Transport Equipment', - 'Machinery Equipment': ' Machinery Equipment', - 'Textiles and leather': ' Textiles and leather', - 'Wood and wood products': ' Wood and wood products', - 'Other Industrial Sectors': ' Other Industrial Sectors'} +eb_sectors = {'Iron & steel industry': 'Iron and steel', + 'Chemical and Petrochemical industry': 'Chemicals Industry', + 'Non-ferrous metal industry': 'Non-metallic mineral products', + 'Paper, Pulp and Print': 'Pulp, paper and printing', + 'Food and Tabacco': 'Food, beverages and tobacco', + 'Non-metallic Minerals (Glass, pottery & building mat. 
Industry)': 'Non Ferrous Metals', + 'Transport Equipment': 'Transport Equipment', + 'Machinery': 'Machinery Equipment', + 'Textile and Leather': 'Textiles and leather', + 'Wood and Wood Products': 'Wood and wood products', + 'Non-specified (Industry)': 'Other Industrial Sectors'} -#countries=['CH'] -eb_names={'NO':'Norway', 'AL':'Albania', 'BA':'Bosnia and Herzegovina', - 'MK':'FYR of Macedonia', 'GE':'Georgia', 'IS':'Iceland', - 'KO':'Kosovo', 'MD':'Moldova', 'ME':'Montenegro', 'RS':'Serbia', - 'UA':'Ukraine', 'TR':'Turkey', } - -dic_sec ={'Iron and steel':'Iron & steel industry', - 'Chemicals Industry': 'Chemical and Petrochemical industry', - 'Non-metallic mineral products': 'Non-ferrous metal industry', - 'Pulp, paper and printing': 'Paper, Pulp and Print', - 'Food, beverages and tobacco': 'Food and Tabacco', - 'Non Ferrous Metals': 'Non-metallic Minerals (Glass, pottery & building mat. Industry)', - 'Transport Equipment': 'Transport Equipment', - 'Machinery Equipment': 'Machinery', - 'Textiles and leather': 'Textile and Leather', - 'Wood and wood products': 'Wood and Wood Products', - 'Other Industrial Sectors': 'Non-specified (Industry)'} - # Mining and Quarrying, Construction - -#Annual energy consumption in Switzerland by sector in 2015 (in TJ) -#From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat -#http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775 - -dic_Switzerland ={'Iron and steel': 7889., - 'Chemicals Industry': 26871., - 'Non-metallic mineral products': 15513.+3820., - 'Pulp, paper and printing': 12004., - 'Food, beverages and tobacco': 17728., - 'Non Ferrous Metals': 3037., - 'Transport Equipment': 14993., - 'Machinery Equipment': 4724., - 'Textiles and leather': 1742., - 'Wood and wood products': 0., - 'Other Industrial Sectors': 10825., - 'current electricity': 53760.} - -dic_sec_position={} -for country in countries: - countries_demand.loc[country] = 0. 
- print(country) - for sector in sectors: - if country in non_EU: - if country == 'CH': - e_country = dic_Switzerland[sector]*tj_to_ktoe - else: - # estimate physical output - #energy consumption in the sector and country - excel_balances = pd.read_excel('{}/{}.XLSX'.format(eb_base_dir,eb_names[country]), - sheet_name='2016', index_col=2,header=0, skiprows=1 ,squeeze=True) - e_country = excel_balances.loc[dic_sec[sector], 'Total all products'] - - #energy consumption in the sector and EU28 - excel_sum_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_EU28.xlsx'.format(jrc_base_dir), - sheet_name='Ind_Summary', index_col=0,header=0,squeeze=True) # the summary sheet - s_sum_out = excel_sum_out.iloc[49:76,year] - e_EU28 = s_sum_out[dic_sec_summary[sector]] - - ratio_country_EU28=e_country/e_EU28 - - excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_EU28.xlsx'.format(jrc_base_dir), - sheet_name=sub_sheet_name_dict[sector],index_col=0,header=0,squeeze=True) # the summary sheet - - s_out = excel_out.iloc[loc_dic[sector][0]:loc_dic[sector][1],year] - - for subsector in sect2sub[sector]: - countries_demand.loc[country,subsector] = ratio_country_EU28*s_out[out_dic[subsector]] - - else: - - # read the input sheets - excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(jrc_base_dir,jrc_names.get(country,country)), sheet_name=sub_sheet_name_dict[sector],index_col=0,header=0,squeeze=True) # the summary sheet - - s_out = excel_out.iloc[loc_dic[sector][0]:loc_dic[sector][1],year] - - for subsector in sect2sub[sector]: - countries_demand.loc[country,subsector] = s_out[out_dic[subsector]] +# TODO: this should go in a csv in `data` +# Annual energy consumption in Switzerland by sector in 2015 (in TJ) +# From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat +# http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775 +e_switzerland = pd.Series({'Iron and steel': 7889., + 'Chemicals Industry': 26871., + 
'Non-metallic mineral products': 15513.+3820., + 'Pulp, paper and printing': 12004., + 'Food, beverages and tobacco': 17728., + 'Non Ferrous Metals': 3037., + 'Transport Equipment': 14993., + 'Machinery Equipment': 4724., + 'Textiles and leather': 1742., + 'Wood and wood products': 0., + 'Other Industrial Sectors': 10825., + 'current electricity': 53760.}) -#include ammonia demand separately and remove ammonia from basic chemicals +def find_physical_output(df): + start = np.where(df.index.str.contains('Physical output', na=''))[0][0] + empty_row = np.where(df.index.isnull())[0] + end = empty_row[np.argmax(empty_row > start)] + return slice(start, end) -ammonia = pd.read_csv(snakemake.input.ammonia_production, - index_col=0) -there = ammonia.index.intersection(countries_demand.index) -missing = countries_demand.index.symmetric_difference(there) +def get_energy_ratio(country): -print("Following countries have no ammonia demand:", missing) + if country == 'CH': + e_country = e_switzerland * tj_to_ktoe + else: + # estimate physical output, energy consumption in the sector and country + fn = f"{eurostat_dir}/{eb_names[country]}.XLSX" + df = pd.read_excel(fn, sheet_name='2016', index_col=2, + header=0, skiprows=1, squeeze=True) + e_country = df.loc[eb_sectors.keys( + ), 'Total all products'].rename(eb_sectors) -countries_demand.insert(2,"Ammonia",0.) + fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx' -countries_demand.loc[there,"Ammonia"] = ammonia.loc[there, str(raw_year)] + df = pd.read_excel(fn, sheet_name='Ind_Summary', + index_col=0, header=0, squeeze=True) -countries_demand["Basic chemicals"] -= countries_demand["Ammonia"] + assert df.index[48] == "by sector" + year_i = df.columns.get_loc(year) + e_eu28 = df.iloc[49:76, year_i] + e_eu28.index = e_eu28.index.str.lstrip() -#EE, HR and LT got negative demand through subtraction - poor data -countries_demand.loc[countries_demand["Basic chemicals"] < 0.,"Basic chemicals"] = 0. 
+ e_ratio = e_country / e_eu28 -countries_demand.rename(columns={"Basic chemicals" : "Basic chemicals (without ammonia)"}, - inplace=True) + return pd.Series({k: e_ratio[v] for k, v in sub2sect.items()}) -countries_demand.index.name = "kton/a" -countries_demand.to_csv(snakemake.output.industrial_production_per_country, - float_format='%.2f') +def industry_production_per_country(country): + + def get_sector_data(sector, country): + + jrc_country = jrc_names.get(country, country) + fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx' + sheet = sub_sheet_name_dict[sector] + df = pd.read_excel(fn, sheet_name=sheet, + index_col=0, header=0, squeeze=True) + + year_i = df.columns.get_loc(year) + df = df.iloc[find_physical_output(df), year_i] + + df = df.loc[map(fields.get, sect2sub[sector])] + df.index = sect2sub[sector] + + return df + + ct = "EU28" if country in non_EU else country + demand = pd.concat([get_sector_data(s, ct) for s in sect2sub.keys()]) + + if country in non_EU: + demand *= get_energy_ratio(country) + + demand.name = country + + return demand + + +def industry_production(countries): + + nprocesses = snakemake.threads + func = industry_production_per_country + tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries), + desc="Build industry production") + with mp.Pool(processes=nprocesses) as pool: + demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs)) + + demand = pd.concat(demand_l, axis=1).T + + demand.index.name = "kton/a" + + return demand + + +def add_ammonia_demand_separately(demand): + """Include ammonia demand separately and remove ammonia from basic chemicals.""" + + ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) + + there = ammonia.index.intersection(demand.index) + missing = demand.index.symmetric_difference(there) + + print("Following countries have no ammonia demand:", missing) + + demand.insert(2, "Ammonia", 0.) 
+ + demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)] + + demand["Basic chemicals"] -= demand["Ammonia"] + + # EE, HR and LT got negative demand through subtraction - poor data + demand['Basic chemicals'].clip(lower=0., inplace=True) + + to_rename = {"Basic chemicals": "Basic chemicals (without ammonia)"} + demand.rename(columns=to_rename, inplace=True) + + +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake('build_industrial_production_per_country') + + countries = non_EU + eu28 + + year = snakemake.config['industry']['reference_year'] + + jrc_dir = snakemake.input.jrc + eurostat_dir = snakemake.input.eurostat + + demand = industry_production(countries) + + add_ammonia_demand_separately(demand) + + fn = snakemake.output.industrial_production_per_country + demand.to_csv(fn, float_format='%.2f') diff --git a/scripts/build_industrial_production_per_country_tomorrow.py b/scripts/build_industrial_production_per_country_tomorrow.py index bc66077c..767779f8 100644 --- a/scripts/build_industrial_production_per_country_tomorrow.py +++ b/scripts/build_industrial_production_per_country_tomorrow.py @@ -1,29 +1,39 @@ +"""Build future industrial production per country.""" import pandas as pd -industrial_production = pd.read_csv(snakemake.input.industrial_production_per_country, - index_col=0) +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake('build_industrial_production_per_country_tomorrow') -total_steel = industrial_production[["Integrated steelworks","Electric arc"]].sum(axis=1) + config = snakemake.config["industry"] -fraction_primary_stays_primary = snakemake.config["industry"]["St_primary_fraction"]*total_steel.sum()/industrial_production["Integrated steelworks"].sum() + fn = snakemake.input.industrial_production_per_country + production = pd.read_csv(fn, index_col=0) -industrial_production.insert(2, "DRI + 
Electric arc", - fraction_primary_stays_primary*industrial_production["Integrated steelworks"]) + keys = ["Integrated steelworks", "Electric arc"] + total_steel = production[keys].sum(axis=1) -industrial_production["Electric arc"] = total_steel - industrial_production["DRI + Electric arc"] -industrial_production["Integrated steelworks"] = 0. + int_steel = production["Integrated steelworks"].sum() + fraction_persistent_primary = config["St_primary_fraction"] * total_steel.sum() / int_steel + dri = fraction_persistent_primary * production["Integrated steelworks"] + production.insert(2, "DRI + Electric arc", dri) -total_aluminium = industrial_production[["Aluminium - primary production","Aluminium - secondary production"]].sum(axis=1) + production["Electric arc"] = total_steel - production["DRI + Electric arc"] + production["Integrated steelworks"] = 0. -fraction_primary_stays_primary = snakemake.config["industry"]["Al_primary_fraction"]*total_aluminium.sum()/industrial_production["Aluminium - primary production"].sum() + keys = ["Aluminium - primary production", "Aluminium - secondary production"] + total_aluminium = production[keys].sum(axis=1) -industrial_production["Aluminium - primary production"] = fraction_primary_stays_primary*industrial_production["Aluminium - primary production"] -industrial_production["Aluminium - secondary production"] = total_aluminium - industrial_production["Aluminium - primary production"] + key_pri = "Aluminium - primary production" + key_sec = "Aluminium - secondary production" + fraction_persistent_primary = config["Al_primary_fraction"] * total_aluminium.sum() / production[key_pri].sum() + production[key_pri] = fraction_persistent_primary * production[key_pri] + production[key_sec] = total_aluminium - production[key_pri] -industrial_production["Basic chemicals (without ammonia)"] *= snakemake.config["industry"]['HVC_primary_fraction'] + production["Basic chemicals (without ammonia)"] *= config['HVC_primary_fraction'] - 
-industrial_production.to_csv(snakemake.output.industrial_production_per_country_tomorrow, - float_format='%.2f') + fn = snakemake.output.industrial_production_per_country_tomorrow + production.to_csv(fn, float_format='%.2f') diff --git a/scripts/build_industrial_production_per_node.py b/scripts/build_industrial_production_per_node.py index 9e56e49a..b5361e6b 100644 --- a/scripts/build_industrial_production_per_node.py +++ b/scripts/build_industrial_production_per_node.py @@ -1,47 +1,63 @@ +"""Build industrial production per node.""" import pandas as pd +from itertools import product + +# map JRC/our sectors to hotmaps sector, where mapping exist +sector_mapping = { + 'Electric arc': 'Iron and steel', + 'Integrated steelworks': 'Iron and steel', + 'DRI + Electric arc': 'Iron and steel', + 'Ammonia': 'Chemical industry', + 'Basic chemicals (without ammonia)': 'Chemical industry', + 'Other chemicals': 'Chemical industry', + 'Pharmaceutical products etc.': 'Chemical industry', + 'Cement': 'Cement', + 'Ceramics & other NMM': 'Non-metallic mineral products', + 'Glass production': 'Glass', + 'Pulp production': 'Paper and printing', + 'Paper production': 'Paper and printing', + 'Printing and media reproduction': 'Paper and printing', + 'Alumina production': 'Non-ferrous metals', + 'Aluminium - primary production': 'Non-ferrous metals', + 'Aluminium - secondary production': 'Non-ferrous metals', + 'Other non-ferrous metals': 'Non-ferrous metals', +} + def build_nodal_industrial_production(): - industrial_production = pd.read_csv(snakemake.input.industrial_production_per_country_tomorrow, - index_col=0) + fn = snakemake.input.industrial_production_per_country_tomorrow + industrial_production = pd.read_csv(fn, index_col=0) - distribution_keys = pd.read_csv(snakemake.input.industrial_distribution_key, - index_col=0) - distribution_keys["country"] = distribution_keys.index.str[:2] + fn = snakemake.input.industrial_distribution_key + keys = pd.read_csv(fn, index_col=0) + 
keys["country"] = keys.index.str[:2] - nodal_industrial_production = pd.DataFrame(index=distribution_keys.index, - columns=industrial_production.columns, - dtype=float) + nodal_production = pd.DataFrame(index=keys.index, + columns=industrial_production.columns, + dtype=float) - #map JRC/our sectors to hotmaps sector, where mapping exist - sector_mapping = {'Electric arc' : 'Iron and steel', - 'Integrated steelworks' : 'Iron and steel', - 'DRI + Electric arc' : 'Iron and steel', - 'Ammonia' : 'Chemical industry', - 'Basic chemicals (without ammonia)' : 'Chemical industry', - 'Other chemicals' : 'Chemical industry', - 'Pharmaceutical products etc.' : 'Chemical industry', - 'Cement' : 'Cement', - 'Ceramics & other NMM' : 'Non-metallic mineral products', - 'Glass production' : 'Glass', - 'Pulp production' : 'Paper and printing', - 'Paper production' : 'Paper and printing', - 'Printing and media reproduction' : 'Paper and printing', - 'Alumina production' : 'Non-ferrous metals', - 'Aluminium - primary production' : 'Non-ferrous metals', - 'Aluminium - secondary production' : 'Non-ferrous metals', - 'Other non-ferrous metals' : 'Non-ferrous metals', - } + countries = keys.country.unique() + sectors = industrial_production.columns + + for country, sector in product(countries, sectors): - for c in distribution_keys.country.unique(): - buses = distribution_keys.index[distribution_keys.country == c] - for sector in industrial_production.columns: - distribution_key = distribution_keys.loc[buses,sector_mapping.get(sector,"population")] - nodal_industrial_production.loc[buses,sector] = industrial_production.at[c,sector]*distribution_key + buses = keys.index[keys.country == country] + mapping = sector_mapping.get(sector, "population") + + key = keys.loc[buses, mapping] + nodal_production.loc[buses, sector] = industrial_production.at[country, sector] * key + + nodal_production.to_csv(snakemake.output.industrial_production_per_node) - 
nodal_industrial_production.to_csv(snakemake.output.industrial_production_per_node) if __name__ == "__main__": + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake('build_industrial_production_per_node', + simpl='', + clusters=48, + ) build_nodal_industrial_production() diff --git a/scripts/build_industry_sector_ratios.py b/scripts/build_industry_sector_ratios.py index 810b242a..adfb1d3c 100644 --- a/scripts/build_industry_sector_ratios.py +++ b/scripts/build_industry_sector_ratios.py @@ -1,1520 +1,1460 @@ - +"""Build industry sector ratios.""" import pandas as pd -import numpy as np -base_dir = "data/jrc-idees-2015" +# GWh/ktoe OR MWh/toe +toe_to_MWh = 11.630 + +eu28 = [ + "FR", + "DE", + "GB", + "IT", + "ES", + "PL", + "SE", + "NL", + "BE", + "FI", + "DK", + "PT", + "RO", + "AT", + "BG", + "EE", + "GR", + "LV", + "CZ", + "HU", + "IE", + "SK", + "LT", + "HR", + "LU", + "SI", + "CY", + "MT", +] + +sheet_names = { + "Iron and steel": "ISI", + "Chemicals Industry": "CHI", + "Non-metallic mineral products": "NMM", + "Pulp, paper and printing": "PPA", + "Food, beverages and tobacco": "FBT", + "Non Ferrous Metals": "NFM", + "Transport Equipment": "TRE", + "Machinery Equipment": "MAE", + "Textiles and leather": "TEL", + "Wood and wood products": "WWP", + "Other Industrial Sectors": "OIS", +} + + +index = [ + "elec", + "coal", + "coke", + "biomass", + "methane", + "hydrogen", + "heat", + "naphtha", + "process emission", + "process emission from feedstock", +] + + +def load_idees_data(sector, country="EU28"): + + suffixes = {"out": "", "fec": "_fec", "ued": "_ued", "emi": "_emi"} + sheets = {k: sheet_names[sector] + v for k, v in suffixes.items()} + + def usecols(x): + return isinstance(x, str) or x == year + + idees = pd.read_excel( + f"{snakemake.input.idees}/JRC-IDEES-2015_Industry_{country}.xlsx", + sheet_name=list(sheets.values()), + index_col=0, + header=0, + squeeze=True, + usecols=usecols, + ) + + for k, v in 
sheets.items(): + idees[k] = idees.pop(v) + + return idees + + +def iron_and_steel(): + + # There are two different approaches to produce iron and steel: + # i.e., integrated steelworks and electric arc. + # Electric arc approach has higher efficiency and relies more on electricity. + # We assume that integrated steelworks will be replaced by electric arc entirely. -# year for which data is retrieved -raw_year = 2015 -year = raw_year-2016 + sector = "Iron and steel" + idees = load_idees_data(sector) -conv_factor=11.630 #GWh/ktoe OR MWh/toe + df = pd.DataFrame(index=index) -country = 'EU28' + ## Electric arc + sector = "Electric arc" -sub_sheet_name_dict = { 'Iron and steel':'ISI', - 'Chemicals Industry':'CHI', - 'Non-metallic mineral products': 'NMM', - 'Pulp, paper and printing': 'PPA', - 'Food, beverages and tobacco': 'FBT', - 'Non Ferrous Metals' : 'NFM', - 'Transport Equipment': 'TRE', - 'Machinery Equipment': 'MAE', - 'Textiles and leather':'TEL', - 'Wood and wood products': 'WWP', - 'Other Industrial Sectors': 'OIS'} + df[sector] = 0.0 -index = ['elec','coal','coke','biomass','methane','hydrogen','heat','naphtha','process emission','process emission from feedstock'] + s_fec = idees["fec"][51:57] + assert s_fec.index[0] == sector -df = pd.DataFrame(index=index) + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.at["elec", sector] += s_fec[sel].sum() + df.at["heat", sector] += s_fec["Low enthalpy heat"] -## Iron and steel -# -#> There are two different approaches to produce iron and steel: i.e., integrated steelworks and electric arc. -# -#> Electric arc approach has higher efficiency and relies more on electricity. -# -#> We assume that integrated steelworks will be replaced by electric arc entirely. 
+ subsector = "Steel: Smelters" + s_fec = idees["fec"][61:67] + s_ued = idees["ued"][61:67] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -sector = 'Iron and steel' + # efficiency changes due to transforming all the smelters into methane + key = "Natural gas (incl. biogas)" + eff_met = s_ued[key] / s_fec[key] -# read the input sheets -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) # the summary sheet + df.at["methane", sector] += s_ued[subsector] / eff_met -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) # the final energy consumption sheet + subsector = "Steel: Electric arc" + s_fec = idees["fec"][67:68] + assert s_fec.index[0] == subsector -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) # the used energy sheet + df.at["elec", sector] += s_fec[subsector] -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet + subsector = "Steel: Furnaces, Refining and Rolling" + s_fec = idees["fec"][68:75] + s_ued = idees["ued"][68:75] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -### Electric arc + key = "Steel: Furnaces, Refining and Rolling - Electric" + eff = s_ued[key] / s_fec[key] -sector = 'Electric arc' + # assume fully electrified, other processes scaled by used energy + df.at["elec", sector] += s_ued[subsector] / eff -df[sector] = 0. 
+ subsector = "Steel: Products finishing" + s_fec = idees["fec"][75:92] + s_ued = idees["ued"][75:92] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -# read the corresponding lines -s_fec = excel_fec.iloc[51:57,year] + key = "Steel: Products finishing - Electric" + eff = s_ued[key] / s_fec[key] -assert s_fec.index[0] == sector + # assume fully electrified + df.at["elec", sector] += s_ued[subsector] / eff -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # Process emissions (per physical output) -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + s_emi = idees["emi"][51:93] + assert s_emi.index[0] == sector -#### Steel: Smelters + s_out = idees["out"][7:8] + assert s_out.index[0] == sector -subsector = 'Steel: Smelters' + # tCO2/t material + df.loc["process emission", sector] += s_emi["Process emissions"] / s_out[sector] -# read the corresponding lines -s_fec = excel_fec.iloc[61:67,year] + # final energy consumption MWh/t material + sel = ["elec", "heat", "methane"] + df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector] -s_ued = excel_ued.iloc[61:67,year] + ## DRI + Electric arc + # For primary route: DRI with H2 + EAF -assert s_fec.index[0] == subsector + sector = "DRI + Electric arc" -# Efficiency changes due to transforming all the smelters into methane -eff_met=s_ued['Natural gas (incl. biogas)']/s_fec['Natural gas (incl. 
biogas)'] + df[sector] = df["Electric arc"] -df.loc['methane', sector] += s_ued[subsector]/eff_met + # add H2 consumption for DRI at 1.7 MWh H2 /ton steel + df.at["hydrogen", sector] = config["H2_DRI"] -#### Steel: Electric arc + # add electricity consumption in DRI shaft (0.322 MWh/tSl) + df.at["elec", sector] += config["elec_DRI"] -subsector = 'Steel: Electric arc' + ## Integrated steelworks + # could be used in combination with CCS) + # Assume existing fuels are kept, except for furnaces, refining, rolling, finishing + # Ignore 'derived gases' since these are top gases from furnaces -# read the corresponding lines -s_fec = excel_fec.iloc[67:68,year] + sector = "Integrated steelworks" -assert s_fec.index[0] == subsector + df[sector] = 0.0 -# only electricity -df.loc['elec',sector] += s_fec[subsector] + s_fec = idees["fec"][3:9] + assert s_fec.index[0] == sector -#### Steel: Furnaces, Refining and Rolling -#> assume fully electrified -# -#> other processes are scaled by the used energy + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -subsector = 'Steel: Furnaces, Refining and Rolling' + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# read the corresponding lines -s_fec = excel_fec.iloc[68:75,year] + subsector = "Steel: Sinter/Pellet making" -s_ued = excel_ued.iloc[68:75,year] + s_fec = idees["fec"][13:19] + s_ued = idees["ued"][13:19] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -assert s_fec.index[0] == subsector + df.loc["elec", sector] += s_fec["Electricity"] -# this process can be electrified -eff = s_ued['Steel: Furnaces, Refining and Rolling - Electric']/s_fec['Steel: Furnaces, Refining and Rolling - Electric'] + sel = ["Natural gas (incl. 
biogas)", "Residual fuel oil"] + df.loc["methane", sector] += s_fec[sel].sum() -df.loc['elec',sector] += s_ued[subsector]/eff + df.loc["coal", sector] += s_fec["Solids"] -#### Steel: Products finishing -#> assume fully electrified + subsector = "Steel: Blast /Basic oxygen furnace" -subsector = 'Steel: Products finishing' + s_fec = idees["fec"][19:25] + s_ued = idees["ued"][19:25] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -# read the corresponding lines -s_fec = excel_fec.iloc[75:92,year] + sel = ["Natural gas (incl. biogas)", "Residual fuel oil"] + df.loc["methane", sector] += s_fec[sel].sum() -s_ued = excel_ued.iloc[75:92,year] + df.loc["coal", sector] += s_fec["Solids"] -assert s_fec.index[0] == subsector + df.loc["coke", sector] = s_fec["Coke"] -# this process can be electrified -eff = s_ued['Steel: Products finishing - Electric']/s_fec['Steel: Products finishing - Electric'] + subsector = "Steel: Furnaces, Refining and Rolling" -df.loc['elec',sector] += s_ued[subsector]/eff + s_fec = idees["fec"][25:32] + s_ued = idees["ued"][25:32] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -#### Process emissions (per physical output) + key = "Steel: Furnaces, Refining and Rolling - Electric" + eff = s_ued[key] / s_fec[key] -s_emi = excel_emi.iloc[51:93,year] + # assume fully electrified, other processes scaled by used energy + df.loc["elec", sector] += s_ued[subsector] / eff -assert s_emi.index[0] == sector + subsector = "Steel: Products finishing" -s_out = excel_out.iloc[7:8,year] + s_fec = idees["fec"][32:49] + s_ued = idees["ued"][32:49] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -assert sector in str(s_out.index) + key = "Steel: Products finishing - Electric" + eff = s_ued[key] / s_fec[key] -df.loc['process emission',sector] = s_emi['Process emissions']/s_out[sector] # unit tCO2/t material + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff -# final 
energy consumption per t -df.loc[['elec','heat','methane'],sector] = df.loc[['elec','heat','methane'],sector]*conv_factor/s_out[sector] # unit MWh/t material + # Process emissions (per physical output) -### For primary route: DRI with H2 + EAF + s_emi = idees["emi"][3:50] + assert s_emi.index[0] == sector -df['DRI + Electric arc'] = df['Electric arc'] + s_out = idees["out"][6:7] + assert s_out.index[0] == sector -# adding the Hydrogen necessary for the Direct Reduction of Iron. consumption 1.7 MWh H2 /ton steel -df.loc['hydrogen', 'DRI + Electric arc'] = snakemake.config["industry"]["H2_DRI"] -# add electricity consumption in DRI shaft (0.322 MWh/tSl) -df.loc['elec', 'DRI + Electric arc'] += snakemake.config["industry"]["elec_DRI"] + # tCO2/t material + df.loc["process emission", sector] = s_emi["Process emissions"] / s_out[sector] + # final energy consumption MWh/t material + sel = ["elec", "heat", "methane", "coke", "coal"] + df.loc[sel, sector] = df.loc[sel, sector] * toe_to_MWh / s_out[sector] -### Integrated steelworks (could be used in combination with CCS) -### Assume existing fuels are kept, except for furnaces, refining, rolling, finishing -### Ignore 'derived gases' since these are top gases from furnaces + return df -sector = 'Integrated steelworks' -df['Integrated steelworks']= 0. 
+def chemicals_industry(): + sector = "Chemicals Industry" + idees = load_idees_data(sector) -# read the corresponding lines -s_fec = excel_fec.iloc[3:9,year] + df = pd.DataFrame(index=index) -assert s_fec.index[0] == sector + # Basid chemicals -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + sector = "Basic chemicals" -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + df[sector] = 0.0 + s_fec = idees["fec"][3:9] + assert s_fec.index[0] == sector -#### Steel: Sinter/Pellet making + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -subsector = 'Steel: Sinter/Pellet making' + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# read the corresponding lines -s_fec = excel_fec.iloc[13:19,year] + subsector = "Chemicals: Feedstock (energy used as raw material)" + # There are Solids, Refinery gas, LPG, Diesel oil, Residual fuel oil, + # Other liquids, Naphtha, Natural gas for feedstock. + # Naphta represents 47%, methane 17%. LPG (18%) solids, refinery gas, + # diesel oil, residual fuel oils and other liquids are asimilated to Naphtha -s_ued = excel_ued.iloc[13:19,year] + s_fec = idees["fec"][13:22] + assert s_fec.index[0] == subsector -assert s_fec.index[0] == subsector + df.loc["naphtha", sector] += s_fec["Naphtha"] -df.loc['elec',sector] += s_fec['Electricity'] -df.loc['methane',sector] += s_fec['Natural gas (incl. 
biogas)'] -df.loc['methane',sector] += s_fec['Residual fuel oil'] -df.loc['coal',sector] += s_fec['Solids'] + df.loc["methane", sector] += s_fec["Natural gas"] + # LPG and other feedstock materials are assimilated to naphtha + # since they will be produced through Fischer-Tropsh process + sel = [ + "Solids", + "Refinery gas", + "LPG", + "Diesel oil", + "Residual fuel oil", + "Other liquids", + ] + df.loc["naphtha", sector] += s_fec[sel].sum() -#### Steel: Blast / Basic Oxygen Furnace + subsector = "Chemicals: Steam processing" + # All the final energy consumption in the steam processing is + # converted to methane, since we need >1000 C temperatures here. + # The current efficiency of methane is assumed in the conversion. -subsector = 'Steel: Blast /Basic oxygen furnace' + s_fec = idees["fec"][22:33] + s_ued = idees["ued"][22:33] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -# read the corresponding lines -s_fec = excel_fec.iloc[19:25,year] + # efficiency of natural gas + eff_ch4 = s_ued["Natural gas (incl. biogas)"] / s_fec["Natural gas (incl. biogas)"] -s_ued = excel_ued.iloc[19:25,year] + # replace all fec by methane + df.loc["methane", sector] += s_ued[subsector] / eff_ch4 -assert s_fec.index[0] == subsector + subsector = "Chemicals: Furnaces" -df.loc['methane',sector] += s_fec['Natural gas (incl. 
biogas)'] -df.loc['methane',sector] += s_fec['Residual fuel oil'] -df.loc['coal',sector] += s_fec['Solids'] -df.loc['coke',sector] += s_fec['Coke'] + s_fec = idees["fec"][33:41] + s_ued = idees["ued"][33:41] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector + # efficiency of electrification + key = "Chemicals: Furnaces - Electric" + eff_elec = s_ued[key] / s_fec[key] -#### Steel: Furnaces, Refining and Rolling -#> assume fully electrified -# -#> other processes are scaled by the used energy + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -subsector = 'Steel: Furnaces, Refining and Rolling' + subsector = "Chemicals: Process cooling" -# read the corresponding lines -s_fec = excel_fec.iloc[25:32,year] + s_fec = idees["fec"][41:55] + s_ued = idees["ued"][41:55] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -s_ued = excel_ued.iloc[25:32,year] + key = "Chemicals: Process cooling - Electric" + eff_elec = s_ued[key] / s_fec[key] -assert s_fec.index[0] == subsector + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -# this process can be electrified -eff = s_ued['Steel: Furnaces, Refining and Rolling - Electric']/s_fec['Steel: Furnaces, Refining and Rolling - Electric'] + subsector = "Chemicals: Generic electric process" -df.loc['elec',sector] += s_ued[subsector]/eff + s_fec = idees["fec"][55:56] + assert s_fec.index[0] == subsector -#### Steel: Products finishing -#> assume fully electrified + df.loc["elec", sector] += s_fec[subsector] -subsector = 'Steel: Products finishing' + # Process emissions -# read the corresponding lines -s_fec = excel_fec.iloc[32:49,year] + # Correct everything by subtracting 2015's ammonia demand and + # putting in ammonia demand for H2 and electricity separately -s_ued = excel_ued.iloc[32:49,year] + s_emi = idees["emi"][3:57] + s_out = idees["out"][8:9] + assert s_emi.index[0] == sector + assert sector in str(s_out.index) 
-assert s_fec.index[0] == subsector + ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) -# this process can be electrified -eff = s_ued['Steel: Products finishing - Electric']/s_fec['Steel: Products finishing - Electric'] + # ktNH3/a + ammonia_total = ammonia.loc[ammonia.index.intersection(eu28), str(year)].sum() -df.loc['elec',sector] += s_ued[subsector]/eff + s_out -= ammonia_total + # tCO2/t material + df.loc["process emission", sector] += ( + s_emi["Process emissions"] + - config["petrochemical_process_emissions"] * 1e3 + - config["NH3_process_emissions"] * 1e3 + ) / s_out.values -#### Process emissions (per physical output) + # emissions originating from feedstock, could be non-fossil origin + # tCO2/t material + df.loc["process emission from feedstock", sector] += ( + config["petrochemical_process_emissions"] * 1e3 + ) / s_out.values -s_emi = excel_emi.iloc[3:50,year] + # convert from ktoe/a to GWh/a + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] *= toe_to_MWh -assert s_emi.index[0] == sector + df.loc["methane", sector] -= ammonia_total * config["MWh_CH4_per_tNH3_SMR"] + df.loc["elec", sector] -= ammonia_total * config["MWh_elec_per_tNH3_SMR"] -s_out = excel_out.iloc[6:7,year] + # MWh/t material + df.loc[sources, sector] = df.loc[sources, sector] / s_out.values -assert sector in str(s_out.index) + to_rename = {sector: f"{sector} (without ammonia)"} + df.rename(columns=to_rename, inplace=True) -df.loc['process emission',sector] = s_emi['Process emissions']/s_out[sector] # unit tCO2/t material + # Ammonia -# final energy consumption per t -df.loc[['elec','heat','methane','coke','coal'],sector] = df.loc[['elec','heat','methane','coke','coal'],sector]*conv_factor/s_out[sector] # unit MWh/t material + sector = "Ammonia" + df[sector] = 0.0 + df.loc["hydrogen", sector] = config["MWh_H2_per_tNH3_electrolysis"] + df.loc["elec", sector] = config["MWh_elec_per_tNH3_electrolysis"] -## Chemicals 
Industry + # Other chemicals -sector = 'Chemicals Industry' + sector = "Other chemicals" -# read the input sheets -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) # the summary sheet + df[sector] = 0.0 -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) # the final energy consumption sheet + s_fec = idees["fec"][58:64] + assert s_fec.index[0] == sector -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) # the used energy sheet + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -### Basic chemicals + subsector = "Chemicals: High enthalpy heat processing" -## Ammonia is separated afterwards + s_fec = idees["fec"][68:81] + s_ued = idees["ued"][68:81] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -sector = 'Basic chemicals' + key = "High enthalpy heat processing - Electric (microwave)" + eff_elec = s_ued[key] / s_fec[key] -df[sector] = 0 + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -# read the corresponding lines -s_fec = excel_fec.iloc[3:9,year] + subsector = "Chemicals: Furnaces" -assert s_fec.index[0] == sector + s_fec = idees["fec"][81:89] + s_ued = idees["ued"][81:89] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += 
s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + key = "Chemicals: Furnaces - Electric" + eff_elec = s_ued[key] / s_fec[key] -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -#### Chemicals: Feedstock (energy used as raw material) -#> There are Solids, Refinery gas, LPG, Diesel oil, Residual fuel oil, Other liquids, Naphtha, Natural gas for feedstock. -# -#> Naphta represents 47%, methane 17%. LPG (18%) solids, refinery gas, diesel oil, residual fuel oils and other liquids are asimilated to Naphtha + subsector = "Chemicals: Process cooling" + s_fec = idees["fec"][89:103] + s_ued = idees["ued"][89:103] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -subsector = 'Chemicals: Feedstock (energy used as raw material)' + key = "Chemicals: Process cooling - Electric" + eff = s_ued[key] / s_fec[key] -# read the corresponding lines -s_fec = excel_fec.iloc[13:22,year] + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff -assert s_fec.index[0] == subsector + subsector = "Chemicals: Generic electric process" -# naphtha -df.loc['naphtha',sector] += s_fec['Naphtha'] + s_fec = idees["fec"][103:104] + assert s_fec.index[0] == subsector -# natural gas -df.loc['methane',sector] += s_fec['Natural gas'] + df.loc["elec", sector] += s_fec[subsector] -# LPG and other feedstock materials are assimilated to naphtha since they will be produced trough Fischer-Tropsh process -df.loc['naphtha',sector] += (s_fec['Solids'] + s_fec['Refinery gas'] + s_fec['LPG'] + s_fec['Diesel oil'] - + s_fec['Residual fuel oil'] + s_fec['Other liquids']) + # Process emissions -#### Chemicals: Steam processing -#> All the final energy consumption in the Steam processing is converted to methane, since we need >1000 C temperatures here. -# -#> The current efficiency of methane is assumed in the conversion. 
+ s_emi = idees["emi"][58:105] + s_out = idees["out"][9:10] + assert s_emi.index[0] == sector + assert sector in str(s_out.index) -subsector = 'Chemicals: Steam processing' + # tCO2/t material + df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values -# read the corresponding lines -s_fec = excel_fec.iloc[22:33,year] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -s_ued = excel_ued.iloc[22:33,year] + # Pharmaceutical products -assert s_fec.index[0] == subsector + sector = "Pharmaceutical products etc." -# efficiency of natural gas -eff_ch4 = s_ued['Natural gas (incl. biogas)']/s_fec['Natural gas (incl. biogas)'] + df[sector] = 0.0 -# replace all fec by methane -df.loc['methane',sector] += s_ued[subsector]/eff_ch4 + s_fec = idees["fec"][106:112] + assert s_fec.index[0] == sector -#### Chemicals: Furnaces -#> assume fully electrified + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -subsector = 'Chemicals: Furnaces' + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# read the corresponding lines -s_fec = excel_fec.iloc[33:41,year] + subsector = "Chemicals: High enthalpy heat processing" -s_ued = excel_ued.iloc[33:41,year] + s_fec = idees["fec"][116:129] + s_ued = idees["ued"][116:129] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -assert s_fec.index[0] == subsector + key = "High enthalpy heat processing - Electric (microwave)" + eff_elec = s_ued[key] / s_fec[key] -#efficiency of electrification -eff_elec = s_ued['Chemicals: Furnaces - Electric']/s_fec['Chemicals: Furnaces - Electric'] + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -df.loc['elec',sector] += s_ued[subsector]/eff_elec + subsector = "Chemicals: Furnaces" -#### Chemicals: Process cooling -#> assume fully electrified + 
s_fec = idees["fec"][129:137] + s_ued = idees["ued"][129:137] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -subsector = 'Chemicals: Process cooling' + key = "Chemicals: Furnaces - Electric" + eff = s_ued[key] / s_fec[key] -# read the corresponding lines -s_fec = excel_fec.iloc[41:55,year] + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff -s_ued = excel_ued.iloc[41:55,year] + subsector = "Chemicals: Process cooling" -assert s_fec.index[0] == subsector + s_fec = idees["fec"][137:151] + s_ued = idees["ued"][137:151] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -eff_elec = s_ued['Chemicals: Process cooling - Electric']/s_fec['Chemicals: Process cooling - Electric'] + key = "Chemicals: Process cooling - Electric" + eff_elec = s_ued[key] / s_fec[key] -df.loc['elec',sector] += s_ued[subsector]/eff_elec + # assume fully electrified + df.loc["elec", sector] += s_ued[subsector] / eff_elec -#### Chemicals: Generic electric process + subsector = "Chemicals: Generic electric process" -subsector = 'Chemicals: Generic electric process' + s_fec = idees["fec"][151:152] + s_out = idees["out"][10:11] + assert s_fec.index[0] == subsector + assert sector in str(s_out.index) -# read the corresponding lines -s_fec = excel_fec.iloc[55:56,year] + df.loc["elec", sector] += s_fec[subsector] -assert s_fec.index[0] == subsector + # tCO2/t material + df.loc["process emission", sector] += 0.0 -df.loc['elec',sector] += s_fec[subsector] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -#### Process emissions + return df -s_emi = excel_emi.iloc[3:57,year] -assert s_emi.index[0] == sector +def nonmetalic_mineral_products(): + # This includes cement, ceramic and glass production. + # This includes process emissions related to the fabrication of clinker. 
-## Correct everything by subtracting 2015's ammonia demand and putting in ammonia demand for H2 and electricity separately + sector = "Non-metallic mineral products" + idees = load_idees_data(sector) -s_out = excel_out.iloc[8:9,year] + df = pd.DataFrame(index=index) -assert sector in str(s_out.index) + # Cement -ammonia = pd.read_csv(snakemake.input.ammonia_production, - index_col=0) + # This sector has process-emissions. + # Includes three subcategories: + # (a) Grinding, milling of raw material, + # (b) Pre-heating and pre-calcination, + # (c) clinker production (kilns), + # (d) Grinding, packaging. + # (b)+(c) represent 94% of fec. So (a) is joined to (b) and (d) is joined to (c). + # Temperatures above 1400C are required for procesing limestone and sand into clinker. + # Everything (except current electricity and heat consumption and existing biomass) + # is transformed into methane for high T. -eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI', - 'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ', - 'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT'] + sector = "Cement" -#ktNH3/a -total_ammonia = ammonia.loc[ammonia.index.intersection(eu28),str(raw_year)].sum() + df[sector] = 0.0 -s_out -= total_ammonia + s_fec = idees["fec"][3:25] + s_ued = idees["ued"][3:25] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -df.loc['process emission',sector] += (s_emi['Process emissions'] - snakemake.config["industry"]['petrochemical_process_emissions']*1e3 - snakemake.config["industry"]['NH3_process_emissions']*1e3)/s_out.values # unit tCO2/t material + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -#these are emissions originating from feedstock, i.e. 
could be non-fossil origin -df.loc['process emission from feedstock',sector] += (snakemake.config["industry"]['petrochemical_process_emissions']*1e3)/s_out.values # unit tCO2/t material + df.loc["heat", sector] += s_fec["Low enthalpy heat"] + # pre-processing: keep existing elec and biomass, rest to methane + df.loc["elec", sector] += s_fec["Cement: Grinding, milling of raw material"] + df.loc["biomass", sector] += s_fec["Biomass"] + df.loc["methane", sector] += ( + s_fec["Cement: Pre-heating and pre-calcination"] - s_fec["Biomass"] + ) -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + subsector = "Cement: Clinker production (kilns)" -#convert from ktoe/a to GWh/a -df.loc[sources,sector] *= conv_factor + s_fec = idees["fec"][34:43] + s_ued = idees["ued"][34:43] + assert s_fec.index[0] == subsector + assert s_ued.index[0] == subsector -df.loc['methane',sector] -= total_ammonia*snakemake.config['industry']['MWh_CH4_per_tNH3_SMR'] -df.loc['elec',sector] -= total_ammonia*snakemake.config['industry']['MWh_elec_per_tNH3_SMR'] + df.loc["biomass", sector] += s_fec["Biomass"] + df.loc["methane", sector] += ( + s_fec["Cement: Clinker production (kilns)"] - s_fec["Biomass"] + ) + df.loc["elec", sector] += s_fec["Cement: Grinding, packaging"] -df.loc[sources,sector] = df.loc[sources,sector]/s_out.values # unit MWh/t material + # Process emissions -df.rename(columns={sector : sector + " (without ammonia)"}, - inplace=True) + # come from calcination of limestone to chemically reactive calcium oxide (lime). + # Calcium carbonate -> lime + CO2 + # CaCO3 -> CaO + CO2 -sector = 'Ammonia' + s_emi = idees["emi"][3:44] + assert s_emi.index[0] == sector -df[sector] = 0. 
+ s_out = idees["out"][7:8] + assert sector in str(s_out.index) -df.loc['hydrogen',sector] = snakemake.config['industry']['MWh_H2_per_tNH3_electrolysis'] -df.loc['elec',sector] = snakemake.config['industry']['MWh_elec_per_tNH3_electrolysis'] + # tCO2/t material + df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -### Other chemicals + # Ceramics & other NMM -sector = 'Other chemicals' + # This sector has process emissions. + # Includes four subcategories: + # (a) Mixing of raw material, + # (b) Drying and sintering of raw material, + # (c) Primary production process, + # (d) Product finishing. + # (b) represents 65% of fec and (a) 4%. So (a) is joined to (b). + # Everything is electrified -df[sector] = 0 -# read the corresponding lines -s_fec = excel_fec.iloc[58:64,year] + sector = "Ceramics & other NMM" -# check the position -assert s_fec.index[0] == sector + df[sector] = 0.0 -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + s_fec = idees["fec"][45:94] + s_ued = idees["ued"][45:94] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -#### Chemicals: High enthalpy heat processing -#> assume fully electrified + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -subsector = 'Chemicals: High enthalpy heat processing' + # Efficiency changes due to electrification + key = "Ceramics: Microwave drying and sintering" + eff_elec = s_ued[key] / s_fec[key] -# read the corresponding lines -s_fec = excel_fec.iloc[68:81,year] + sel = [ + "Ceramics: Mixing of 
raw material", + "Ceramics: Drying and sintering of raw material", + ] + df.loc["elec", sector] += s_ued[sel].sum() / eff_elec -s_ued = excel_ued.iloc[68:81,year] + key = "Ceramics: Electric kiln" + eff_elec = s_ued[key] / s_fec[key] -assert s_fec.index[0] == subsector + df.loc["elec", sector] += s_ued["Ceramics: Primary production process"] / eff_elec -eff_elec = s_ued['High enthalpy heat processing - Electric (microwave)']/s_fec['High enthalpy heat processing - Electric (microwave)'] + key = "Ceramics: Electric furnace" + eff_elec = s_ued[key] / s_fec[key] -df.loc['elec',sector] += s_ued[subsector]/eff_elec + df.loc["elec", sector] += s_ued["Ceramics: Product finishing"] / eff_elec -#### Chemicals: Furnaces -#> assume fully electrified + s_emi = idees["emi"][45:94] + assert s_emi.index[0] == sector -subsector = 'Chemicals: Furnaces' + s_out = idees["out"][8:9] + assert sector in str(s_out.index) -# read the corresponding lines -s_fec = excel_fec.iloc[81:89,year] + # tCO2/t material + df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values -s_ued = excel_ued.iloc[81:89,year] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -assert s_fec.index[0] == subsector + # Glass production -eff_elec = s_ued['Chemicals: Furnaces - Electric']/s_fec['Chemicals: Furnaces - Electric'] + # This sector has process emissions. + # Includes four subcategories: + # (a) Melting tank + # (b) Forming + # (c) Annealing + # (d) Finishing processes. + # (a) represents 73%. (b), (d) are joined to (c). + # Everything is electrified. 
-df.loc['elec',sector] += s_ued[subsector]/eff_elec + sector = "Glass production" -#### Chemicals: Process cooling -#> assume fully electrified + df[sector] = 0.0 -subsector = 'Chemicals: Process cooling' + s_fec = idees["fec"][95:123] + s_ued = idees["ued"][95:123] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# read the corresponding lines -s_fec = excel_fec.iloc[89:103,year] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -s_ued = excel_ued.iloc[89:103,year] + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -assert s_fec.index[0] == subsector + # Efficiency changes due to electrification + key = "Glass: Electric melting tank" + eff_elec = s_ued[key] / s_fec[key] -eff = s_ued['Chemicals: Process cooling - Electric']/s_fec['Chemicals: Process cooling - Electric'] + df.loc["elec", sector] += s_ued["Glass: Melting tank"] / eff_elec -df.loc['elec',sector] += s_ued[subsector]/eff + key = "Glass: Annealing - electric" + eff_elec = s_ued[key] / s_fec[key] -#### Chemicals: Generic electric process + sel = ["Glass: Forming", "Glass: Annealing", "Glass: Finishing processes"] + df.loc["elec", sector] += s_ued[sel].sum() / eff_elec -subsector = 'Chemicals: Generic electric process' + s_emi = idees["emi"][95:124] + assert s_emi.index[0] == sector -# read the corresponding lines -s_fec = excel_fec.iloc[103:104,year] + s_out = idees["out"][9:10] + assert sector in str(s_out.index) -assert s_fec.index[0] == subsector + # tCO2/t material + df.loc["process emission", sector] += s_emi["Process emissions"] / s_out.values -df.loc['elec',sector] += s_fec[subsector] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -#### Process emissions + return df -s_emi = excel_emi.iloc[58:105,year] -assert s_emi.index[0] == sector +def pulp_paper_printing(): -s_out = 
excel_out.iloc[9:10,year] + # Pulp, paper and printing can be completely electrified. + # There are no process emissions associated to this sector. -assert sector in str(s_out.index) + sector = "Pulp, paper and printing" + idees = load_idees_data(sector) -df.loc['process emission',sector] += s_emi['Process emissions']/s_out.values # unit tCO2/t material + df = pd.DataFrame(index=index) -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + # Pulp production -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material -# 1 ktoe = 11630 MWh + # Includes three subcategories: + # (a) Wood preparation, grinding; + # (b) Pulping; + # (c) Cleaning. + # + # (b) Pulping is either biomass or electric; left like this (dominated by biomass). + # (a) Wood preparation, grinding and (c) Cleaning represent only 10% of their current + # energy consumption is assumed to be electrified without any change in efficiency -### Pharmaceutical products etc. + sector = "Pulp production" -sector = 'Pharmaceutical products etc.' 
+ df[sector] = 0.0 -df[sector] = 0 + s_fec = idees["fec"][3:28] + s_ued = idees["ued"][3:28] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# read the corresponding lines -s_fec = excel_fec.iloc[106:112,year] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -assert s_fec.index[0] == sector + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # Industry-specific + sel = [ + "Pulp: Wood preparation, grinding", + "Pulp: Cleaning", + "Pulp: Pulping electric", + ] + df.loc["elec", sector] += s_fec[sel].sum() -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + # Efficiency changes due to biomass + eff_bio = s_ued["Biomass"] / s_fec["Biomass"] + df.loc["biomass", sector] += s_ued["Pulp: Pulping thermal"] / eff_bio -#### Chemicals: High enthalpy heat processing -#> assume fully electrified + s_out = idees["out"][8:9] + assert sector in str(s_out.index) -subsector = 'Chemicals: High enthalpy heat processing' + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Pulp production (kt)"] + ) -# read the corresponding lines -s_fec = excel_fec.iloc[116:129,year] + # Paper production -s_ued = excel_ued.iloc[116:129,year] + # Includes three subcategories: + # (a) Stock preparation; + # (b) Paper machine; + # (c) Product finishing. + # + # (b) Paper machine and (c) Product finishing are left electric + # and thermal is moved to biomass. The efficiency is calculated + # from the pulping process that is already biomass. + # + # (a) Stock preparation represents only 7% and its current energy + # consumption is assumed to be electrified without any change in efficiency. 
-assert s_fec.index[0] == subsector + sector = "Paper production" -eff_elec = s_ued['High enthalpy heat processing - Electric (microwave)']/s_fec['High enthalpy heat processing - Electric (microwave)'] + df[sector] = 0.0 -df.loc['elec',sector] += s_ued[subsector]/eff_elec + s_fec = idees["fec"][29:78] + s_ued = idees["ued"][29:78] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -#### Chemicals: Furnaces -#> assume fully electrified + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -subsector = 'Chemicals: Furnaces' + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# read the corresponding lines -s_fec = excel_fec.iloc[129:137,year] + # Industry-specific + df.loc["elec", sector] += s_fec["Paper: Stock preparation"] -s_ued = excel_ued.iloc[129:137,year] + # add electricity from process that is already electrified + df.loc["elec", sector] += s_fec["Paper: Paper machine - Electricity"] -assert s_fec.index[0] == subsector + # add electricity from process that is already electrified + df.loc["elec", sector] += s_fec["Paper: Product finishing - Electricity"] -eff = s_ued['Chemicals: Furnaces - Electric']/s_fec['Chemicals: Furnaces - Electric'] + s_fec = idees["fec"][53:64] + s_ued = idees["ued"][53:64] + assert s_fec.index[0] == "Paper: Paper machine - Steam use" + assert s_ued.index[0] == "Paper: Paper machine - Steam use" -df.loc['elec',sector] += s_ued[subsector]/eff + # Efficiency changes due to biomass + eff_bio = s_ued["Biomass"] / s_fec["Biomass"] + df.loc["biomass", sector] += s_ued["Paper: Paper machine - Steam use"] / eff_bio -#### Chemicals: Process cooling -#> assume fully electrified + s_fec = idees["fec"][66:77] + s_ued = idees["ued"][66:77] + assert s_fec.index[0] == "Paper: Product finishing - Steam use" + assert s_ued.index[0] == "Paper: Product finishing - Steam use" -subsector = 'Chemicals: Process cooling' + # Efficiency changes due to biomass + eff_bio = 
s_ued["Biomass"] / s_fec["Biomass"] + df.loc["biomass", sector] += s_ued["Paper: Product finishing - Steam use"] / eff_bio -# read the corresponding lines -s_fec = excel_fec.iloc[137:151,year] + s_out = idees["out"][9:10] + assert sector in str(s_out.index) -s_ued = excel_ued.iloc[137:151,year] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh / s_out.values -assert s_fec.index[0] == subsector + # Printing and media reproduction -eff_elec = s_ued['Chemicals: Process cooling - Electric']/s_fec['Chemicals: Process cooling - Electric'] + # (a) Printing and publishing is assumed to be + # electrified without any change in efficiency. -df.loc['elec',sector] += s_ued[subsector]/eff_elec + sector = "Printing and media reproduction" -#### Chemicals: Generic electric process + df[sector] = 0.0 -subsector = 'Chemicals: Generic electric process' + s_fec = idees["fec"][79:90] + s_ued = idees["ued"][79:90] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# read the corresponding lines -s_fec = excel_fec.iloc[151:152,year] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() + df.loc["elec", sector] += s_ued[sel].sum() -assert s_fec.index[0] == subsector + df.loc["heat", sector] += s_fec["Low enthalpy heat"] + df.loc["heat", sector] += s_ued["Low enthalpy heat"] -df.loc['elec',sector] += s_fec[subsector] + # Industry-specific + df.loc["elec", sector] += s_fec["Printing and publishing"] + df.loc["elec", sector] += s_ued["Printing and publishing"] -# read the corresponding lines -s_out = excel_out.iloc[10:11,year] + s_out = idees["out"][10:11] + assert sector in str(s_out.index) -# check the position -assert sector in str(s_out.index) + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = df.loc[sources, sector] * toe_to_MWh 
/ s_out.values -df.loc['process emission',sector] += 0 # unit tCO2/t material + return df -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat', 'naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material -# 1 ktoe = 11630 MWh +def food_beverages_tobacco(): -## Non-metallic mineral products -# -#> This includes cement, ceramic and glass production. -# -#> This sector includes process-emissions related to the fabrication of clinker. + # Food, beverages and tobaco can be completely electrified. + # There are no process emissions associated to this sector. -sector = 'Non-metallic mineral products' + sector = "Food, beverages and tobacco" + idees = load_idees_data(sector) -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) + df = pd.DataFrame(index=index) -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) + df[sector] = 0.0 -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) + s_fec = idees["fec"][3:78] + s_ued = idees["ued"][3:78] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -### Cement -# -#> This sector has process-emissions. -# -#> Includes three subcategories: (a) Grinding, milling of raw material, (b) Pre-heating and pre-calcination, (c) clinker production (kilns), (d) Grinding, packaging. 
(b)+(c) represent 94% of fec. So (a) is joined to (b) and (d) is joined to (c). -# -#> Temperatures above 1400C are required for procesing limestone and sand into clinker. -# -#> Everything (except current electricity and heat consumption and existing biomass) is transformed into methane for high T. + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -sector = 'Cement' + # Efficiency changes due to electrification -df[sector] = 0 + key = "Food: Direct Heat - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Food: Oven (direct heat)"] / eff_elec -# read the corresponding lines -s_fec = excel_fec.iloc[3:25,year] + key = "Food: Process Heat - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Food: Specific process heat"] / eff_elec -s_ued = excel_ued.iloc[3:25,year] + key = "Food: Electric drying" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Food: Drying"] / eff_elec -assert s_fec.index[0] == sector + key = "Food: Electric cooling" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += ( + s_ued["Food: Process cooling and refrigeration"] / eff_elec + ) -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # Steam processing goes all to biomass without change in efficiency + df.loc["biomass", sector] += s_fec["Food: Steam processing"] -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] + # add electricity from process that is already electrified + df.loc["elec", sector] += s_fec["Food: Electric machinery"] -# pre-processing: keep existing elec and biomass, rest to methane -df.loc['elec', sector] += s_fec['Cement: Grinding, milling of raw material'] -df.loc['biomass', sector] += s_fec['Biomass'] -df.loc['methane', sector] += s_fec['Cement: Pre-heating and pre-calcination'] - s_fec['Biomass'] + s_out = idees["out"][3:4] + assert "Physical 
output" in str(s_out.index) + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -#### Cement: Clinker production (kilns) + return df -subsector = 'Cement: Clinker production (kilns)' -# read the corresponding lines -s_fec = excel_fec.iloc[34:43,year] +def non_ferrous_metals(): -s_ued = excel_ued.iloc[34:43,year] + sector = "Non Ferrous Metals" + idees = load_idees_data(sector) -assert s_fec.index[0] == subsector + df = pd.DataFrame(index=index) -df.loc['biomass', sector] += s_fec['Biomass'] -df.loc['methane', sector] += s_fec['Cement: Clinker production (kilns)'] - s_fec['Biomass'] -df.loc['elec', sector] += s_fec['Cement: Grinding, packaging'] + # Alumina + # High enthalpy heat is converted to methane. + # Process heat at T>500ºC is required here. + # Refining is electrified. + # There are no process emissions associated to Alumina manufacturing. -#### Process-emission came from the calcination of limestone to chemically reactive calcium oxide (lime). 
-#> Calcium carbonate -> lime + CO2 -# -#> CaCO3 -> CaO + CO2 + sector = "Alumina production" -s_emi = excel_emi.iloc[3:44,year] + df[sector] = 0.0 -assert s_emi.index[0] == sector + s_fec = idees["fec"][3:31] + s_ued = idees["ued"][3:31] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -s_out = excel_out.iloc[7:8,year] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -assert sector in str(s_out.index) + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -df.loc['process emission',sector] +=s_emi['Process emissions']/s_out.values # unit tCO2/t material + # High-enthalpy heat is transformed into methane -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + s_fec = idees["fec"][13:24] + s_ued = idees["ued"][13:24] + assert s_fec.index[0] == "Alumina production: High enthalpy heat" + assert s_ued.index[0] == "Alumina production: High enthalpy heat" -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material + eff_met = s_ued["Natural gas (incl. biogas)"] / s_fec["Natural gas (incl. biogas)"] + df.loc["methane", sector] += ( + s_fec["Alumina production: High enthalpy heat"] / eff_met + ) -### Ceramics & other NMM -# -#> This sector has process emissions. -# -#> Includes four subcategories: (a) Mixing of raw material, (b) Drying and sintering of raw material, (c) Primary production process, (d) Product finishing. (b)represents 65% of fec and (a) 4%. So (a) is joined to (b). 
-# -#> Everything is electrified + # Efficiency changes due to electrification -sector = 'Ceramics & other NMM' + s_fec = idees["fec"][24:30] + s_ued = idees["ued"][24:30] + assert s_fec.index[0] == "Alumina production: Refining" + assert s_ued.index[0] == "Alumina production: Refining" -df[sector] = 0 + eff_elec = s_ued["Electricity"] / s_fec["Electricity"] + df.loc["elec", sector] += s_ued["Alumina production: Refining"] / eff_elec -# read the corresponding lines -s_fec = excel_fec.iloc[45:94,year] + s_out = idees["out"][9:10] + assert sector in str(s_out.index) -s_ued = excel_ued.iloc[45:94,year] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Alumina production (kt)"] + ) -assert s_fec.index[0] == sector + # Aluminium primary route -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # Production through the primary route is divided into 50% remains + # as today and 50% is transformed into secondary route. 
-# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] + sector = "Aluminium - primary production" -# Efficiency changes due to electrification -eff_elec=s_ued['Ceramics: Microwave drying and sintering']/s_fec['Ceramics: Microwave drying and sintering'] -df.loc['elec', sector] += s_ued[['Ceramics: Mixing of raw material','Ceramics: Drying and sintering of raw material']].sum()/eff_elec + df[sector] = 0.0 -eff_elec=s_ued['Ceramics: Electric kiln']/s_fec['Ceramics: Electric kiln'] -df.loc['elec', sector] += s_ued['Ceramics: Primary production process']/eff_elec + s_fec = idees["fec"][31:66] + s_ued = idees["ued"][31:66] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -eff_elec=s_ued['Ceramics: Electric furnace']/s_fec['Ceramics: Electric furnace'] -df.loc['elec', sector] += s_ued['Ceramics: Product finishing']/eff_elec + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -s_emi = excel_emi.iloc[45:94,year] + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -assert s_emi.index[0] == sector + # Add aluminium electrolysis (smelting + df.loc["elec", sector] += s_fec["Aluminium electrolysis (smelting)"] -s_out = excel_out.iloc[8:9,year] + # Efficiency changes due to electrification + key = "Aluminium processing - Electric" + eff_elec = s_ued[key] / s_fec[key] -assert sector in str(s_out.index) + key = "Aluminium processing (metallurgy e.g. 
cast house, reheating)" + df.loc["elec", sector] += s_ued[key] / eff_elec -df.loc['process emission',sector] += s_emi['Process emissions']/s_out.values # unit tCO2/t material + key = "Aluminium finishing - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Aluminium finishing"] / eff_elec -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + s_emi = idees["emi"][31:67] + assert s_emi.index[0] == sector -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material -# 1 ktoe = 11630 MWh + s_out = idees["out"][11:12] + assert sector in str(s_out.index) -### Glass production -# -#> This sector has process emissions. -# -#> Includes four subcategories: (a) Melting tank, (b) Forming, (c) Annealing, (d) Finishing processes. (a)represents 73%. (b), (d) are joined to (c). -# -#> Everything is electrified. + # tCO2/t material + df.loc["process emission", sector] = ( + s_emi["Process emissions"] / s_out["Aluminium - primary production"] + ) -sector = 'Glass production' + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Aluminium - primary production"] + ) -df[sector] = 0 + # Aluminium secondary route -# read the corresponding lines -s_fec = excel_fec.iloc[95:123,year] + # All is coverted into secondary route fully electrified. 
-s_ued = excel_ued.iloc[95:123,year] + sector = "Aluminium - secondary production" -assert s_fec.index[0] == sector + df[sector] = 0.0 -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + s_fec = idees["fec"][68:109] + s_ued = idees["ued"][68:109] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -# Efficiency changes due to electrification -eff_elec=s_ued['Glass: Electric melting tank']/s_fec['Glass: Electric melting tank'] -df.loc['elec', sector] += s_ued['Glass: Melting tank']/eff_elec + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -eff_elec=s_ued['Glass: Annealing - electric']/s_fec['Glass: Annealing - electric'] -df.loc['elec', sector] += s_ued[['Glass: Forming','Glass: Annealing','Glass: Finishing processes']].sum()/eff_elec + # Efficiency changes due to electrification + key = "Secondary aluminium - Electric" + eff_elec = s_ued[key] / s_fec[key] + key = "Secondary aluminium (incl. pre-treatment, remelting)" + df.loc["elec", sector] += s_ued[key] / eff_elec -s_emi = excel_emi.iloc[95:124,year] + key = "Aluminium processing - Electric" + eff_elec = s_ued[key] / s_fec[key] + key = "Aluminium processing (metallurgy e.g. 
cast house, reheating)" + df.loc["elec", sector] += s_ued[key] / eff_elec -assert s_emi.index[0] == sector + key = "Aluminium finishing - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Aluminium finishing"] / eff_elec -s_out = excel_out.iloc[9:10,year] + s_out = idees["out"][12:13] + assert sector in str(s_out.index) -assert sector in str(s_out.index) + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Aluminium - secondary production"] + ) -df.loc['process emission',sector] += s_emi['Process emissions']/s_out.values # unit tCO2/t material + # Other non-ferrous metals -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + sector = "Other non-ferrous metals" -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material + df[sector] = 0.0 -## Pulp, paper and printing -# -#> Pulp, paper and printing can be completely electrified. -# -#> There are no process emissions associated to this sector. 
+ s_fec = idees["fec"][110:152] + s_ued = idees["ued"][110:152] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -sector = 'Pulp, paper and printing' + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) + # Efficiency changes due to electrification + key = "Metal production - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Other Metals: production"] / eff_elec -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) + key = "Metal processing - Electric" + eff_elec = s_ued[key] / s_fec[key] + key = "Metal processing (metallurgy e.g. cast house, reheating)" + df.loc["elec", sector] += s_ued[key] / eff_elec -### Pulp production -# -#> Includes three subcategories: (a) Wood preparation, grinding; (b) Pulping; (c) Cleaning. -# -#> (b) Pulping is either biomass or electric; left like this (dominated by biomass). 
-# -#> (a) Wood preparation, grinding and (c) Cleaning represent only 10% their current energy consumption is assumed to be electrified without any change in efficiency + key = "Metal finishing - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Metal finishing"] / eff_elec -sector = 'Pulp production' + s_emi = idees["emi"][110:153] + assert s_emi.index[0] == sector -df[sector] = 0 + s_out = idees["out"][13:14] + assert sector in str(s_out.index) -# read the corresponding lines -s_fec = excel_fec.iloc[3:28,year] + # tCO2/t material + df.loc["process emission", sector] = ( + s_emi["Process emissions"] / s_out["Other non-ferrous metals (kt lead eq.)"] + ) -s_ued = excel_ued.iloc[3:28,year] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] + * toe_to_MWh + / s_out["Other non-ferrous metals (kt lead eq.)"] + ) -assert s_fec.index[0] == sector + return df -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] +def transport_equipment(): -# Industry-specific -df.loc['elec', sector] += s_fec[['Pulp: Wood preparation, grinding', 'Pulp: Cleaning', 'Pulp: Pulping electric']].sum() + sector = "Transport Equipment" + idees = load_idees_data(sector) -# Efficiency changes due to biomass -eff_bio=s_ued['Biomass']/s_fec['Biomass'] -df.loc['biomass', sector] += s_ued['Pulp: Pulping thermal']/eff_bio + df = pd.DataFrame(index=index) -s_out = excel_out.iloc[8:9,year] + df[sector] = 0.0 -assert sector in str(s_out.index) + s_fec = idees["fec"][3:45] + s_ued = idees["ued"][3:45] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + sel = ["Lighting", "Air 
compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Pulp production (kt)'] # unit MWh/t material + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -### Paper production -# -#> Includes three subcategories: (a) Stock preparation; (b) Paper machine; (c) Product finishing. -# -#> (b) Paper machine and (c) Product finishing are left electric and thermal is moved to biomass. The efficiency is calculated from the pulping process that is already biomass. -# -#> (a) Stock preparation represents only 7% and its current energy consumption is assumed to be electrified without any change in efficiency. + # Efficiency changes due to electrification + key = "Trans. Eq.: Electric Foundries" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Trans. Eq.: Foundries"] / eff_elec -sector = 'Paper production' + key = "Trans. Eq.: Electric connection" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Trans. Eq.: Connection techniques"] / eff_elec -df[sector] = 0 + key = "Trans. Eq.: Heat treatment - Electric" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Trans. Eq.: Heat treatment"] / eff_elec -# read the corresponding lines -s_fec = excel_fec.iloc[29:78,year] + df.loc["elec", sector] += s_fec["Trans. Eq.: General machinery"] + df.loc["elec", sector] += s_fec["Trans. Eq.: Product finishing"] -s_ued = excel_ued.iloc[29:78,year] + # Steam processing is supplied with biomass + eff_biomass = s_ued["Biomass"] / s_fec["Biomass"] + df.loc["biomass", sector] += s_ued["Trans. 
Eq.: Steam processing"] / eff_biomass -assert s_fec.index[0] == sector + s_out = idees["out"][3:4] + assert "Physical output" in str(s_out.index) -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] + return df -# Industry-specific -df.loc['elec', sector] += s_fec['Paper: Stock preparation'] -# add electricity from process that is already electrified -df.loc['elec', sector] += s_fec['Paper: Paper machine - Electricity'] +def machinery_equipment(): -# add electricity from process that is already electrified -df.loc['elec', sector] += s_fec['Paper: Product finishing - Electricity'] + sector = "Machinery Equipment" + idees = load_idees_data(sector) -s_fec = excel_fec.iloc[53:64,year] + df = pd.DataFrame(index=index) -s_ued = excel_ued.iloc[53:64,year] + df[sector] = 0.0 -assert s_fec.index[0] == 'Paper: Paper machine - Steam use' + s_fec = idees["fec"][3:45] + s_ued = idees["ued"][3:45] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# Efficiency changes due to biomass -eff_bio=s_ued['Biomass']/s_fec['Biomass'] -df.loc['biomass', sector] += s_ued['Paper: Paper machine - Steam use']/eff_bio + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -s_fec = excel_fec.iloc[66:77,year] + # Efficiency changes due to electrification + key = "Mach. Eq.: Electric Foundries" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Mach. Eq.: Foundries"] / eff_elec -s_ued = excel_ued.iloc[66:77,year] + key = "Mach. 
Eq.: Electric connection" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Mach. Eq.: Connection techniques"] / eff_elec -assert s_fec.index[0] == 'Paper: Product finishing - Steam use' + key = "Mach. Eq.: Heat treatment - Electric" + eff_elec = s_ued[key] / s_fec[key] -# Efficiency changes due to biomass -eff_bio=s_ued['Biomass']/s_fec['Biomass'] -df.loc['biomass', sector] += s_ued['Paper: Product finishing - Steam use']/eff_bio + df.loc["elec", sector] += s_ued["Mach. Eq.: Heat treatment"] / eff_elec + df.loc["elec", sector] += s_fec["Mach. Eq.: General machinery"] + df.loc["elec", sector] += s_fec["Mach. Eq.: Product finishing"] -# read the corresponding lines -s_out = excel_out.iloc[9:10,year] + # Steam processing is supplied with biomass + eff_biomass = s_ued["Biomass"] / s_fec["Biomass"] + df.loc["biomass", sector] += s_ued["Mach. Eq.: Steam processing"] / eff_biomass -assert sector in str(s_out.index) + s_out = idees["out"][3:4] + assert "Physical output" in str(s_out.index) -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material\ + return df -### Printing and media reproduction -# -#> (a) Printing and publishing is assumed to be electrified without any change in efficiency. 
-sector='Printing and media reproduction' +def textiles_and_leather(): -df[sector] = 0 + sector = "Textiles and leather" -# read the corresponding lines -s_fec = excel_fec.iloc[79:90,year] + idees = load_idees_data(sector) -s_ued = excel_ued.iloc[79:90,year] + df = pd.DataFrame(index=index) -assert s_fec.index[0] == sector + df[sector] = 0.0 -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec',sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() -df.loc['elec',sector] += s_ued[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + s_fec = idees["fec"][3:57] + s_ued = idees["ued"][3:57] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# Low enthalpy heat -df.loc['heat',sector] += s_fec['Low enthalpy heat'] -df.loc['heat',sector] += s_ued['Low enthalpy heat'] + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -# Industry-specific -df.loc['elec', sector] += s_fec['Printing and publishing'] -df.loc['elec', sector] += s_ued['Printing and publishing'] + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# read the corresponding lines -s_out = excel_out.iloc[10:11,year] + # Efficiency changes due to electrification + key = "Textiles: Electric drying" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Textiles: Drying"] / eff_elec -assert sector in str(s_out.index) + df.loc["elec", sector] += s_fec["Textiles: Electric general machinery"] + df.loc["elec", sector] += s_fec["Textiles: Finishing Electric"] -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] + # Steam processing is supplied with biomass + eff_biomass = s_ued[15:26]["Biomass"] / s_fec[15:26]["Biomass"] + df.loc["biomass", sector] += ( + s_ued["Textiles: Pretreatment with steam"] / eff_biomass + ) + df.loc["biomass", sector] += ( + s_ued["Textiles: Wet processing with steam"] / 
eff_biomass + ) -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out.values # unit MWh/t material + s_out = idees["out"][3:4] + assert "Physical output" in str(s_out.index) -## Food, beverages and tobaco -# -#> Food, beverages and tobaco can be completely electrified. -# -#> There are no process emissions associated to this sector. + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -sector = 'Food, beverages and tobacco' + return df -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) +def wood_and_wood_products(): -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) + sector = "Wood and wood products" -df[sector] = 0 + idees = load_idees_data(sector) -# read the corresponding lines -s_fec = excel_fec.iloc[3:78,year] + df = pd.DataFrame(index=index) -s_ued = excel_ued.iloc[3:78,year] + df[sector] = 0.0 -assert s_fec.index[0] == sector + s_fec = idees["fec"][3:46] + s_ued = idees["ued"][3:46] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -# Efficiency 
changes due to electrification -eff_elec=s_ued['Food: Direct Heat - Electric']/s_fec['Food: Direct Heat - Electric'] -df.loc['elec', sector] += s_ued['Food: Oven (direct heat)']/eff_elec + # Efficiency changes due to electrification + key = "Wood: Electric drying" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Wood: Drying"] / eff_elec -eff_elec=s_ued['Food: Process Heat - Electric']/s_fec['Food: Process Heat - Electric'] -df.loc['elec', sector] += s_ued['Food: Specific process heat']/eff_elec + df.loc["elec", sector] += s_fec["Wood: Electric mechanical processes"] + df.loc["elec", sector] += s_fec["Wood: Finishing Electric"] -eff_elec=s_ued['Food: Electric drying']/s_fec['Food: Electric drying'] -df.loc['elec', sector] += s_ued['Food: Drying']/eff_elec + # Steam processing is supplied with biomass + eff_biomass = s_ued[15:25]["Biomass"] / s_fec[15:25]["Biomass"] + df.loc["biomass", sector] += ( + s_ued["Wood: Specific processes with steam"] / eff_biomass + ) -eff_elec=s_ued['Food: Electric cooling']/s_fec['Food: Electric cooling'] -df.loc['elec', sector] += s_ued['Food: Process cooling and refrigeration']/eff_elec + s_out = idees["out"][3:4] + assert "Physical output" in str(s_out.index) -# Steam processing goes all to biomass without change in efficiency -df.loc['biomass', sector] += s_fec['Food: Steam processing'] + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -# add electricity from process that is already electrified -df.loc['elec', sector] += s_fec['Food: Electric machinery'] + return df -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] +def other_industrial_sectors(): -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # 
unit MWh/t material + sector = "Other Industrial Sectors" -## Non Ferrous Metals + idees = load_idees_data(sector) -sector = 'Non Ferrous Metals' + df = pd.DataFrame(index=index) -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) + df[sector] = 0.0 -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) + s_fec = idees["fec"][3:67] + s_ued = idees["ued"][3:67] + assert s_fec.index[0] == sector + assert s_ued.index[0] == sector -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) + sel = ["Lighting", "Air compressors", "Motor drives", "Fans and pumps"] + df.loc["elec", sector] += s_fec[sel].sum() -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet + df.loc["heat", sector] += s_fec["Low enthalpy heat"] -### Alumina -# -#> High enthalpy heat is converted to methane. Process heat at T>500ºC is required here. -# -#> Refining is electrified. 
-# -#> There are no process emissions associated to Alumina manufacturing + # Efficiency changes due to electrification + key = "Other Industrial sectors: Electric processing" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += ( + s_ued["Other Industrial sectors: Process heating"] / eff_elec + ) -sector = 'Alumina production' + key = "Other Industries: Electric drying" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += s_ued["Other Industrial sectors: Drying"] / eff_elec -df[sector] = 0 + key = "Other Industries: Electric cooling" + eff_elec = s_ued[key] / s_fec[key] + df.loc["elec", sector] += ( + s_ued["Other Industrial sectors: Process Cooling"] / eff_elec + ) -# read the corresponding lines -s_fec = excel_fec.iloc[3:31,year] + # Diesel motors are electrified + key = "Other Industrial sectors: Diesel motors (incl. biofuels)" + df.loc["elec", sector] += s_fec[key] + key = "Other Industrial sectors: Electric machinery" + df.loc["elec", sector] += s_fec[key] -s_ued = excel_ued.iloc[3:31,year] + # Steam processing is supplied with biomass + eff_biomass = s_ued[15:25]["Biomass"] / s_fec[15:25]["Biomass"] + df.loc["biomass", sector] += ( + s_ued["Other Industrial sectors: Steam processing"] / eff_biomass + ) -assert s_fec.index[0] == sector + s_out = idees["out"][3:4] + assert "Physical output" in str(s_out.index) -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() + # MWh/t material + sources = ["elec", "biomass", "methane", "hydrogen", "heat", "naphtha"] + df.loc[sources, sector] = ( + df.loc[sources, sector] * toe_to_MWh / s_out["Physical output (index)"] + ) -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] + return df -# High-enthalpy heat is transformed into methane -s_fec = excel_fec.iloc[13:24,year] -s_ued = excel_ued.iloc[13:24,year] +if __name__ == "__main__": + if 'snakemake' not in globals(): + 
from helper import mock_snakemake + snakemake = mock_snakemake('build_industry_sector_ratios') -assert s_fec.index[0] == 'Alumina production: High enthalpy heat' + # TODO make config option + year = 2015 -eff_met=s_ued['Natural gas (incl. biogas)']/s_fec['Natural gas (incl. biogas)'] -df.loc['methane', sector] += s_fec['Alumina production: High enthalpy heat']/eff_met + config = snakemake.config["industry"] -# Efficiency changes due to electrification -s_fec = excel_fec.iloc[24:30,year] + df = pd.concat( + [ + iron_and_steel(), + chemicals_industry(), + nonmetalic_mineral_products(), + pulp_paper_printing(), + food_beverages_tobacco(), + non_ferrous_metals(), + transport_equipment(), + machinery_equipment(), + textiles_and_leather(), + wood_and_wood_products(), + other_industrial_sectors(), + ], + axis=1, + ) -s_ued = excel_ued.iloc[24:30,year] - -assert s_fec.index[0] == 'Alumina production: Refining' - -eff_elec=s_ued['Electricity']/s_fec['Electricity'] -df.loc['elec', sector] += s_ued['Alumina production: Refining']/eff_elec - -# read the corresponding lines -s_out = excel_out.iloc[9:10,year] - -assert sector in str(s_out.index) - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Alumina production (kt)'] # unit MWh/t material - -### Aluminium primary route -# -#> Production through the primary route is divided into 50% remains as today and 50% is transformed into secondary route - -sector = 'Aluminium - primary production' - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[31:66,year] - -s_ued = excel_ued.iloc[31:66,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Add aluminium 
electrolysis (smelting -df.loc['elec', sector] += s_fec['Aluminium electrolysis (smelting)'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Aluminium processing - Electric']/s_fec['Aluminium processing - Electric'] -df.loc['elec', sector] += s_ued['Aluminium processing (metallurgy e.g. cast house, reheating)']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Aluminium finishing - Electric']/s_fec['Aluminium finishing - Electric'] -df.loc['elec', sector] += s_ued['Aluminium finishing']/eff_elec - -s_emi = excel_emi.iloc[31:67,year] - -assert s_emi.index[0] == sector - -s_out = excel_out.iloc[11:12,year] - -assert sector in str(s_out.index) - -df.loc['process emission',sector] = s_emi['Process emissions']/s_out['Aluminium - primary production'] # unit tCO2/t material - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Aluminium - primary production'] # unit MWh/t material - -### Aluminium secondary route -# -#> All is coverted into secondary route fully electrified - -sector = 'Aluminium - secondary production' - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[68:109,year] - -s_ued = excel_ued.iloc[68:109,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Secondary aluminium - Electric']/s_fec['Secondary aluminium - Electric'] -df.loc['elec', sector] += s_ued['Secondary aluminium (incl. 
pre-treatment, remelting)']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Aluminium processing - Electric']/s_fec['Aluminium processing - Electric'] -df.loc['elec', sector] += s_ued['Aluminium processing (metallurgy e.g. cast house, reheating)']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Aluminium finishing - Electric']/s_fec['Aluminium finishing - Electric'] -df.loc['elec', sector] += s_ued['Aluminium finishing']/eff_elec - -# read the corresponding lines -s_out = excel_out.iloc[12:13,year] - -assert sector in str(s_out.index) - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Aluminium - secondary production'] # unit MWh/t material -# 1 ktoe = 11630 MWh - - -### Other non-ferrous metals - -sector = 'Other non-ferrous metals' - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[110:152,year] - -s_ued = excel_ued.iloc[110:152,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Metal production - Electric']/s_fec['Metal production - Electric'] -df.loc['elec', sector] += s_ued['Other Metals: production']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Metal processing - Electric']/s_fec['Metal processing - Electric'] -df.loc['elec', sector] += s_ued['Metal processing (metallurgy e.g. 
cast house, reheating)']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Metal finishing - Electric']/s_fec['Metal finishing - Electric'] -df.loc['elec', sector] += s_ued['Metal finishing']/eff_elec - -s_emi = excel_emi.iloc[110:153,year] - -assert s_emi.index[0] == sector - -s_out = excel_out.iloc[13:14,year] - -assert sector in str(s_out.index) - -df.loc['process emission',sector] = s_emi['Process emissions']/s_out['Other non-ferrous metals (kt lead eq.)'] # unit tCO2/t material - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Other non-ferrous metals (kt lead eq.)'] # unit MWh/t material - -## Transport Equipment - -sector = 'Transport Equipment' -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) - -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) - -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) - -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[3:45,year] - -s_ued = excel_ued.iloc[3:45,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification 
-eff_elec=s_ued['Trans. Eq.: Electric Foundries']/s_fec['Trans. Eq.: Electric Foundries'] -df.loc['elec', sector] += s_ued['Trans. Eq.: Foundries']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Trans. Eq.: Electric connection']/s_fec['Trans. Eq.: Electric connection'] -df.loc['elec', sector] += s_ued['Trans. Eq.: Connection techniques']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Trans. Eq.: Heat treatment - Electric']/s_fec['Trans. Eq.: Heat treatment - Electric'] -df.loc['elec', sector] += s_ued['Trans. Eq.: Heat treatment']/eff_elec - -df.loc['elec', sector] += s_fec['Trans. Eq.: General machinery'] -df.loc['elec', sector] += s_fec['Trans. Eq.: Product finishing'] - -# Steam processing is supplied with biomass -eff_biomass=s_ued['Biomass']/s_fec['Biomass'] -df.loc['biomass', sector] += s_ued['Trans. Eq.: Steam processing']/eff_biomass - -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # unit MWh/t material -# 1 ktoe = 11630 MWh - -## Machinery Equipment - -sector = 'Machinery Equipment' - -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) - -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) - -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) - -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # 
the emission sheet - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[3:45,year] - -s_ued = excel_ued.iloc[3:45,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Mach. Eq.: Electric Foundries']/s_fec['Mach. Eq.: Electric Foundries'] -df.loc['elec', sector] += s_ued['Mach. Eq.: Foundries']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Mach. Eq.: Electric connection']/s_fec['Mach. Eq.: Electric connection'] -df.loc['elec', sector] += s_ued['Mach. Eq.: Connection techniques']/eff_elec - -# Efficiency changes due to electrification -eff_elec=s_ued['Mach. Eq.: Heat treatment - Electric']/s_fec['Mach. Eq.: Heat treatment - Electric'] -df.loc['elec', sector] += s_ued['Mach. Eq.: Heat treatment']/eff_elec - -df.loc['elec', sector] += s_fec['Mach. Eq.: General machinery'] -df.loc['elec', sector] += s_fec['Mach. Eq.: Product finishing'] - -# Steam processing is supplied with biomass -eff_biomass=s_ued['Biomass']/s_fec['Biomass'] -df.loc['biomass', sector] += s_ued['Mach. 
Eq.: Steam processing']/eff_biomass - -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # unit MWh/t material - -## Textiles and leather - -sector = 'Textiles and leather' -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) - -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) - -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) - -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[3:57,year] - -s_ued = excel_ued.iloc[3:57,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Textiles: Electric drying']/s_fec['Textiles: Electric drying'] -df.loc['elec', sector] += s_ued['Textiles: Drying']/eff_elec - -df.loc['elec', sector] += s_fec['Textiles: Electric general machinery'] -df.loc['elec', sector] += s_fec['Textiles: Finishing Electric'] - -# Steam processing is supplied with biomass -eff_biomass=s_ued[15:26]['Biomass']/s_fec[15:26]['Biomass'] -df.loc['biomass', sector] += 
s_ued['Textiles: Pretreatment with steam']/eff_biomass -df.loc['biomass', sector] += s_ued['Textiles: Wet processing with steam']/eff_biomass - -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # unit MWh/t material - -## Wood and wood products - -sector = 'Wood and wood products' -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) - -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) - -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) - -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[3:46,year] - -s_ued = excel_ued.iloc[3:46,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Wood: Electric drying']/s_fec['Wood: Electric drying'] -df.loc['elec', sector] += s_ued['Wood: Drying']/eff_elec - -df.loc['elec', sector] += s_fec['Wood: Electric mechanical processes'] -df.loc['elec', sector] += s_fec['Wood: Finishing Electric'] - -# Steam processing is supplied with biomass 
-eff_biomass=s_ued[15:25]['Biomass']/s_fec[15:25]['Biomass'] -df.loc['biomass', sector] += s_ued['Wood: Specific processes with steam']/eff_biomass - -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # unit MWh/t material - -## Other Industrial Sectors - -sector = 'Other Industrial Sectors' -# read the input sheets -excel_fec = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_fec', - index_col=0,header=0,squeeze=True) - -excel_ued = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_ued', - index_col=0,header=0,squeeze=True) - -excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector], - index_col=0,header=0,squeeze=True) - -excel_emi = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(base_dir,country), sheet_name=sub_sheet_name_dict[sector]+'_emi', - index_col=0,header=0,squeeze=True) # the emission sheet - -df[sector] = 0 - -# read the corresponding lines -s_fec = excel_fec.iloc[3:67,year] - -s_ued = excel_ued.iloc[3:67,year] - -assert s_fec.index[0] == sector - -# Lighting, Air compressors, Motor drives, Fans and pumps -df.loc['elec', sector] += s_fec[['Lighting','Air compressors','Motor drives','Fans and pumps']].sum() - -# Low enthalpy heat -df.loc['heat', sector] += s_fec['Low enthalpy heat'] - -# Efficiency changes due to electrification -eff_elec=s_ued['Other Industrial sectors: Electric processing']/s_fec['Other Industrial sectors: Electric processing'] -df.loc['elec', sector] += s_ued['Other Industrial sectors: Process heating']/eff_elec - -eff_elec=s_ued['Other Industries: Electric drying']/s_fec['Other Industries: Electric drying'] 
-df.loc['elec', sector] += s_ued['Other Industrial sectors: Drying']/eff_elec - -eff_elec=s_ued['Other Industries: Electric cooling']/s_fec['Other Industries: Electric cooling'] -df.loc['elec', sector] += s_ued['Other Industrial sectors: Process Cooling']/eff_elec - -# Diesel motors are electrified -df.loc['elec', sector] += s_fec['Other Industrial sectors: Diesel motors (incl. biofuels)'] -df.loc['elec', sector] += s_fec['Other Industrial sectors: Electric machinery'] - -# Steam processing is supplied with biomass -eff_biomass=s_ued[15:25]['Biomass']/s_fec[15:25]['Biomass'] -df.loc['biomass', sector] += s_ued['Other Industrial sectors: Steam processing']/eff_biomass - -# read the corresponding lines -s_out = excel_out.iloc[3:4,year] - -# final energy consumption per t -sources=['elec','biomass', 'methane', 'hydrogen', 'heat','naphtha'] -df.loc[sources,sector] = df.loc[sources,sector]*conv_factor/s_out['Physical output (index)'] # unit MWh/t material - - -df.index.name = "MWh/tMaterial" -df.to_csv('resources/industry_sector_ratios.csv') + df.index.name = "MWh/tMaterial" + df.to_csv(snakemake.output.industry_sector_ratios) diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py index 497e399a..57934fb2 100644 --- a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -1,103 +1,98 @@ +"""Build mapping between grid cells and population (total, urban, rural)""" -# Build mapping between grid cells and population (total, urban, rural) - +import multiprocessing as mp import atlite +import numpy as np import pandas as pd import xarray as xr +import geopandas as gpd from vresutils import shapes as vshapes -import geopandas as gpd +if __name__ == '__main__': + if 'snakemake' not in globals(): + from helper import mock_snakemake + snakemake = mock_snakemake('build_population_layouts') + cutout = atlite.Cutout(snakemake.config['atlite']['cutout']) -if 'snakemake' not in globals(): - from vresutils import Dict - import 
yaml - snakemake = Dict() - with open('config.yaml') as f: - snakemake.config = yaml.safe_load(f) - snakemake.input = Dict() - snakemake.output = Dict() + grid_cells = cutout.grid_cells() - snakemake.input["urban_percent"] = "data/urban_percent.csv" + # nuts3 has columns country, gdp, pop, geometry + # population is given in dimensions of 1e3=k + nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') -cutout = atlite.Cutout(snakemake.config['atlite']['cutout_name'], - cutout_dir=snakemake.config['atlite']['cutout_dir']) + # Indicator matrix NUTS3 -> grid cells + I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) -grid_cells = cutout.grid_cells() + # Indicator matrix grid_cells -> NUTS3; inprinciple Iinv*I is identity + # but imprecisions mean not perfect + Iinv = cutout.indicatormatrix(nuts3.geometry) -#nuts3 has columns country, gdp, pop, geometry -#population is given in dimensions of 1e3=k -nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') + countries = np.sort(nuts3.country.unique()) + urban_fraction = pd.read_csv(snakemake.input.urban_percent, + header=None, index_col=0, + names=['fraction'], squeeze=True) / 100. 
-# Indicator matrix NUTS3 -> grid cells -I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells) + # fill missing Balkans values + missing = ["AL", "ME", "MK"] + reference = ["RS", "BA"] + average = urban_fraction[reference].mean() + fill_values = pd.Series({ct: average for ct in missing}) + urban_fraction = urban_fraction.append(fill_values) -# Indicator matrix grid_cells -> NUTS3; inprinciple Iinv*I is identity -# but imprecisions mean not perfect -Iinv = cutout.indicatormatrix(nuts3.geometry) + # population in each grid cell + pop_cells = pd.Series(I.dot(nuts3['pop'])) -countries = nuts3.country.value_counts().index.sort_values() + # in km^2 + with mp.Pool(processes=snakemake.threads) as pool: + cell_areas = pd.Series(pool.map(vshapes.area, grid_cells)) / 1e6 -urban_fraction = pd.read_csv(snakemake.input.urban_percent, - header=None,index_col=0,squeeze=True)/100. + # pop per km^2 + density_cells = pop_cells / cell_areas -#fill missing Balkans values -missing = ["AL","ME","MK"] -reference = ["RS","BA"] -urban_fraction = urban_fraction.reindex(urban_fraction.index.union(missing)) -urban_fraction.loc[missing] = urban_fraction[reference].mean() + # rural or urban population in grid cell + pop_rural = pd.Series(0., density_cells.index) + pop_urban = pd.Series(0., density_cells.index) + for ct in countries: + print(ct, urban_fraction[ct]) -#population in each grid cell -pop_cells = pd.Series(I.dot(nuts3['pop'])) + indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.) 
-#in km^2 -cell_areas = pd.Series(cutout.grid_cells()).map(vshapes.area)/1e6 + indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct)) -#pop per km^2 -density_cells = pop_cells/cell_areas + density_cells_ct = indicator_cells_ct * density_cells + pop_cells_ct = indicator_cells_ct * pop_cells -#rural or urban population in grid cell -pop_rural = pd.Series(0.,density_cells.index) -pop_urban = pd.Series(0.,density_cells.index) + # correct for imprecision of Iinv*I + pop_ct = nuts3.loc[nuts3.country==ct,'pop'].sum() + pop_cells_ct *= pop_ct / pop_cells_ct.sum() -for ct in countries: - print(ct,urban_fraction[ct]) + # The first low density grid cells to reach rural fraction are rural + asc_density_i = density_cells_ct.sort_values().index + asc_density_cumsum = pop_cells_ct[asc_density_i].cumsum() / pop_cells_ct.sum() + rural_fraction_ct = 1 - urban_fraction[ct] + pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct + pop_ct_urban_b = ~pop_ct_rural_b - indicator_nuts3_ct = pd.Series(0.,nuts3.index) - indicator_nuts3_ct[nuts3.index[nuts3.country==ct]] = 1. + pop_ct_rural_b[indicator_cells_ct == 0.] = False + pop_ct_urban_b[indicator_cells_ct == 0.] = False - indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct)) + pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.) + pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.) 
- density_cells_ct = indicator_cells_ct*density_cells + pop_cells = {"total": pop_cells} + pop_cells["rural"] = pop_rural + pop_cells["urban"] = pop_urban - pop_cells_ct = indicator_cells_ct*pop_cells + for key, pop in pop_cells.items(): - #correct for imprecision of Iinv*I - pop_ct = nuts3['pop'][indicator_nuts3_ct.index[indicator_nuts3_ct == 1.]].sum() - pop_cells_ct = pop_cells_ct*pop_ct/pop_cells_ct.sum() + ycoords = ('y', cutout.coords['y']) + xcoords = ('x', cutout.coords['x']) + values = pop.values.reshape(cutout.shape) + layout = xr.DataArray(values, [ycoords, xcoords]) - # The first low density grid cells to reach rural fraction are rural - index_from_low_d_to_high_d = density_cells_ct.sort_values().index - pop_ct_rural_b = pop_cells_ct[index_from_low_d_to_high_d].cumsum()/pop_cells_ct.sum() < (1-urban_fraction[ct]) - pop_ct_urban_b = ~pop_ct_rural_b - - pop_ct_rural_b[indicator_cells_ct==0.] = False - pop_ct_urban_b[indicator_cells_ct==0.] = False - - pop_rural += pop_cells_ct.where(pop_ct_rural_b,0.) - pop_urban += pop_cells_ct.where(pop_ct_urban_b,0.) 
- -pop_cells = {"total" : pop_cells} - -pop_cells["rural"] = pop_rural -pop_cells["urban"] = pop_urban - -for key in pop_cells.keys(): - layout = xr.DataArray(pop_cells[key].values.reshape(cutout.shape), - [('y', cutout.coords['y']), ('x', cutout.coords['x'])]) - - layout.to_netcdf(snakemake.output["pop_layout_"+key]) + layout.to_netcdf(snakemake.output[f"pop_layout_{key}"]) diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index 985c530f..bec71a53 100644 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -441,7 +441,7 @@ def prepare_temperature_data(): temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365 """ - temperature = xr.open_dataarray(snakemake.input.air_temperature).T.to_pandas() + temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas() d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean() .resample("1D").mean() 0.] = 0. # load += -stores - weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum() + weighted_prices.loc[carrier,label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum() if carrier[:5] == "space": - print(load*n.buses_t.marginal_price[buses]) + print(load * n.buses_t.marginal_price[buses]) return weighted_prices - - def calculate_market_values(n, label, market_values): # Warning: doesn't include storage units @@ -463,41 +446,40 @@ def calculate_market_values(n, label, market_values): ## First do market value of generators ## - generators = n.generators.index[n.buses.loc[n.generators.bus,"carrier"] == carrier] + generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier] - techs = n.generators.loc[generators,"carrier"].value_counts().index + techs = n.generators.loc[generators, "carrier"].value_counts().index market_values = market_values.reindex(market_values.index.union(techs)) for tech in techs: - gens = 
generators[n.generators.loc[generators,"carrier"] == tech] + gens = generators[n.generators.loc[generators, "carrier"] == tech] - dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens,"bus"],axis=1).sum().reindex(columns=buses,fill_value=0.) + dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.) - revenue = dispatch*n.buses_t.marginal_price[buses] - - market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum() + revenue = dispatch * n.buses_t.marginal_price[buses] + market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum() ## Now do market value of links ## - for i in ["0","1"]: - all_links = n.links.index[n.buses.loc[n.links["bus"+i],"carrier"] == carrier] + for i in ["0", "1"]: + all_links = n.links.index[n.buses.loc[n.links["bus"+i], "carrier"] == carrier] - techs = n.links.loc[all_links,"carrier"].value_counts().index + techs = n.links.loc[all_links, "carrier"].value_counts().index market_values = market_values.reindex(market_values.index.union(techs)) for tech in techs: - links = all_links[n.links.loc[all_links,"carrier"] == tech] + links = all_links[n.links.loc[all_links, "carrier"] == tech] - dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links,"bus"+i],axis=1).sum().reindex(columns=buses,fill_value=0.) + dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links, "bus"+i], axis=1).sum().reindex(columns=buses, fill_value=0.) 
- revenue = dispatch*n.buses_t.marginal_price[buses] + revenue = dispatch * n.buses_t.marginal_price[buses] - market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum() + market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum() return market_values @@ -505,17 +487,17 @@ def calculate_market_values(n, label, market_values): def calculate_price_statistics(n, label, price_statistics): - price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours","mean","standard_deviation"]))) + price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours", "mean", "standard_deviation"]))) buses = n.buses.index[n.buses.carrier == "AC"] - threshold = 0.1 #higher than phoney marginal_cost of wind/solar + threshold = 0.1 # higher than phoney marginal_cost of wind/solar - df = pd.DataFrame(data=0.,columns=buses,index=n.snapshots) + df = pd.DataFrame(data=0., columns=buses, index=n.snapshots) df[n.buses_t.marginal_price[buses] < threshold] = 1. 
- price_statistics.at["zero_hours", label] = df.sum().sum()/(df.shape[0]*df.shape[1]) + price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1]) price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean() @@ -524,38 +506,41 @@ def calculate_price_statistics(n, label, price_statistics): return price_statistics -outputs = ["nodal_costs", - "nodal_capacities", - "nodal_cfs", - "cfs", - "costs", - "capacities", - "curtailment", - "energy", - "supply", - "supply_energy", - "prices", - "weighted_prices", - "price_statistics", - "market_values", - "metrics", - ] - def make_summaries(networks_dict): - columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["cluster","lv","opt","planning_horizon"]) + outputs = [ + "nodal_costs", + "nodal_capacities", + "nodal_cfs", + "cfs", + "costs", + "capacities", + "curtailment", + "energy", + "supply", + "supply_energy", + "prices", + "weighted_prices", + "price_statistics", + "market_values", + "metrics", + ] + + columns = pd.MultiIndex.from_tuples( + networks_dict.keys(), + names=["cluster", "lv", "opt", "planning_horizon"] + ) df = {} for output in outputs: - df[output] = pd.DataFrame(columns=columns,dtype=float) + df[output] = pd.DataFrame(columns=columns, dtype=float) - for label, filename in iteritems(networks_dict): + for label, filename in networks_dict.items(): print(label, filename) - n = pypsa.Network(filename, - override_component_attrs=override_component_attrs) - + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(filename, override_component_attrs=overrides) assign_carriers(n) assign_locations(n) @@ -567,56 +552,37 @@ def make_summaries(networks_dict): def to_csv(df): - for key in df: df[key].to_csv(snakemake.output[key]) if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils import Dict - import yaml - snakemake = Dict() - with 
open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) - - #overwrite some options - snakemake.config["run"] = "version-8" - snakemake.config["scenario"]["lv"] = [1.0] - snakemake.config["scenario"]["sector_opts"] = ["3H-T-H-B-I-solar3-dist1"] - snakemake.config["planning_horizons"] = ['2020', '2030', '2040', '2050'] - snakemake.input = Dict() - snakemake.input['heat_demand_name'] = 'data/heating/daily_heat_demand.h5' - snakemake.input['costs'] = snakemake.config['costs_dir'] + "costs_{}.csv".format(snakemake.config['scenario']['planning_horizons'][0]) - snakemake.output = Dict() - for item in outputs: - snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'],item=item) - snakemake.output['cumulative_cost'] = snakemake.config['summary_dir'] + '/{name}/csvs/cumulative_cost.csv'.format(name=snakemake.config['run']) - networks_dict = {(cluster, lv, opt+sector_opt, planning_horizon) : - snakemake.config['results_dir'] + snakemake.config['run'] + '/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc'\ - .format(simpl=simpl, - cluster=cluster, - opt=opt, - lv=lv, - sector_opt=sector_opt, - planning_horizon=planning_horizon)\ - for simpl in snakemake.config['scenario']['simpl'] \ - for cluster in snakemake.config['scenario']['clusters'] \ - for opt in snakemake.config['scenario']['opts'] \ - for sector_opt in snakemake.config['scenario']['sector_opts'] \ - for lv in snakemake.config['scenario']['lv'] \ - for planning_horizon in snakemake.config['scenario']['planning_horizons']} + from helper import mock_snakemake + snakemake = mock_snakemake('make_summary') + + networks_dict = { + (cluster, lv, opt+sector_opt, planning_horizon) : + snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \ + for simpl in snakemake.config['scenario']['simpl'] \ + for cluster 
in snakemake.config['scenario']['clusters'] \ + for opt in snakemake.config['scenario']['opts'] \ + for sector_opt in snakemake.config['scenario']['sector_opts'] \ + for lv in snakemake.config['scenario']['lv'] \ + for planning_horizon in snakemake.config['scenario']['planning_horizons'] + } print(networks_dict) Nyears = 1 - costs_db = prepare_costs(snakemake.input.costs, - snakemake.config['costs']['USD2013_to_EUR2013'], - snakemake.config['costs']['discountrate'], - Nyears, - snakemake.config['costs']['lifetime']) + costs_db = prepare_costs( + snakemake.input.costs, + snakemake.config['costs']['USD2013_to_EUR2013'], + snakemake.config['costs']['discountrate'], + Nyears, + snakemake.config['costs']['lifetime'] + ) df = make_summaries(networks_dict) diff --git a/scripts/plot_network.py b/scripts/plot_network.py index 2aaef6bc..cd74d3ea 100644 --- a/scripts/plot_network.py +++ b/scripts/plot_network.py @@ -1,44 +1,20 @@ +import pypsa +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt import cartopy.crs as ccrs + from matplotlib.legend_handler import HandlerPatch from matplotlib.patches import Circle, Ellipse + from make_summary import assign_carriers from plot_summary import rename_techs, preferred_order -import numpy as np -import pypsa -import matplotlib.pyplot as plt -import pandas as pd +from helper import override_component_attrs -# allow plotting without Xwindows -import matplotlib -matplotlib.use('Agg') +plt.style.use('ggplot') -# from sector/scripts/paper_graphics-co2_sweep.py - - -override_component_attrs = pypsa.descriptors.Dict( - {k: v.copy() for k, v in pypsa.components.component_attrs.items()}) -override_component_attrs["Link"].loc["bus2"] = [ - "string", np.nan, np.nan, "2nd bus", "Input (optional)"] -override_component_attrs["Link"].loc["bus3"] = [ - "string", np.nan, np.nan, "3rd bus", "Input (optional)"] -override_component_attrs["Link"].loc["efficiency2"] = [ - "static or series", "per unit", 1., "2nd bus efficiency", "Input 
(optional)"] -override_component_attrs["Link"].loc["efficiency3"] = [ - "static or series", "per unit", 1., "3rd bus efficiency", "Input (optional)"] -override_component_attrs["Link"].loc["p2"] = [ - "series", "MW", 0., "2nd bus output", "Output"] -override_component_attrs["Link"].loc["p3"] = [ - "series", "MW", 0., "3rd bus output", "Output"] -override_component_attrs["StorageUnit"].loc["p_dispatch"] = [ - "series", "MW", 0., "Storage discharging.", "Output"] -override_component_attrs["StorageUnit"].loc["p_store"] = [ - "series", "MW", 0., "Storage charging.", "Output"] - - - -# ----------------- PLOT HELPERS --------------------------------------------- def rename_techs_tyndp(tech): tech = rename_techs(tech) if "heat pump" in tech or "resistive heater" in tech: @@ -61,8 +37,7 @@ def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): fig = ax.get_figure() def axes2pt(): - return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[ - 0] * (72. / fig.dpi) + return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (72. 
/ fig.dpi) ellipses = [] if not dont_resize_actively: @@ -90,20 +65,14 @@ def make_legend_circles_for(sizes, scale=1.0, **kw): def assign_location(n): for c in n.iterate_components(n.one_port_components | n.branch_components): - ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index) - for i in ifind.value_counts().index: # these have already been assigned defaults - if i == -1: - continue - + if i == -1: continue names = ifind.index[ifind == i] - c.df.loc[names, 'location'] = names.str[:i] -# ----------------- PLOT FUNCTIONS -------------------------------------------- def plot_map(network, components=["links", "stores", "storage_units", "generators"], bus_size_factor=1.7e10, transmission=False): @@ -126,6 +95,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator costs = pd.concat([costs, costs_c], axis=1) print(comp, costs) + costs = costs.groupby(costs.columns, axis=1).sum() costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True) @@ -193,24 +163,34 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) fig.set_size_inches(7, 6) - n.plot(bus_sizes=costs / bus_size_factor, - bus_colors=snakemake.config['plotting']['tech_colors'], - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, boundaries=(-10, 30, 34, 70), - color_geomap={'ocean': 'lightblue', 'land': "palegoldenrod"}) + n.plot( + bus_sizes=costs / bus_size_factor, + bus_colors=snakemake.config['plotting']['tech_colors'], + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, **map_opts + ) handles = make_legend_circles_for( - [5e9, 1e9], scale=bus_size_factor, facecolor="gray") + [5e9, 1e9], + scale=bus_size_factor, + facecolor="gray" + ) + labels = ["{} 
bEUR/a".format(s) for s in (5, 1)] - l2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.01, 1.01), - labelspacing=1.0, - framealpha=1., - title='System cost', - handler_map=make_handler_map_to_scale_circles_as_in(ax)) + + l2 = ax.legend( + handles, labels, + loc="upper left", + bbox_to_anchor=(0.01, 1.01), + labelspacing=1.0, + frameon=False, + title='System cost', + handler_map=make_handler_map_to_scale_circles_as_in(ax) + ) + ax.add_artist(l2) handles = [] @@ -221,16 +201,23 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator linewidth=s * 1e3 / linewidth_factor)) labels.append("{} GW".format(s)) - l1_1 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.30, 1.01), - framealpha=1, - labelspacing=0.8, handletextpad=1.5, - title=title) + l1_1 = ax.legend( + handles, labels, + loc="upper left", + bbox_to_anchor=(0.22, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=1.5, + title=title + ) ax.add_artist(l1_1) - fig.savefig(snakemake.output.map, transparent=True, - bbox_inches="tight") + fig.savefig( + snakemake.output.map, + transparent=True, + bbox_inches="tight" + ) def plot_h2_map(network): @@ -253,7 +240,7 @@ def plot_h2_map(network): elec = n.links.index[n.links.carrier == "H2 Electrolysis"] - bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby(n.links.loc[elec,"bus0"]).sum() / bus_size_factor + bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby(n.links.loc[elec, "bus0"]).sum() / bus_size_factor # make a fake MultiIndex so that area is correct for legend bus_sizes.index = pd.MultiIndex.from_product( @@ -271,26 +258,38 @@ def plot_h2_map(network): print(n.links[["bus0", "bus1"]]) - fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) + fig, ax = plt.subplots( + figsize=(7, 6), + subplot_kw={"projection": ccrs.PlateCarree()} + ) - fig.set_size_inches(7, 6) - - n.plot(bus_sizes=bus_sizes, - bus_colors={"electrolysis": bus_color}, - link_colors=link_color, - 
link_widths=link_widths, - branch_components=["Link"], - ax=ax, boundaries=(-10, 30, 34, 70)) + n.plot( + bus_sizes=bus_sizes, + bus_colors={"electrolysis": bus_color}, + link_colors=link_color, + link_widths=link_widths, + branch_components=["Link"], + ax=ax, **map_opts + ) handles = make_legend_circles_for( - [50000, 10000], scale=bus_size_factor, facecolor=bus_color) + [50000, 10000], + scale=bus_size_factor, + facecolor=bus_color + ) + labels = ["{} GW".format(s) for s in (50, 10)] - l2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.01, 1.01), - labelspacing=1.0, - framealpha=1., - title='Electrolyzer capacity', - handler_map=make_handler_map_to_scale_circles_as_in(ax)) + + l2 = ax.legend( + handles, labels, + loc="upper left", + bbox_to_anchor=(0.01, 1.01), + labelspacing=1.0, + frameon=False, + title='Electrolyzer capacity', + handler_map=make_handler_map_to_scale_circles_as_in(ax) + ) + ax.add_artist(l2) handles = [] @@ -300,15 +299,24 @@ def plot_h2_map(network): handles.append(plt.Line2D([0], [0], color=link_color, linewidth=s * 1e3 / linewidth_factor)) labels.append("{} GW".format(s)) - l1_1 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.30, 1.01), - framealpha=1, - labelspacing=0.8, handletextpad=1.5, - title='H2 pipeline capacity') + + l1_1 = ax.legend( + handles, labels, + loc="upper left", + bbox_to_anchor=(0.28, 1.01), + frameon=False, + labelspacing=0.8, + handletextpad=1.5, + title='H2 pipeline capacity' + ) + ax.add_artist(l1_1) - fig.savefig(snakemake.output.map.replace("-costs-all","-h2_network"), transparent=True, - bbox_inches="tight") + fig.savefig( + snakemake.output.map.replace("-costs-all","-h2_network"), + transparent=True, + bbox_inches="tight" + ) def plot_map_without(network): @@ -319,9 +327,10 @@ def plot_map_without(network): # Drop non-electric buses so they don't clutter the plot n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True) - fig, ax = 
plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) - - fig.set_size_inches(7, 6) + fig, ax = plt.subplots( + figsize=(7, 6), + subplot_kw={"projection": ccrs.PlateCarree()} + ) # PDF has minimum width, so set these to zero line_lower_threshold = 200. @@ -333,8 +342,8 @@ def plot_map_without(network): # hack because impossible to drop buses... n.buses.loc["EU gas", ["x", "y"]] = n.buses.loc["DE0 0", ["x", "y"]] - n.links.drop(n.links.index[(n.links.carrier != "DC") & ( - n.links.carrier != "B2B")], inplace=True) + to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")] + n.links.drop(to_drop, inplace=True) if snakemake.wildcards["lv"] == "1.0": line_widths = n.lines.s_nom @@ -349,13 +358,14 @@ def plot_map_without(network): line_widths[line_widths > line_upper_threshold] = line_upper_threshold link_widths[link_widths > line_upper_threshold] = line_upper_threshold - n.plot(bus_colors="k", - line_colors=ac_color, - link_colors=dc_color, - line_widths=line_widths / linewidth_factor, - link_widths=link_widths / linewidth_factor, - ax=ax, boundaries=(-10, 30, 34, 70), - color_geomap={'ocean': 'lightblue', 'land': "palegoldenrod"}) + n.plot( + bus_colors="k", + line_colors=ac_color, + link_colors=dc_color, + line_widths=line_widths / linewidth_factor, + link_widths=link_widths / linewidth_factor, + ax=ax, **map_opts + ) handles = [] labels = [] @@ -366,12 +376,16 @@ def plot_map_without(network): labels.append("{} GW".format(s)) l1_1 = ax.legend(handles, labels, loc="upper left", bbox_to_anchor=(0.05, 1.01), - framealpha=1, + frameon=False, labelspacing=0.8, handletextpad=1.5, title='Today\'s transmission') ax.add_artist(l1_1) - fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight") + fig.savefig( + snakemake.output.today, + transparent=True, + bbox_inches="tight" + ) def plot_series(network, carrier="AC", name="test"): @@ -488,7 +502,7 @@ def plot_series(network, carrier="AC", name="test"): 
new_handles.append(handles[i]) new_labels.append(labels[i]) - ax.legend(new_handles, new_labels, ncol=3, loc="upper left") + ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False) ax.set_xlim([start, stop]) ax.set_ylim([-1300, 1900]) ax.grid(True) @@ -502,41 +516,28 @@ def plot_series(network, carrier="AC", name="test"): transparent=True) -# %% if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils import Dict - import yaml - snakemake = Dict() - with open('config.yaml') as f: - snakemake.config = yaml.safe_load(f) - snakemake.config['run'] = "retro_vs_noretro" - snakemake.wildcards = {"lv": "1.0"} # lv1.0, lv1.25, lvopt - name = "elec_s_48_lv{}__Co2L0-3H-T-H-B".format(snakemake.wildcards["lv"]) - suffix = "_retro_tes" - name = name + suffix - snakemake.input = Dict() - snakemake.output = Dict( - map=(snakemake.config['results_dir'] + snakemake.config['run'] - + "/maps/{}".format(name)), - today=(snakemake.config['results_dir'] + snakemake.config['run'] - + "/maps/{}.pdf".format(name))) - snakemake.input.scenario = "lv" + snakemake.wildcards["lv"] -# snakemake.config["run"] = "bio_costs" - path = snakemake.config['results_dir'] + snakemake.config['run'] - snakemake.input.network = (path + - "/postnetworks/{}.nc" - .format(name)) - snakemake.output.network = (path + - "/maps/{}" - .format(name)) + from helper import mock_snakemake + snakemake = mock_snakemake( + 'plot_network', + simpl='', + clusters=48, + lv=1.0, + sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', + planning_horizons=2050, + ) - n = pypsa.Network(snakemake.input.network, - override_component_attrs=override_component_attrs) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) - plot_map(n, components=["generators", "links", "stores", "storage_units"], - bus_size_factor=1.5e10, transmission=False) + 
map_opts = snakemake.config['plotting']['map'] + + plot_map(n, + components=["generators", "links", "stores", "storage_units"], + bus_size_factor=1.5e10, + transmission=False + ) plot_h2_map(n) plot_map_without(n) diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py index 9b3a81e1..45a442a1 100644 --- a/scripts/plot_summary.py +++ b/scripts/plot_summary.py @@ -3,41 +3,58 @@ import numpy as np import pandas as pd -#allow plotting without Xwindows -import matplotlib -matplotlib.use('Agg') - import matplotlib.pyplot as plt +plt.style.use('ggplot') from prepare_sector_network import co2_emissions_year #consolidate and rename def rename_techs(label): - prefix_to_remove = ["residential ","services ","urban ","rural ","central ","decentral "] + prefix_to_remove = [ + "residential ", + "services ", + "urban ", + "rural ", + "central ", + "decentral " + ] - rename_if_contains = ["CHP","gas boiler","biogas","solar thermal","air heat pump","ground heat pump","resistive heater","Fischer-Tropsch"] + rename_if_contains = [ + "CHP", + "gas boiler", + "biogas", + "solar thermal", + "air heat pump", + "ground heat pump", + "resistive heater", + "Fischer-Tropsch" + ] - rename_if_contains_dict = {"water tanks" : "hot water storage", - "retrofitting" : "building retrofitting", - "H2" : "hydrogen storage", - "battery" : "battery storage", - "CC" : "CC"} + rename_if_contains_dict = { + "water tanks": "hot water storage", + "retrofitting": "building retrofitting", + "H2": "hydrogen storage", + "battery": "battery storage", + "CC": "CC" + } - rename = {"solar" : "solar PV", - "Sabatier" : "methanation", - "offwind" : "offshore wind", - "offwind-ac" : "offshore wind (AC)", - "offwind-dc" : "offshore wind (DC)", - "onwind" : "onshore wind", - "ror" : "hydroelectricity", - "hydro" : "hydroelectricity", - "PHS" : "hydroelectricity", - "co2 Store" : "DAC", - "co2 stored" : "CO2 sequestration", - "AC" : "transmission lines", - "DC" : "transmission lines", - "B2B" : "transmission 
lines"} + rename = { + "solar": "solar PV", + "Sabatier": "methanation", + "offwind": "offshore wind", + "offwind-ac": "offshore wind (AC)", + "offwind-dc": "offshore wind (DC)", + "onwind": "onshore wind", + "ror": "hydroelectricity", + "hydro": "hydroelectricity", + "PHS": "hydroelectricity", + "co2 Store": "DAC", + "co2 stored": "CO2 sequestration", + "AC": "transmission lines", + "DC": "transmission lines", + "B2B": "transmission lines" + } for ptr in prefix_to_remove: if label[:len(ptr)] == ptr: @@ -57,18 +74,56 @@ def rename_techs(label): return label -preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","solid biomass","biogas","onshore wind","offshore wind","offshore wind (AC)","offshore wind (DC)","solar PV","solar thermal","solar","building retrofitting","ground heat pump","air heat pump","heat pump","resistive heater","power-to-heat","gas-to-power/heat","CHP","OCGT","gas boiler","gas","natural gas","helmeth","methanation","hydrogen storage","power-to-gas","power-to-liquid","battery storage","hot water storage","CO2 sequestration"]) +preferred_order = pd.Index([ + "transmission lines", + "hydroelectricity", + "hydro reservoir", + "run of river", + "pumped hydro storage", + "solid biomass", + "biogas", + "onshore wind", + "offshore wind", + "offshore wind (AC)", + "offshore wind (DC)", + "solar PV", + "solar thermal", + "solar", + "building retrofitting", + "ground heat pump", + "air heat pump", + "heat pump", + "resistive heater", + "power-to-heat", + "gas-to-power/heat", + "CHP", + "OCGT", + "gas boiler", + "gas", + "natural gas", + "helmeth", + "methanation", + "hydrogen storage", + "power-to-gas", + "power-to-liquid", + "battery storage", + "hot water storage", + "CO2 sequestration" +]) def plot_costs(): - cost_df = pd.read_csv(snakemake.input.costs,index_col=list(range(3)),header=list(range(n_header))) - + cost_df = pd.read_csv( + snakemake.input.costs, + index_col=list(range(3)), 
+ header=list(range(n_header)) + ) df = cost_df.groupby(cost_df.index.get_level_values(2)).sum() #convert to billions - df = df/1e9 + df = df / 1e9 df = df.groupby(df.index.map(rename_techs)).sum() @@ -86,11 +141,14 @@ def plot_costs(): new_columns = df.sum().sort_values().index - fig, ax = plt.subplots() - fig.set_size_inches((12,8)) - - df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) + fig, ax = plt.subplots(figsize=(12,8)) + df.loc[new_index,new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index] + ) handles,labels = ax.get_legend_handles_labels() @@ -103,24 +161,25 @@ def plot_costs(): ax.set_xlabel("") - ax.grid(axis="y") + ax.grid(axis='x') - ax.legend(handles,labels,ncol=4,loc="upper left") + ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1,1], frameon=False) - - fig.tight_layout() - - fig.savefig(snakemake.output.costs,transparent=True) + fig.savefig(snakemake.output.costs, bbox_inches='tight') def plot_energy(): - energy_df = pd.read_csv(snakemake.input.energy,index_col=list(range(2)),header=list(range(n_header))) + energy_df = pd.read_csv( + snakemake.input.energy, + index_col=list(range(2)), + header=list(range(n_header)) + ) df = energy_df.groupby(energy_df.index.get_level_values(1)).sum() #convert MWh to TWh - df = df/1e6 + df = df / 1e6 df = df.groupby(df.index.map(rename_techs)).sum() @@ -139,53 +198,57 @@ def plot_energy(): new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order)) new_columns = df.columns.sort_values() - #new_columns = df.sum().sort_values().index - fig, ax = plt.subplots() - fig.set_size_inches((12,8)) + + fig, ax = plt.subplots(figsize=(12,8)) - print(df.loc[new_index,new_columns]) - - 
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) + print(df.loc[new_index, new_columns]) + df.loc[new_index, new_columns].T.plot( + kind="bar", + ax=ax, + stacked=True, + color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index] + ) handles,labels = ax.get_legend_handles_labels() handles.reverse() labels.reverse() - ax.set_ylim([snakemake.config['plotting']['energy_min'],snakemake.config['plotting']['energy_max']]) + ax.set_ylim([snakemake.config['plotting']['energy_min'], snakemake.config['plotting']['energy_max']]) ax.set_ylabel("Energy [TWh/a]") ax.set_xlabel("") - ax.grid(axis="y") + ax.grid(axis="x") - ax.legend(handles,labels,ncol=4,loc="upper left") + ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False) - - fig.tight_layout() - - fig.savefig(snakemake.output.energy,transparent=True) + fig.savefig(snakemake.output.energy, bbox_inches='tight') def plot_balances(): - co2_carriers = ["co2","co2 stored","process emissions"] + co2_carriers = ["co2", "co2 stored", "process emissions"] - balances_df = pd.read_csv(snakemake.input.balances,index_col=list(range(3)),header=list(range(n_header))) + balances_df = pd.read_csv( + snakemake.input.balances, + index_col=list(range(3)), + header=list(range(n_header)) + ) - balances = {i.replace(" ","_") : [i] for i in balances_df.index.levels[0]} + balances = {i.replace(" ","_"): [i] for i in balances_df.index.levels[0]} balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers] - for k,v in balances.items(): + for k, v in balances.items(): df = balances_df.loc[v] df = df.groupby(df.index.get_level_values(2)).sum() #convert MWh to TWh - df = df/1e6 + df = df / 1e6 #remove trailing link ports df.index = [i[:-1] if ((i != "co2") and (i[-1:] in ["0","1","2","3"])) else i for i in df.index] @@ -209,9 +272,7 @@ def plot_balances(): new_columns = 
df.columns.sort_values() - - fig, ax = plt.subplots() - fig.set_size_inches((12,8)) + fig, ax = plt.subplots(figsize=(12,8)) df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]) @@ -228,14 +289,13 @@ def plot_balances(): ax.set_xlabel("") - ax.grid(axis="y") + ax.grid(axis="x") - ax.legend(handles,labels,ncol=4,loc="upper left") + ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False) - fig.tight_layout() + fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches='tight') - fig.savefig(snakemake.output.balances[:-10] + k + ".pdf",transparent=True) def historical_emissions(cts): """ @@ -369,25 +429,11 @@ def plot_carbon_budget_distribution(): path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/' plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300) + if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils import Dict - import yaml - snakemake = Dict() - with open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) - snakemake.input = Dict() - snakemake.output = Dict() - snakemake.wildcards = Dict() - #snakemake.wildcards['sector_opts']='3H-T-H-B-I-solar3-dist1-cb48be3' - - for item in ["costs", "energy"]: - snakemake.input[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'],item=item) - snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/graphs/{item}.pdf'.format(name=snakemake.config['run'],item=item) - snakemake.input["balances"] = snakemake.config['summary_dir'] + '/{name}/csvs/supply_energy.csv'.format(name=snakemake.config['run'],item=item) - snakemake.output["balances"] = snakemake.config['summary_dir'] + '/{name}/graphs/balances-energy.csv'.format(name=snakemake.config['run'],item=item) - + from helper 
import mock_snakemake + snakemake = mock_snakemake('plot_summary') n_header = 4 diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 94fb5048..6e498b63 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -1,61 +1,68 @@ # coding: utf-8 +import pypsa +import re +import os +import pytz + +import pandas as pd +import numpy as np +import xarray as xr + +from itertools import product +from scipy.stats import beta +from vresutils.costdata import annuity + +from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals +from helper import override_component_attrs + import logging logger = logging.getLogger(__name__) -import pandas as pd -idx = pd.IndexSlice - -import numpy as np -import xarray as xr -import re, os, sys - -from six import iteritems, string_types - -import pypsa - -import yaml - -import pytz - -from vresutils.costdata import annuity - -from scipy.stats import beta -from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals - -#First tell PyPSA that links can have multiple outputs by -#overriding the component_attrs. This can be done for -#as many buses as you need with format busi for i = 2,3,4,5,.... 
-#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs -override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) -override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus4"] = ["string",np.nan,np.nan,"4th bus","Input (optional)"] -override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency4"] = ["static or series","per unit",1.,"4th bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"] -override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"] -override_component_attrs["Link"].loc["p4"] = ["series","MW",0.,"4th bus output","Output"] +def emission_sectors_from_opts(opts): -override_component_attrs["Link"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Link"].loc["lifetime"] = ["float","years",np.nan,"lifetime","Input (optional)"] -override_component_attrs["Generator"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Generator"].loc["lifetime"] = ["float","years",np.nan,"lifetime","Input (optional)"] -override_component_attrs["Store"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"] -override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"lifetime","Input (optional)"] + sectors = ["electricity"] + if "T" in opts: + sectors += [ + "rail non-elec", + "road non-elec" + ] + if "H" in opts: + 
sectors += [ + "residential non-elec", + "services non-elec" + ] + if "I" in opts: + sectors += [ + "industrial non-elec", + "industrial processes", + "domestic aviation", + "international aviation", + "domestic navigation", + "international navigation" + ] + + return sectors +def get(item, investment_year=None): + """Check whether item depends on investment year""" + if isinstance(item, dict): + return item[investment_year] + else: + return item -def co2_emissions_year(cts, opts, year): + +def co2_emissions_year(countries, opts, year): """ Calculate CO2 emissions in one specific year (e.g. 1990 or 2018). """ + eea_co2 = build_eea_co2(year) - # TODO: read Eurostat data from year>2014, this only affects the estimation of - # CO2 emissions for "BA","RS","AL","ME","MK" + # TODO: read Eurostat data from year > 2014 + # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK if year > 2014: eurostat_co2 = build_eurostat_co2(year=2014) else: @@ -63,91 +70,92 @@ def co2_emissions_year(cts, opts, year): co2_totals = build_co2_totals(eea_co2, eurostat_co2) - co2_emissions = co2_totals.loc[cts, "electricity"].sum() + sectors = emission_sectors_from_opts(opts) + + co2_emissions = co2_totals.loc[countries, sectors].sum().sum() + + # convert MtCO2 to GtCO2 + co2_emissions *= 0.001 - if "T" in opts: - co2_emissions += co2_totals.loc[cts, [i+ " non-elec" for i in ["rail","road"]]].sum().sum() - if "H" in opts: - co2_emissions += co2_totals.loc[cts, [i+ " non-elec" for i in ["residential","services"]]].sum().sum() - if "I" in opts: - co2_emissions += co2_totals.loc[cts, ["industrial non-elec","industrial processes", - "domestic aviation","international aviation", - "domestic navigation","international navigation"]].sum().sum() - - co2_emissions *= 0.001 # Convert MtCO2 to GtCO2 return co2_emissions -def build_carbon_budget(o): - #distribute carbon budget following beta or exponential transition path +# TODO: move to own rule with sector-opts wildcard? 
+def build_carbon_budget(o, fn): + """ + Distribute carbon budget following beta or exponential transition path. + """ + # opts? + if "be" in o: #beta decay carbon_budget = float(o[o.find("cb")+2:o.find("be")]) - be=float(o[o.find("be")+2:]) + be = float(o[o.find("be")+2:]) if "ex" in o: #exponential decay carbon_budget = float(o[o.find("cb")+2:o.find("ex")]) - r=float(o[o.find("ex")+2:]) + r = float(o[o.find("ex")+2:]) + countries = n.buses.country.dropna().unique() - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) - pop_layout["ct"] = pop_layout.index.str[:2] - cts = pop_layout.ct.value_counts().index - - e_1990 = co2_emissions_year(cts, opts, year=1990) + e_1990 = co2_emissions_year(countries, opts, year=1990) #emissions at the beginning of the path (last year available 2018) - e_0 = co2_emissions_year(cts, opts, year=2018) + e_0 = co2_emissions_year(countries, opts, year=2018) + #emissions in 2019 and 2020 assumed equal to 2018 and substracted - carbon_budget -= 2*e_0 + carbon_budget -= 2 * e_0 + planning_horizons = snakemake.config['scenario']['planning_horizons'] - CO2_CAP = pd.DataFrame(index = pd.Series(data=planning_horizons, - name='planning_horizon'), - columns=pd.Series(data=[], - name='paths', - dtype='float')) t_0 = planning_horizons[0] + if "be" in o: - #beta decay - t_f = t_0 + (2*carbon_budget/e_0).round(0) # final year in the path + + # final year in the path + t_f = t_0 + (2 * carbon_budget / e_0).round(0) + + def beta_decay(t): + cdf_term = (t - t_0) / (t_f - t_0) + return (e_0 / e_1990) * (1 - beta.cdf(cdf_term, be, be)) + #emissions (relative to 1990) - CO2_CAP[o] = [(e_0/e_1990)*(1-beta.cdf((t-t_0)/(t_f-t_0), be, be)) for t in planning_horizons] + co2_cap = pd.Series({t: beta_decay(t) for t in planning_horizons}, name=o) if "ex" in o: - #exponential decay without delay - T=carbon_budget/e_0 - m=(1+np.sqrt(1+r*T))/T - CO2_CAP[o] = [(e_0/e_1990)*(1+(m+r)*(t-t_0))*np.exp(-m*(t-t_0)) for t in planning_horizons] + + T = 
carbon_budget / e_0 + m = (1 + np.sqrt(1 + r * T)) / T + + def exponential_decay(t): + return (e_0 / e_1990) * (1 + (m + r) * (t - t_0)) * np.exp(-m * (t - t_0)) + + co2_cap = pd.Series({t: exponential_decay(t) for t in planning_horizons}, name=o) + + # TODO log in Snakefile + if not os.path.exists(fn): + os.makedirs(fn) + co2_cap.to_csv(fn, float_format='%.3f') - CO2_CAP.to_csv(path_cb + 'carbon_budget_distribution.csv', sep=',', - line_terminator='\n', float_format='%.3f') - countries=pd.Series(data=cts) - countries.to_csv(path_cb + 'countries.csv', sep=',', - line_terminator='\n', float_format='%.3f') +def add_lifetime_wind_solar(n, costs): + """Add lifetime for solar and wind generators.""" + for carrier in ['solar', 'onwind', 'offwind']: + gen_i = n.generators.index.str.contains(carrier) + n.generators.loc[gen_i, "lifetime"] = costs.at[carrier, 'lifetime'] -def add_lifetime_wind_solar(n): - """ - Add lifetime for solar and wind generators - """ - for carrier in ['solar', 'onwind', 'offwind-dc', 'offwind-ac']: - carrier_name='offwind' if carrier in ['offwind-dc', 'offwind-ac'] else carrier - n.generators.loc[[index for index in n.generators.index.to_list() - if carrier in index], 'lifetime']=costs.at[carrier_name,'lifetime'] - -def update_wind_solar_costs(n,costs): +# TODO merge issue with PyPSA-Eur +def update_wind_solar_costs(n, costs): """ Update costs for wind and solar generators added with pypsa-eur to those cost in the planning year - """ #NB: solar costs are also manipulated for rooftop #when distribution grid is inserted - n.generators.loc[n.generators.carrier=='solar','capital_cost'] = costs.at['solar-utility', 'fixed'] + n.generators.loc[n.generators.carrier=='solar', 'capital_cost'] = costs.at['solar-utility', 'fixed'] - n.generators.loc[n.generators.carrier=='onwind','capital_cost'] = costs.at['onwind', 'fixed'] + n.generators.loc[n.generators.carrier=='onwind', 'capital_cost'] = costs.at['onwind', 'fixed'] #for offshore wind, need to calculated 
connection costs @@ -164,7 +172,7 @@ def update_wind_solar_costs(n,costs): clustermaps = busmap_s.map(busmap) #code adapted from pypsa-eur/scripts/add_electricity.py - for connection in ['dc','ac']: + for connection in ['dc', 'ac']: tech = "offwind-" + connection profile = snakemake.input['profile_offwind_' + connection] with xr.open_dataset(profile) as ds: @@ -196,43 +204,45 @@ def update_wind_solar_costs(n,costs): logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}" .format(connection_cost[0].min(), connection_cost[0].max(), tech)) - n.generators.loc[n.generators.carrier==tech,'capital_cost'] = capital_cost.rename(index=lambda node: node + ' ' + tech) + n.generators.loc[n.generators.carrier==tech, 'capital_cost'] = capital_cost.rename(index=lambda node: node + ' ' + tech) def add_carrier_buses(n, carriers): """ Add buses to connect e.g. coal, nuclear and oil plants """ + if isinstance(carriers, str): + carriers = [carriers] for carrier in carriers: - n.add("Carrier", - carrier) + n.add("Carrier", carrier) - #use madd to get location inserted - n.madd("Bus", - ["EU " + carrier], - location="EU", - carrier=carrier) + n.add("Bus", + "EU " + carrier, + location="EU", + carrier=carrier + ) - #use madd to get carrier inserted - n.madd("Store", - ["EU " + carrier + " Store"], - bus=["EU " + carrier], - e_nom_extendable=True, - e_cyclic=True, - carrier=carrier, - capital_cost=0.) #could correct to e.g. 0.2 EUR/kWh * annuity and O&M + #capital cost could be corrected to e.g. 
0.2 EUR/kWh * annuity and O&M + n.add("Store", + "EU " + carrier + " Store", + bus="EU " + carrier, + e_nom_extendable=True, + e_cyclic=True, + carrier=carrier, + ) n.add("Generator", - "EU " + carrier, - bus="EU " + carrier, - p_nom_extendable=True, - carrier=carrier, - capital_cost=0., - marginal_cost=costs.at[carrier,'fuel']) + "EU " + carrier, + bus="EU " + carrier, + p_nom_extendable=True, + carrier=carrier, + marginal_cost=costs.at[carrier, 'fuel'] + ) +# TODO: PyPSA-Eur merge issue def remove_elec_base_techs(n): """remove conventional generators (e.g. OCGT) and storage units (e.g. batteries and H2) from base electricity-only network, since they're added here differently using links @@ -241,13 +251,14 @@ def remove_elec_base_techs(n): for c in n.iterate_components(snakemake.config["pypsa_eur"]): to_keep = snakemake.config["pypsa_eur"][c.name] to_remove = pd.Index(c.df.carrier.unique()).symmetric_difference(to_keep) - print("Removing",c.list_name,"with carrier",to_remove) + print("Removing", c.list_name, "with carrier", to_remove) names = c.df.index[c.df.carrier.isin(to_remove)] print(names) n.mremove(c.name, names) n.carriers.drop(to_remove, inplace=True, errors="ignore") +# TODO: PyPSA-Eur merge issue def remove_non_electric_buses(n): """ remove buses from pypsa-eur with carriers which are not AC buses @@ -256,166 +267,127 @@ def remove_non_electric_buses(n): n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])] -def add_co2_tracking(n): +def patch_electricity_network(n): + remove_elec_base_techs(n) + remove_non_electric_buses(n) + update_wind_solar_costs(n, costs) + n.loads["carrier"] = "electricity" + n.buses["location"] = n.buses.index - #minus sign because opposite to how fossil fuels used: - #CH4 burning puts CH4 down, atmosphere up - n.add("Carrier","co2", +def add_co2_tracking(n, options): + + # minus sign because opposite to how fossil fuels used: + # CH4 burning puts CH4 down, atmosphere up + n.add("Carrier", "co2", co2_emissions=-1.) 
- #this tracks CO2 in the atmosphere - n.madd("Bus", - ["co2 atmosphere"], - location="EU", - carrier="co2") + # this tracks CO2 in the atmosphere + n.add("Bus", + "co2 atmosphere", + location="EU", + carrier="co2" + ) - #NB: can also be negative - n.madd("Store",["co2 atmosphere"], - e_nom_extendable=True, - e_min_pu=-1, - carrier="co2", - bus="co2 atmosphere") + # can also be negative + n.add("Store", + "co2 atmosphere", + e_nom_extendable=True, + e_min_pu=-1, + carrier="co2", + bus="co2 atmosphere" + ) - #this tracks CO2 stored, e.g. underground - n.madd("Bus", - ["co2 stored"], - location="EU", - carrier="co2 stored") + # this tracks CO2 stored, e.g. underground + n.add("Bus", + "co2 stored", + location="EU", + carrier="co2 stored" + ) - n.madd("Store",["co2 stored"], - e_nom_extendable=True, - e_nom_max=options['co2_sequestration_potential']*1e6, - capital_cost=options['co2_sequestration_cost'], - carrier="co2 stored", - bus="co2 stored") + n.add("Store", + "co2 stored", + e_nom_extendable=True, + e_nom_max=options['co2_sequestration_potential'] * 1e6, + capital_cost=options['co2_sequestration_cost'], + carrier="co2 stored", + bus="co2 stored" + ) if options['co2_vent']: - n.madd("Link",["co2 vent"], - bus0="co2 stored", - bus1="co2 atmosphere", - carrier="co2 vent", - efficiency=1., - p_nom_extendable=True) -def add_dac(n): - - heat_buses = n.buses.index[n.buses.carrier.isin(["urban central heat", - "services urban decentral heat"])] - locations = n.buses.location[heat_buses] - - n.madd("Link", - locations, - suffix=" DAC", - bus0="co2 atmosphere", - bus1="co2 stored", - bus2=locations.values, - bus3=heat_buses, - carrier="DAC", - capital_cost=costs.at['direct air capture','fixed'], - efficiency=1., - efficiency2=-(costs.at['direct air capture','electricity-input'] + costs.at['direct air capture','compression-electricity-input']), - efficiency3=-(costs.at['direct air capture','heat-input'] - costs.at['direct air capture','compression-heat-output']), - 
p_nom_extendable=True, - lifetime=costs.at['direct air capture','lifetime']) - - -def add_co2limit(n, Nyears=1.,limit=0.): - - cts = pop_layout.ct.value_counts().index - - co2_limit = co2_totals.loc[cts, "electricity"].sum() - - if "T" in opts: - co2_limit += co2_totals.loc[cts, [i+ " non-elec" for i in ["rail","road"]]].sum().sum() - if "H" in opts: - co2_limit += co2_totals.loc[cts, [i+ " non-elec" for i in ["residential","services"]]].sum().sum() - if "I" in opts: - co2_limit += co2_totals.loc[cts, ["industrial non-elec","industrial processes", - "domestic aviation","international aviation", - "domestic navigation","international navigation"]].sum().sum() - - co2_limit *= limit*Nyears - - n.add("GlobalConstraint", "CO2Limit", - carrier_attribute="co2_emissions", sense="<=", - constant=co2_limit) - -def add_emission_prices(n, emission_prices=None, exclude_co2=False): - assert False, "Needs to be fixed, adds NAN" - - if emission_prices is None: - emission_prices = snakemake.config['costs']['emission_prices'] - if exclude_co2: emission_prices.pop('co2') - ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * n.carriers).sum(axis=1) - n.generators['marginal_cost'] += n.generators.carrier.map(ep) - n.storage_units['marginal_cost'] += n.storage_units.carrier.map(ep) - -def set_line_s_max_pu(n): - # set n-1 security margin to 0.5 for 37 clusters and to 0.7 from 200 clusters - # 128 reproduces 98% of line volume in TWkm, but clustering distortions inside node - n_clusters = len(n.buses.index[n.buses.carrier == "AC"]) - s_max_pu = np.clip(0.5 + 0.2 * (n_clusters - 37) / (200 - 37), 0.5, 0.7) - n.lines['s_max_pu'] = s_max_pu - - dc_b = n.links.carrier == 'DC' - n.links.loc[dc_b, 'p_max_pu'] = snakemake.config['links']['p_max_pu'] - n.links.loc[dc_b, 'p_min_pu'] = - snakemake.config['links']['p_max_pu'] - -def set_line_volume_limit(n, lv): - - dc_b = n.links.carrier == 'DC' - - if lv != "opt": - lv = float(lv) - - # Either line_volume cap or cost - 
n.lines['capital_cost'] = 0. - n.links.loc[dc_b,'capital_cost'] = 0. - else: - n.lines['capital_cost'] = (n.lines['length'] * - costs.at['HVAC overhead', 'fixed']) - - #add HVDC inverter post factor, to maintain consistency with LV limit - n.links.loc[dc_b, 'capital_cost'] = (n.links.loc[dc_b, 'length'] * - costs.at['HVDC overhead', 'fixed'])# + - #costs.at['HVDC inverter pair', 'fixed']) - - - - if lv != 1.0: - lines_s_nom = n.lines.s_nom.where( - n.lines.type == '', - np.sqrt(3) * n.lines.num_parallel * - n.lines.type.map(n.line_types.i_nom) * - n.lines.bus0.map(n.buses.v_nom) + n.add("Link", + "co2 vent", + bus0="co2 stored", + bus1="co2 atmosphere", + carrier="co2 vent", + efficiency=1., + p_nom_extendable=True ) - n.lines['s_nom_min'] = lines_s_nom - n.links.loc[dc_b,'p_nom_min'] = n.links['p_nom'] +def add_dac(n, costs): - n.lines['s_nom_extendable'] = True - n.links.loc[dc_b,'p_nom_extendable'] = True + heat_carriers = ["urban central heat", "services urban decentral heat"] + heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)] + locations = n.buses.location[heat_buses] - if lv != "opt": - n.line_volume_limit = lv * ((lines_s_nom * n.lines['length']).sum() + - n.links.loc[dc_b].eval('p_nom * length').sum()) + efficiency2 = -(costs.at['direct air capture', 'electricity-input'] + costs.at['direct air capture', 'compression-electricity-input']) + efficiency3 = -(costs.at['direct air capture', 'heat-input'] - costs.at['direct air capture', 'compression-heat-output']) - return n + n.madd("Link", + locations, + suffix=" DAC", + bus0="co2 atmosphere", + bus1="co2 stored", + bus2=locations.values, + bus3=heat_buses, + carrier="DAC", + capital_cost=costs.at['direct air capture', 'fixed'], + efficiency=1., + efficiency2=efficiency2, + efficiency3=efficiency3, + p_nom_extendable=True, + lifetime=costs.at['direct air capture', 'lifetime'] + ) + +def add_co2limit(n, Nyears=1., limit=0.): + + print("Adding CO2 budget limit as per unit of 1990 levels of", limit) 
+ + countries = n.buses.country.dropna().unique() + + sectors = emission_sectors_from_opts(opts) + + # convert Mt to tCO2 + co2_totals = 1e6 * pd.read_csv(snakemake.input.co2_totals_name, index_col=0) + + co2_limit = co2_totals.loc[countries, sectors].sum().sum() + + co2_limit *= limit * Nyears + + n.add("GlobalConstraint", + "CO2Limit", + carrier_attribute="co2_emissions", + sense="<=", + constant=co2_limit + ) + +# TODO PyPSA-Eur merge issue def average_every_nhours(n, offset): - logger.info('Resampling the network to {}'.format(offset)) + logger.info(f'Resampling the network to {offset}') m = n.copy(with_time=False) + # TODO is this still needed? #fix copying of network attributes #copied from pypsa/io.py, should be in pypsa/components.py#Network.copy() - allowed_types = (float,int,bool) + string_types + tuple(np.typeDict.values()) + allowed_types = (float, int, bool, str) + tuple(np.typeDict.values()) attrs = dict((attr, getattr(n, attr)) for attr in dir(n) if (not attr.startswith("__") and isinstance(getattr(n,attr), allowed_types))) - for k,v in iteritems(attrs): + for k,v in attrs.items(): setattr(m,k,v) snapshot_weightings = n.snapshot_weightings.resample(offset).sum() @@ -424,7 +396,7 @@ def average_every_nhours(n, offset): for c in n.iterate_components(): pnl = getattr(m, c.list_name+"_t") - for k, df in iteritems(c.pnl): + for k, df in c.pnl.items(): if not df.empty: if c.list_name == "stores" and k == "e_max_pu": pnl[k] = df.resample(offset).min() @@ -436,45 +408,45 @@ def average_every_nhours(n, offset): return m -def generate_periodic_profiles(dt_index=pd.date_range("2011-01-01 00:00","2011-12-31 23:00",freq="H",tz="UTC"), - nodes=[], - weekly_profile=range(24*7)): - """Give a 24*7 long list of weekly hourly profiles, generate this for - each country for the period dt_index, taking account of time - zones and Summer Time. 
- +def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None): + """ + Give a 24*7 long list of weekly hourly profiles, generate this for each + country for the period dt_index, taking account of time zones and summer time. """ + weekly_profile = pd.Series(weekly_profile, range(24*7)) - weekly_profile = pd.Series(weekly_profile,range(24*7)) + week_df = pd.DataFrame(index=dt_index, columns=nodes) - week_df = pd.DataFrame(index=dt_index,columns=nodes) + for node in nodes: + timezone = pytz.timezone(pytz.country_timezones[node[:2]][0]) + tz_dt_index = dt_index.tz_convert(timezone) + week_df[node] = [24 * dt.weekday() + dt.hour for dt in tz_dt_index] + week_df[node] = week_df[node].map(weekly_profile) - for ct in nodes: - week_df[ct] = [24*dt.weekday()+dt.hour for dt in dt_index.tz_convert(pytz.timezone(timezone_mappings[ct[:2]]))] - week_df[ct] = week_df[ct].map(weekly_profile) + week_df = week_df.tz_localize(localize) return week_df - -def shift_df(df,hours=1): - """Works both on Series and DataFrame""" +def cycling_shift(df, steps=1): + """Cyclic shift on index of pd.Series|pd.DataFrame by number of steps""" df = df.copy() - df.values[:] = np.concatenate([df.values[-hours:], - df.values[:-hours]]) + new_index = np.roll(df.index, steps) + df.values[:] = df.reindex(index=new_index).values return df -def transport_degree_factor(temperature,deadband_lower=15,deadband_upper=20, - lower_degree_factor=0.5, - upper_degree_factor=1.6): - - """Work out how much energy demand in vehicles increases due to heating and cooling. +def transport_degree_factor( + temperature, + deadband_lower=15, + deadband_upper=20, + lower_degree_factor=0.5, + upper_degree_factor=1.6): + """ + Work out how much energy demand in vehicles increases due to heating and cooling. There is a deadband where there is no increase. - Degree factors are % increase in demand compared to no heating/cooling fuel consumption. 
- Returns per unit increase in demand for each place and time """ @@ -482,14 +454,17 @@ def transport_degree_factor(temperature,deadband_lower=15,deadband_upper=20, dd[(temperature > deadband_lower) & (temperature < deadband_upper)] = 0. - dd[temperature < deadband_lower] = lower_degree_factor/100.*(deadband_lower-temperature[temperature < deadband_lower]) + dT_lower = deadband_lower - temperature[temperature < deadband_lower] + dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower - dd[temperature > deadband_upper] = upper_degree_factor/100.*(temperature[temperature > deadband_upper]-deadband_upper) + dT_upper = temperature[temperature > deadband_upper] - deadband_upper + dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper return dd -def prepare_data(network): +# TODO separate sectors and move into own rules +def prepare_data(n): ############## @@ -497,155 +472,151 @@ def prepare_data(network): ############## - ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).T.to_pandas().reindex(index=network.snapshots) - gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).T.to_pandas().reindex(index=network.snapshots) + ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots) + gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots) - solar_thermal = xr.open_dataarray(snakemake.input.solar_thermal_total).T.to_pandas().reindex(index=network.snapshots) - #1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 - solar_thermal = options['solar_cf_correction'] * solar_thermal/1e3 + solar_thermal = xr.open_dataarray(snakemake.input.solar_thermal_total).to_pandas().reindex(index=n.snapshots) + # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2 + solar_thermal = options['solar_cf_correction'] * solar_thermal / 1e3 - energy_totals = pd.read_csv(snakemake.input.energy_totals_name,index_col=0) + energy_totals = 
pd.read_csv(snakemake.input.energy_totals_name, index_col=0) nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.) nodal_energy_totals.index = pop_layout.index - nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction,axis=0) + nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0) - #copy forward the daily average heat demand into each hour, so it can be multipled by the intraday profile - daily_space_heat_demand = xr.open_dataarray(snakemake.input.heat_demand_total).T.to_pandas().reindex(index=network.snapshots, method="ffill") + # copy forward the daily average heat demand into each hour, so it can be multipled by the intraday profile + daily_space_heat_demand = xr.open_dataarray(snakemake.input.heat_demand_total).to_pandas().reindex(index=n.snapshots, method="ffill") - intraday_profiles = pd.read_csv(snakemake.input.heat_profile,index_col=0) + intraday_profiles = pd.read_csv(snakemake.input.heat_profile, index_col=0) - sectors = ["residential","services"] - uses = ["water","space"] + sectors = ["residential", "services"] + uses = ["water", "space"] heat_demand = {} electric_heat_supply = {} - for sector in sectors: - for use in uses: - intraday_year_profile = generate_periodic_profiles(daily_space_heat_demand.index.tz_localize("UTC"), - nodes=daily_space_heat_demand.columns, - weekly_profile=(list(intraday_profiles["{} {} weekday".format(sector,use)])*5 + list(intraday_profiles["{} {} weekend".format(sector,use)])*2)).tz_localize(None) + for sector, use in product(sectors, uses): + weekday = list(intraday_profiles[f"{sector} {use} weekday"]) + weekend = list(intraday_profiles[f"{sector} {use} weekend"]) + weekly_profile = weekday * 5 + weekend * 2 + intraday_year_profile = generate_periodic_profiles( + daily_space_heat_demand.index.tz_localize("UTC"), + nodes=daily_space_heat_demand.columns, + weekly_profile=weekly_profile + ) - if use == "space": - heat_demand_shape = 
daily_space_heat_demand*intraday_year_profile - else: - heat_demand_shape = intraday_year_profile + if use == "space": + heat_demand_shape = daily_space_heat_demand * intraday_year_profile + else: + heat_demand_shape = intraday_year_profile - heat_demand["{} {}".format(sector,use)] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals["total {} {}".format(sector,use)])*1e6 - electric_heat_supply["{} {}".format(sector,use)] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals["electricity {} {}".format(sector,use)])*1e6 + heat_demand[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals[f"total {sector} {use}"]) * 1e6 + electric_heat_supply[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals[f"electricity {sector} {use}"]) * 1e6 - heat_demand = pd.concat(heat_demand,axis=1) - electric_heat_supply = pd.concat(electric_heat_supply,axis=1) + heat_demand = pd.concat(heat_demand, axis=1) + electric_heat_supply = pd.concat(electric_heat_supply, axis=1) - #subtract from electricity load since heat demand already in heat_demand + # subtract from electricity load since heat demand already in heat_demand electric_nodes = n.loads.index[n.loads.carrier == "electricity"] - n.loads_t.p_set[electric_nodes] = n.loads_t.p_set[electric_nodes] - electric_heat_supply.groupby(level=1,axis=1).sum()[electric_nodes] + n.loads_t.p_set[electric_nodes] = n.loads_t.p_set[electric_nodes] - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes] ############## #Transport ############## - ## Get overall demand curve for all vehicles - traffic = pd.read_csv(os.path.join(snakemake.input.traffic_data,"KFZ__count"), - skiprows=2)["count"] + traffic = pd.read_csv(snakemake.input.traffic_data_KFZ, skiprows=2, usecols=["count"], squeeze=True) #Generate profiles - transport_shape = generate_periodic_profiles(dt_index=network.snapshots.tz_localize("UTC"), - 
nodes=pop_layout.index, - weekly_profile=traffic.values).tz_localize(None) - transport_shape = transport_shape/transport_shape.sum() + transport_shape = generate_periodic_profiles( + dt_index=n.snapshots.tz_localize("UTC"), + nodes=pop_layout.index, + weekly_profile=traffic.values + ) + transport_shape = transport_shape / transport_shape.sum() - transport_data = pd.read_csv(snakemake.input.transport_name, - index_col=0) + transport_data = pd.read_csv(snakemake.input.transport_name, index_col=0) nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.) nodal_transport_data.index = pop_layout.index - nodal_transport_data["number cars"] = pop_layout["fraction"]*nodal_transport_data["number cars"] - nodal_transport_data.loc[nodal_transport_data["average fuel efficiency"] == 0.,"average fuel efficiency"] = transport_data["average fuel efficiency"].mean() + nodal_transport_data["number cars"] = pop_layout["fraction"] * nodal_transport_data["number cars"] + nodal_transport_data.loc[nodal_transport_data["average fuel efficiency"] == 0., "average fuel efficiency"] = transport_data["average fuel efficiency"].mean() - #electric motors are more efficient, so alter transport demand + # electric motors are more efficient, so alter transport demand - #kWh/km from EPA https://www.fueleconomy.gov/feg/ for Tesla Model S - plug_to_wheels_eta = 0.20 - battery_to_wheels_eta = plug_to_wheels_eta*0.9 - - efficiency_gain = nodal_transport_data["average fuel efficiency"]/battery_to_wheels_eta + plug_to_wheels_eta = options.get("bev_plug_to_wheel_efficiency", 0.2) + battery_to_wheels_eta = plug_to_wheels_eta * options.get("bev_charge_efficiency", 0.9) + efficiency_gain = nodal_transport_data["average fuel efficiency"] / battery_to_wheels_eta #get heating demand for correction to demand time series - temperature = xr.open_dataarray(snakemake.input.temp_air_total).T.to_pandas() + temperature = xr.open_dataarray(snakemake.input.temp_air_total).to_pandas() - #correction factors for 
vehicle heating - dd_ICE = transport_degree_factor(temperature, - options['transport_heating_deadband_lower'], - options['transport_heating_deadband_upper'], - options['ICE_lower_degree_factor'], - options['ICE_upper_degree_factor']) + # correction factors for vehicle heating + dd_ICE = transport_degree_factor( + temperature, + options['transport_heating_deadband_lower'], + options['transport_heating_deadband_upper'], + options['ICE_lower_degree_factor'], + options['ICE_upper_degree_factor'] + ) - dd_EV = transport_degree_factor(temperature, - options['transport_heating_deadband_lower'], - options['transport_heating_deadband_upper'], - options['EV_lower_degree_factor'], - options['EV_upper_degree_factor']) + dd_EV = transport_degree_factor( + temperature, + options['transport_heating_deadband_lower'], + options['transport_heating_deadband_upper'], + options['EV_lower_degree_factor'], + options['EV_upper_degree_factor'] + ) - #divide out the heating/cooling demand from ICE totals - ICE_correction = (transport_shape*(1+dd_ICE)).sum()/transport_shape.sum() + # divide out the heating/cooling demand from ICE totals + # and multiply back in the heating/cooling demand for EVs + ice_correction = (transport_shape * (1 + dd_ICE)).sum() / transport_shape.sum() - transport = (transport_shape.multiply(nodal_energy_totals["total road"] + nodal_energy_totals["total rail"] - - nodal_energy_totals["electricity rail"])*1e6*Nyears).divide(efficiency_gain*ICE_correction) - - #multiply back in the heating/cooling demand for EVs - transport = transport.multiply(1+dd_EV) + energy_totals_transport = nodal_energy_totals["total road"] + nodal_energy_totals["total rail"] - nodal_energy_totals["electricity rail"] + transport = (transport_shape.multiply(energy_totals_transport) * 1e6 * Nyears).divide(efficiency_gain * ice_correction).multiply(1 + dd_EV) ## derive plugged-in availability for PKW's (cars) - traffic = pd.read_csv(os.path.join(snakemake.input.traffic_data,"Pkw__count"), - 
skiprows=2)["count"] + traffic = pd.read_csv(snakemake.input.traffic_data_Pkw, skiprows=2, usecols=["count"], squeeze=True) - avail_max = 0.95 + avail_max = options.get("bev_avail_max", 0.95) + avail_mean = options.get("bev_avail_mean", 0.8) - avail_mean = 0.8 + avail = avail_max - (avail_max - avail_mean) * (traffic - traffic.min()) / (traffic.mean() - traffic.min()) - avail = avail_max - (avail_max - avail_mean)*(traffic - traffic.min())/(traffic.mean() - traffic.min()) - - avail_profile = generate_periodic_profiles(dt_index=network.snapshots.tz_localize("UTC"), - nodes=pop_layout.index, - weekly_profile=avail.values).tz_localize(None) + avail_profile = generate_periodic_profiles( + dt_index=n.snapshots.tz_localize("UTC"), + nodes=pop_layout.index, + weekly_profile=avail.values + ) dsm_week = np.zeros((24*7,)) - dsm_week[(np.arange(0,7,1)*24+options['bev_dsm_restriction_time'])] = options['bev_dsm_restriction_value'] + dsm_week[(np.arange(0,7,1) * 24 + options['bev_dsm_restriction_time'])] = options['bev_dsm_restriction_value'] - dsm_profile = generate_periodic_profiles(dt_index=network.snapshots.tz_localize("UTC"), - nodes=pop_layout.index, - weekly_profile=dsm_week).tz_localize(None) + dsm_profile = generate_periodic_profiles( + dt_index=n.snapshots.tz_localize("UTC"), + nodes=pop_layout.index, + weekly_profile=dsm_week + ) - ############### - #CO2 - ############### - - #1e6 to convert Mt to tCO2 - co2_totals = 1e6*pd.read_csv(snakemake.input.co2_totals_name,index_col=0) - - - - return nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, co2_totals, nodal_transport_data - + return nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, nodal_transport_data +# TODO checkout PyPSA-Eur script def prepare_costs(cost_file, USD_to_EUR, discount_rate, Nyears, lifetime): #set all asset costs and other parameters - costs = 
pd.read_csv(cost_file,index_col=list(range(2))).sort_index() + costs = pd.read_csv(cost_file, index_col=[0,1]).sort_index() #correct units to MW and EUR - costs.loc[costs.unit.str.contains("/kW"),"value"]*=1e3 - costs.loc[costs.unit.str.contains("USD"),"value"]*=USD_to_EUR + costs.loc[costs.unit.str.contains("/kW"), "value"] *= 1e3 + costs.loc[costs.unit.str.contains("USD"), "value"] *= USD_to_EUR #min_count=1 is important to generate NaNs which are then filled by fillna costs = costs.loc[:, "value"].unstack(level=1).groupby("technology").sum(min_count=1) @@ -659,253 +630,259 @@ def prepare_costs(cost_file, USD_to_EUR, discount_rate, Nyears, lifetime): "lifetime" : lifetime }) - costs["fixed"] = [(annuity(v["lifetime"],v["discount rate"])+v["FOM"]/100.)*v["investment"]*Nyears for i,v in costs.iterrows()] + annuity_factor = lambda v: annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 + costs["fixed"] = [annuity_factor(v) * v["investment"] * Nyears for i, v in costs.iterrows()] + return costs -def add_generation(network): + +def add_generation(n, costs): + print("adding electricity generation") + nodes = pop_layout.index - conventionals = [("OCGT","gas")] + fallback = {"OCGT": "gas"} + conventionals = options.get("conventional_generation", fallback) - for generator,carrier in [("OCGT","gas")]: - network.add("Carrier", - carrier) + add_carrier_buses(n, np.unique(list(conventionals.values()))) - network.madd("Bus", - ["EU " + carrier], - location="EU", - carrier=carrier) + for generator, carrier in conventionals.items(): - #use madd to get carrier inserted - network.madd("Store", - ["EU " + carrier + " Store"], - bus=["EU " + carrier], - e_nom_extendable=True, - e_cyclic=True, - carrier=carrier, - capital_cost=0.) #could correct to e.g. 
0.2 EUR/kWh * annuity and O&M - - network.add("Generator", - "EU " + carrier, - bus="EU " + carrier, - p_nom_extendable=True, - carrier=carrier, - capital_cost=0., - marginal_cost=costs.at[carrier,'fuel']) + n.madd("Link", + nodes + " " + generator, + bus0="EU " + carrier, + bus1=nodes, + bus2="co2 atmosphere", + marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel + capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel + p_nom_extendable=True, + carrier=generator, + efficiency=costs.at[generator, 'efficiency'], + efficiency2=costs.at[carrier, 'CO2 intensity'], + lifetime=costs.at[generator, 'lifetime'] + ) - network.madd("Link", - nodes + " " + generator, - bus0=["EU " + carrier]*len(nodes), - bus1=nodes, - bus2="co2 atmosphere", - marginal_cost=costs.at[generator,'efficiency']*costs.at[generator,'VOM'], #NB: VOM is per MWel - capital_cost=costs.at[generator,'efficiency']*costs.at[generator,'fixed'], #NB: fixed cost is per MWel - p_nom_extendable=True, - carrier=generator, - efficiency=costs.at[generator,'efficiency'], - efficiency2=costs.at[carrier,'CO2 intensity'], - lifetime=costs.at[generator,'lifetime']) +def add_wave(n, wave_cost_factor): -def add_wave(network, wave_cost_factor): + # TODO: handle in Snakefile wave_fn = "data/WindWaveWEC_GLTB.xlsx" - locations = ["FirthForth","Hebrides"] - #in kW - capacity = pd.Series([750,1000,600],["Attenuator","F2HB","MultiPA"]) + capacity = pd.Series({"Attenuator": 750, + "F2HB": 1000, + "MultiPA": 600}) #in EUR/MW - costs = wave_cost_factor*pd.Series([2.5,2,1.5],["Attenuator","F2HB","MultiPA"])*1e6 + annuity_factor = annuity(25,0.07) + 0.03 + costs = 1e6 * wave_cost_factor * annuity_factor * pd.Series({"Attenuator": 2.5, + "F2HB": 2, + "MultiPA": 1.5}) - sheets = {} + sheets = pd.read_excel(wave_fn, sheet_name=["FirthForth", "Hebrides"], + usecols=["Attenuator", "F2HB", "MultiPA"], + index_col=0, skiprows=[0], 
parse_dates=True) - for l in locations: - sheets[l] = pd.read_excel(wave_fn, - index_col=0,skiprows=[0],parse_dates=True, - sheet_name=l) - - to_drop = ["Vestas 3MW","Vestas 8MW"] - wave = pd.concat([sheets[l].drop(to_drop,axis=1).divide(capacity,axis=1) for l in locations], + wave = pd.concat([sheets[l].divide(capacity, axis=1) for l in locations], keys=locations, axis=1) for wave_type in costs.index: n.add("Generator", - "Hebrides "+wave_type, - bus="GB4 0", - p_nom_extendable=True, - carrier="wave", - capital_cost=(annuity(25,0.07)+0.03)*costs[wave_type], - p_max_pu=wave["Hebrides",wave_type]) + "Hebrides " + wave_type, + bus="GB4 0", # TODO this location is hardcoded + p_nom_extendable=True, + carrier="wave", + capital_cost=costs[wave_type], + p_max_pu=wave["Hebrides", wave_type] + ) +def insert_electricity_distribution_grid(n, costs): + # TODO pop_layout? + # TODO options? -def insert_electricity_distribution_grid(network): print("Inserting electricity distribution grid with investment cost factor of", - snakemake.config["sector"]['electricity_distribution_grid_cost_factor']) + options['electricity_distribution_grid_cost_factor']) nodes = pop_layout.index - network.madd("Bus", - nodes+ " low voltage", - location=nodes, - carrier="low voltage") + cost_factor = options['electricity_distribution_grid_cost_factor'] - network.madd("Link", - nodes + " electricity distribution grid", - bus0=nodes, - bus1=nodes + " low voltage", - p_nom_extendable=True, - p_min_pu=-1, - carrier="electricity distribution grid", - efficiency=1, - marginal_cost=0, - lifetime=costs.at['electricity distribution grid','lifetime'], - capital_cost=costs.at['electricity distribution grid','fixed']*snakemake.config["sector"]['electricity_distribution_grid_cost_factor']) + n.madd("Bus", + nodes + " low voltage", + location=nodes, + carrier="low voltage" + ) + n.madd("Link", + nodes + " electricity distribution grid", + bus0=nodes, + bus1=nodes + " low voltage", + p_nom_extendable=True, + 
p_min_pu=-1, + carrier="electricity distribution grid", + efficiency=1, + lifetime=costs.at['electricity distribution grid', 'lifetime'], + capital_cost=costs.at['electricity distribution grid', 'fixed'] * cost_factor + ) - #this catches regular electricity load and "industry electricity" - loads = network.loads.index[network.loads.carrier.str.contains("electricity")] - network.loads.loc[loads,"bus"] += " low voltage" + # this catches regular electricity load and "industry electricity" + loads = n.loads.index[n.loads.carrier.str.contains("electricity")] + n.loads.loc[loads, "bus"] += " low voltage" - bevs = network.links.index[network.links.carrier == "BEV charger"] - network.links.loc[bevs,"bus0"] += " low voltage" + bevs = n.links.index[n.links.carrier == "BEV charger"] + n.links.loc[bevs, "bus0"] += " low voltage" - v2gs = network.links.index[network.links.carrier == "V2G"] - network.links.loc[v2gs,"bus1"] += " low voltage" + v2gs = n.links.index[n.links.carrier == "V2G"] + n.links.loc[v2gs, "bus1"] += " low voltage" - hps = network.links.index[network.links.carrier.str.contains("heat pump")] - network.links.loc[hps,"bus0"] += " low voltage" + hps = n.links.index[n.links.carrier.str.contains("heat pump")] + n.links.loc[hps, "bus0"] += " low voltage" - rh = network.links.index[network.links.carrier.str.contains("resistive heater")] - network.links.loc[rh, "bus0"] += " low voltage" + rh = n.links.index[n.links.carrier.str.contains("resistive heater")] + n.links.loc[rh, "bus0"] += " low voltage" - mchp = network.links.index[network.links.carrier.str.contains("micro gas")] - network.links.loc[mchp, "bus1"] += " low voltage" + mchp = n.links.index[n.links.carrier.str.contains("micro gas")] + n.links.loc[mchp, "bus1"] += " low voltage" - #set existing solar to cost of utility cost rather the 50-50 rooftop-utility - solar = network.generators.index[network.generators.carrier == "solar"] - network.generators.loc[solar, "capital_cost"] = costs.at['solar-utility', - 
'fixed'] + # set existing solar to cost of utility cost rather the 50-50 rooftop-utility + solar = n.generators.index[n.generators.carrier == "solar"] + n.generators.loc[solar, "capital_cost"] = costs.at['solar-utility', 'fixed'] if snakemake.wildcards.clusters[-1:] == "m": + simplified_pop_layout = pd.read_csv(snakemake.input.simplified_pop_layout, index_col=0) pop_solar = simplified_pop_layout.total.rename(index = lambda x: x + " solar") else: pop_solar = pop_layout.total.rename(index = lambda x: x + " solar") # add max solar rooftop potential assuming 0.1 kW/m2 and 10 m2/person, - #i.e. 1 kW/person (population data is in thousands of people) so we get MW - potential = 0.1*10*pop_solar + # i.e. 1 kW/person (population data is in thousands of people) so we get MW + potential = 0.1 * 10 * pop_solar - network.madd("Generator", - solar, - suffix=" rooftop", - bus=network.generators.loc[solar, "bus"] + " low voltage", - carrier="solar rooftop", - p_nom_extendable=True, - p_nom_max=potential, - marginal_cost=network.generators.loc[solar, 'marginal_cost'], - capital_cost=costs.at['solar-rooftop', 'fixed'], - efficiency=network.generators.loc[solar, 'efficiency'], - p_max_pu=network.generators_t.p_max_pu[solar]) + n.madd("Generator", + solar, + suffix=" rooftop", + bus=n.generators.loc[solar, "bus"] + " low voltage", + carrier="solar rooftop", + p_nom_extendable=True, + p_nom_max=potential, + marginal_cost=n.generators.loc[solar, 'marginal_cost'], + capital_cost=costs.at['solar-rooftop', 'fixed'], + efficiency=n.generators.loc[solar, 'efficiency'], + p_max_pu=n.generators_t.p_max_pu[solar] + ) + + n.add("Carrier", "home battery") + + n.madd("Bus", + nodes + " home battery", + location=nodes, + carrier="home battery" + ) + + n.madd("Store", + nodes + " home battery", + bus=nodes + " home battery", + e_cyclic=True, + e_nom_extendable=True, + carrier="home battery", + capital_cost=costs.at['battery storage', 'fixed'], + lifetime=costs.at['battery storage', 'lifetime'] + ) + 
+ n.madd("Link", + nodes + " home battery charger", + bus0=nodes + " low voltage", + bus1=nodes + " home battery", + carrier="home battery charger", + efficiency=costs.at['battery inverter', 'efficiency']**0.5, + capital_cost=costs.at['battery inverter', 'fixed'], + p_nom_extendable=True, + lifetime=costs.at['battery inverter', 'lifetime'] + ) + + n.madd("Link", + nodes + " home battery discharger", + bus0=nodes + " home battery", + bus1=nodes + " low voltage", + carrier="home battery discharger", + efficiency=costs.at['battery inverter', 'efficiency']**0.5, + marginal_cost=options['marginal_cost_storage'], + p_nom_extendable=True, + lifetime=costs.at['battery inverter', 'lifetime'] + ) - network.add("Carrier","home battery") +def insert_gas_distribution_costs(n, costs): + # TODO options? - network.madd("Bus", - nodes + " home battery", - location=nodes, - carrier="home battery") - - network.madd("Store", - nodes + " home battery", - bus=nodes + " home battery", - e_cyclic=True, - e_nom_extendable=True, - carrier="home battery", - capital_cost=costs.at['battery storage','fixed'], - lifetime=costs.at['battery storage','lifetime']) - - network.madd("Link", - nodes + " home battery charger", - bus0=nodes + " low voltage", - bus1=nodes + " home battery", - carrier="home battery charger", - efficiency=costs.at['battery inverter','efficiency']**0.5, - capital_cost=costs.at['battery inverter','fixed'], - p_nom_extendable=True, - lifetime=costs.at['battery inverter','lifetime']) - - network.madd("Link", - nodes + " home battery discharger", - bus0=nodes + " home battery", - bus1=nodes + " low voltage", - carrier="home battery discharger", - efficiency=costs.at['battery inverter','efficiency']**0.5, - marginal_cost=options['marginal_cost_storage'], - p_nom_extendable=True, - lifetime=costs.at['battery inverter','lifetime']) - - -def insert_gas_distribution_costs(network): f_costs = options['gas_distribution_grid_cost_factor'] - print("Inserting gas distribution grid with 
investment cost\ - factor of", f_costs) + + print("Inserting gas distribution grid with investment cost factor of", f_costs) + + capital_cost = costs.loc['electricity distribution grid']["fixed"] * f_costs # gas boilers - gas_b = network.links[network.links.carrier.str.contains("gas boiler") & - (~network.links.carrier.str.contains("urban central"))].index - network.links.loc[gas_b, "capital_cost"] += costs.loc['electricity distribution grid']["fixed"] * f_costs + gas_b = n.links.index[n.links.carrier.str.contains("gas boiler") & + (~n.links.carrier.str.contains("urban central"))] + n.links.loc[gas_b, "capital_cost"] += capital_cost + # micro CHPs - mchp = network.links.index[network.links.carrier.str.contains("micro gas")] - network.links.loc[mchp, "capital_cost"] += costs.loc['electricity distribution grid']["fixed"] * f_costs + mchp = n.links.index[n.links.carrier.str.contains("micro gas")] + n.links.loc[mchp, "capital_cost"] += capital_cost -def add_electricity_grid_connection(network): - carriers = ["onwind","solar"] +def add_electricity_grid_connection(n, costs): - gens = network.generators.index[network.generators.carrier.isin(carriers)] + carriers = ["onwind", "solar"] - network.generators.loc[gens,"capital_cost"] += costs.at['electricity grid connection','fixed'] + gens = n.generators.index[n.generators.carrier.isin(carriers)] + + n.generators.loc[gens, "capital_cost"] += costs.at['electricity grid connection', 'fixed'] + + +def add_storage(n, costs): + # TODO pop_layout + # TODO options? 
-def add_storage(network): print("adding electricity storage") + nodes = pop_layout.index - network.add("Carrier","H2") + n.add("Carrier", "H2") + n.madd("Bus", + nodes + " H2", + location=nodes, + carrier="H2" + ) - network.madd("Bus", - nodes+ " H2", - location=nodes, - carrier="H2") + n.madd("Link", + nodes + " H2 Electrolysis", + bus1=nodes + " H2", + bus0=nodes, + p_nom_extendable=True, + carrier="H2 Electrolysis", + efficiency=costs.at["electrolysis", "efficiency"], + capital_cost=costs.at["electrolysis", "fixed"], + lifetime=costs.at['electrolysis', 'lifetime'] + ) - network.madd("Link", - nodes + " H2 Electrolysis", - bus1=nodes + " H2", - bus0=nodes, - p_nom_extendable=True, - carrier="H2 Electrolysis", - efficiency=costs.at["electrolysis","efficiency"], - capital_cost=costs.at["electrolysis","fixed"], - lifetime=costs.at['electrolysis','lifetime']) - - network.madd("Link", - nodes + " H2 Fuel Cell", - bus0=nodes + " H2", - bus1=nodes, - p_nom_extendable=True, - carrier ="H2 Fuel Cell", - efficiency=costs.at["fuel cell","efficiency"], - capital_cost=costs.at["fuel cell","fixed"]*costs.at["fuel cell","efficiency"], #NB: fixed cost is per MWel - lifetime=costs.at['fuel cell','lifetime']) + n.madd("Link", + nodes + " H2 Fuel Cell", + bus0=nodes + " H2", + bus1=nodes, + p_nom_extendable=True, + carrier ="H2 Fuel Cell", + efficiency=costs.at["fuel cell", "efficiency"], + capital_cost=costs.at["fuel cell", "fixed"] * costs.at["fuel cell", "efficiency"], #NB: fixed cost is per MWel + lifetime=costs.at['fuel cell', 'lifetime'] + ) cavern_nodes = pd.DataFrame() - if options['hydrogen_underground_storage']: - h2_salt_cavern_potential = pd.read_csv(snakemake.input.h2_cavern, - index_col=0,squeeze=True) + h2_salt_cavern_potential = pd.read_csv(snakemake.input.h2_cavern, index_col=0, squeeze=True) h2_cavern_ct = h2_salt_cavern_potential[~h2_salt_cavern_potential.isna()] cavern_nodes = pop_layout[pop_layout.ct.isin(h2_cavern_ct.index)] @@ -913,270 +890,290 @@ def 
add_storage(network): # assumptions: weight storage potential in a country by population # TODO: fix with real geographic potentials - #convert TWh to MWh with 1e6 + # convert TWh to MWh with 1e6 h2_pot = h2_cavern_ct.loc[cavern_nodes.ct] h2_pot.index = cavern_nodes.index h2_pot = h2_pot * cavern_nodes.fraction * 1e6 - network.madd("Store", - cavern_nodes.index + " H2 Store", - bus=cavern_nodes.index + " H2", - e_nom_extendable=True, - e_nom_max=h2_pot.values, - e_cyclic=True, - carrier="H2 Store", - capital_cost=h2_capital_cost) + n.madd("Store", + cavern_nodes.index + " H2 Store", + bus=cavern_nodes.index + " H2", + e_nom_extendable=True, + e_nom_max=h2_pot.values, + e_cyclic=True, + carrier="H2 Store", + capital_cost=h2_capital_cost + ) - # hydrogen stored overground + # hydrogen stored overground (where not already underground) h2_capital_cost = costs.at["hydrogen storage tank", "fixed"] - nodes_overground = nodes.symmetric_difference(cavern_nodes.index) + nodes_overground = cavern_nodes.index.symmetric_difference(nodes) - network.madd("Store", - nodes_overground + " H2 Store", - bus=nodes_overground + " H2", - e_nom_extendable=True, - e_cyclic=True, - carrier="H2 Store", - capital_cost=h2_capital_cost) + n.madd("Store", + nodes_overground + " H2 Store", + bus=nodes_overground + " H2", + e_nom_extendable=True, + e_cyclic=True, + carrier="H2 Store", + capital_cost=h2_capital_cost + ) - h2_links = pd.DataFrame(columns=["bus0","bus1","length"]) - prefix = "H2 pipeline " - connector = " -> " - attrs = ["bus0","bus1","length"] + attrs = ["bus0", "bus1", "length"] + h2_links = pd.DataFrame(columns=attrs) - candidates = pd.concat([network.lines[attrs],network.links.loc[network.links.carrier == "DC",attrs]], - keys=["lines","links"]) + candidates = pd.concat({"lines": n.lines[attrs], + "links": n.links.loc[n.links.carrier == "DC", attrs]}) for candidate in candidates.index: - buses = [candidates.at[candidate,"bus0"],candidates.at[candidate,"bus1"]] + buses = 
[candidates.at[candidate, "bus0"], candidates.at[candidate, "bus1"]] buses.sort() - name = prefix + buses[0] + connector + buses[1] + name = f"H2 pipeline {buses[0]} -> {buses[1]}" if name not in h2_links.index: - h2_links.at[name,"bus0"] = buses[0] - h2_links.at[name,"bus1"] = buses[1] - h2_links.at[name,"length"] = candidates.at[candidate,"length"] + h2_links.at[name, "bus0"] = buses[0] + h2_links.at[name, "bus1"] = buses[1] + h2_links.at[name, "length"] = candidates.at[candidate, "length"] - #TODO Add efficiency losses - network.madd("Link", - h2_links.index, - bus0=h2_links.bus0.values + " H2", - bus1=h2_links.bus1.values + " H2", - p_min_pu=-1, - p_nom_extendable=True, - length=h2_links.length.values, - capital_cost=costs.at['H2 pipeline','fixed']*h2_links.length.values, - carrier="H2 pipeline", - lifetime=costs.at['H2 pipeline','lifetime']) + # TODO Add efficiency losses + n.madd("Link", + h2_links.index, + bus0=h2_links.bus0.values + " H2", + bus1=h2_links.bus1.values + " H2", + p_min_pu=-1, + p_nom_extendable=True, + length=h2_links.length.values, + capital_cost=costs.at['H2 pipeline', 'fixed'] * h2_links.length.values, + carrier="H2 pipeline", + lifetime=costs.at['H2 pipeline', 'lifetime'] + ) + n.add("Carrier", "battery") - network.add("Carrier","battery") + n.madd("Bus", + nodes + " battery", + location=nodes, + carrier="battery" + ) - network.madd("Bus", - nodes + " battery", - location=nodes, - carrier="battery") + n.madd("Store", + nodes + " battery", + bus=nodes + " battery", + e_cyclic=True, + e_nom_extendable=True, + carrier="battery", + capital_cost=costs.at['battery storage', 'fixed'], + lifetime=costs.at['battery storage', 'lifetime'] + ) - network.madd("Store", - nodes + " battery", - bus=nodes + " battery", - e_cyclic=True, - e_nom_extendable=True, - carrier="battery", - capital_cost=costs.at['battery storage','fixed'], - lifetime=costs.at['battery storage','lifetime']) - - network.madd("Link", - nodes + " battery charger", - bus0=nodes, - 
bus1=nodes + " battery", - carrier="battery charger", - efficiency=costs.at['battery inverter','efficiency']**0.5, - capital_cost=costs.at['battery inverter','fixed'], - p_nom_extendable=True, - lifetime=costs.at['battery inverter','lifetime']) - - network.madd("Link", - nodes + " battery discharger", - bus0=nodes + " battery", - bus1=nodes, - carrier="battery discharger", - efficiency=costs.at['battery inverter','efficiency']**0.5, - marginal_cost=options['marginal_cost_storage'], - p_nom_extendable=True, - lifetime=costs.at['battery inverter','lifetime']) + n.madd("Link", + nodes + " battery charger", + bus0=nodes, + bus1=nodes + " battery", + carrier="battery charger", + efficiency=costs.at['battery inverter', 'efficiency']**0.5, + capital_cost=costs.at['battery inverter', 'fixed'], + p_nom_extendable=True, + lifetime=costs.at['battery inverter', 'lifetime'] + ) + n.madd("Link", + nodes + " battery discharger", + bus0=nodes + " battery", + bus1=nodes, + carrier="battery discharger", + efficiency=costs.at['battery inverter', 'efficiency']**0.5, + marginal_cost=options['marginal_cost_storage'], + p_nom_extendable=True, + lifetime=costs.at['battery inverter', 'lifetime'] + ) if options['methanation']: - network.madd("Link", - nodes + " Sabatier", - bus0=nodes+" H2", - bus1=["EU gas"]*len(nodes), - bus2="co2 stored", - p_nom_extendable=True, - carrier="Sabatier", - efficiency=costs.at["methanation","efficiency"], - efficiency2=-costs.at["methanation","efficiency"]*costs.at['gas','CO2 intensity'], - capital_cost=costs.at["methanation","fixed"], - lifetime=costs.at['methanation','lifetime']) + + n.madd("Link", + nodes + " Sabatier", + bus0=nodes + " H2", + bus1="EU gas", + bus2="co2 stored", + p_nom_extendable=True, + carrier="Sabatier", + efficiency=costs.at["methanation", "efficiency"], + efficiency2=-costs.at["methanation", "efficiency"] * costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at["methanation", "fixed"], + lifetime=costs.at['methanation', 
'lifetime'] + ) if options['helmeth']: - network.madd("Link", - nodes + " helmeth", - bus0=nodes, - bus1=["EU gas"]*len(nodes), - bus2="co2 stored", - carrier="helmeth", - p_nom_extendable=True, - efficiency=costs.at["helmeth","efficiency"], - efficiency2=-costs.at["helmeth","efficiency"]*costs.at['gas','CO2 intensity'], - capital_cost=costs.at["helmeth","fixed"], - lifetime=costs.at['helmeth','lifetime']) + + n.madd("Link", + nodes + " helmeth", + bus0=nodes, + bus1="EU gas", + bus2="co2 stored", + carrier="helmeth", + p_nom_extendable=True, + efficiency=costs.at["helmeth", "efficiency"], + efficiency2=-costs.at["helmeth", "efficiency"] * costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at["helmeth", "fixed"], + lifetime=costs.at['helmeth', 'lifetime'] + ) if options['SMR']: - network.madd("Link", - nodes + " SMR CC", - bus0=["EU gas"]*len(nodes), - bus1=nodes+" H2", - bus2="co2 atmosphere", - bus3="co2 stored", - p_nom_extendable=True, - carrier="SMR CC", - efficiency=costs.at["SMR CC","efficiency"], - efficiency2=costs.at['gas','CO2 intensity']*(1-options["cc_fraction"]), - efficiency3=costs.at['gas','CO2 intensity']*options["cc_fraction"], - capital_cost=costs.at["SMR CC","fixed"], - lifetime=costs.at['SMR CC','lifetime']) - network.madd("Link", - nodes + " SMR", - bus0=["EU gas"]*len(nodes), - bus1=nodes+" H2", - bus2="co2 atmosphere", - p_nom_extendable=True, - carrier="SMR", - efficiency=costs.at["SMR","efficiency"], - efficiency2=costs.at['gas','CO2 intensity'], - capital_cost=costs.at["SMR","fixed"], - lifetime=costs.at['SMR','lifetime']) + n.madd("Link", + nodes + " SMR CC", + bus0="EU gas", + bus1=nodes + " H2", + bus2="co2 atmosphere", + bus3="co2 stored", + p_nom_extendable=True, + carrier="SMR CC", + efficiency=costs.at["SMR CC", "efficiency"], + efficiency2=costs.at['gas', 'CO2 intensity'] * (1 - options["cc_fraction"]), + efficiency3=costs.at['gas', 'CO2 intensity'] * options["cc_fraction"], + capital_cost=costs.at["SMR CC", "fixed"], + 
lifetime=costs.at['SMR CC', 'lifetime'] + ) + + n.madd("Link", + nodes + " SMR", + bus0="EU gas", + bus1=nodes + " H2", + bus2="co2 atmosphere", + p_nom_extendable=True, + carrier="SMR", + efficiency=costs.at["SMR", "efficiency"], + efficiency2=costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at["SMR", "fixed"], + lifetime=costs.at['SMR', 'lifetime'] + ) -def add_land_transport(network): +def add_land_transport(n, costs): + # TODO options? print("adding land transport") - fuel_cell_share = get_parameter(options["land_transport_fuel_cell_share"]) - electric_share = get_parameter(options["land_transport_electric_share"]) + fuel_cell_share = get(options["land_transport_fuel_cell_share"], investment_year) + electric_share = get(options["land_transport_electric_share"], investment_year) ice_share = 1 - fuel_cell_share - electric_share - print("shares of FCEV, EV and ICEV are", - fuel_cell_share, - electric_share, - ice_share) + print("FCEV share", fuel_cell_share) + print("EV share", electric_share) + print("ICEV share", ice_share) - if ice_share < 0: - print("Error, more FCEV and EV share than 1.") - sys.exit() + assert ice_share >= 0, "Error, more FCEV and EV share than 1." nodes = pop_layout.index - if electric_share > 0: - network.add("Carrier","Li ion") + n.add("Carrier", "Li ion") - network.madd("Bus", - nodes, - location=nodes, - suffix=" EV battery", - carrier="Li ion") + n.madd("Bus", + nodes, + location=nodes, + suffix=" EV battery", + carrier="Li ion" + ) + + p_set = electric_share * (transport[nodes] + cycling_shift(transport[nodes], 1) + cycling_shift(transport[nodes], 2)) / 3 - network.madd("Load", - nodes, - suffix=" land transport EV", - bus=nodes + " EV battery", - carrier="land transport EV", - p_set=electric_share*(transport[nodes]+shift_df(transport[nodes],1)+shift_df(transport[nodes],2))/3.) 
- - p_nom = nodal_transport_data["number cars"]*0.011*electric_share #3-phase charger with 11 kW * x% of time grid-connected - - network.madd("Link", - nodes, - suffix= " BEV charger", - bus0=nodes, - bus1=nodes + " EV battery", - p_nom=p_nom, - carrier="BEV charger", - p_max_pu=avail_profile[nodes], - efficiency=0.9, #[B] - #These were set non-zero to find LU infeasibility when availability = 0.25 - #p_nom_extendable=True, - #p_nom_min=p_nom, - #capital_cost=1e6, #i.e. so high it only gets built where necessary + n.madd("Load", + nodes, + suffix=" land transport EV", + bus=nodes + " EV battery", + carrier="land transport EV", + p_set=p_set ) - if options["v2g"]: + + p_nom = nodal_transport_data["number cars"] * options.get("bev_charge_rate", 0.011) * electric_share - network.madd("Link", - nodes, - suffix=" V2G", - bus1=nodes, - bus0=nodes + " EV battery", - p_nom=p_nom, - carrier="V2G", - p_max_pu=avail_profile[nodes], - efficiency=0.9) #[B] + n.madd("Link", + nodes, + suffix= " BEV charger", + bus0=nodes, + bus1=nodes + " EV battery", + p_nom=p_nom, + carrier="BEV charger", + p_max_pu=avail_profile[nodes], + efficiency=options.get("bev_charge_efficiency", 0.9), + #These were set non-zero to find LU infeasibility when availability = 0.25 + #p_nom_extendable=True, + #p_nom_min=p_nom, + #capital_cost=1e6, #i.e. 
so high it only gets built where necessary + ) + if electric_share > 0 and options["v2g"]: + n.madd("Link", + nodes, + suffix=" V2G", + bus1=nodes, + bus0=nodes + " EV battery", + p_nom=p_nom, + carrier="V2G", + p_max_pu=avail_profile[nodes], + efficiency=options.get("bev_charge_efficiency", 0.9), + ) - if options["bev_dsm"]: + if electric_share > 0 and options["bev_dsm"]: - network.madd("Store", - nodes, - suffix=" battery storage", - bus=nodes + " EV battery", - carrier="battery storage", - e_cyclic=True, - e_nom=nodal_transport_data["number cars"]*0.05*options["bev_availability"]*electric_share, #50 kWh battery http://www.zeit.de/mobilitaet/2014-10/auto-fahrzeug-bestand - e_max_pu=1, - e_min_pu=dsm_profile[nodes]) + e_nom = nodal_transport_data["number cars"] * options.get("bev_energy", 0.05) * options["bev_availability"] * electric_share + n.madd("Store", + nodes, + suffix=" battery storage", + bus=nodes + " EV battery", + carrier="battery storage", + e_cyclic=True, + e_nom=e_nom, + e_max_pu=1, + e_min_pu=dsm_profile[nodes] + ) if fuel_cell_share > 0: - network.madd("Load", - nodes, - suffix=" land transport fuel cell", - bus=nodes + " H2", - carrier="land transport fuel cell", - p_set=fuel_cell_share/options['transport_fuel_cell_efficiency']*transport[nodes]) - + n.madd("Load", + nodes, + suffix=" land transport fuel cell", + bus=nodes + " H2", + carrier="land transport fuel cell", + p_set=fuel_cell_share / options['transport_fuel_cell_efficiency'] * transport[nodes] + ) if ice_share > 0: - if "EU oil" not in network.buses.index: - network.madd("Bus", - ["EU oil"], - location="EU", - carrier="oil") + if "EU oil" not in n.buses.index: + n.add("Bus", + "EU oil", + location="EU", + carrier="oil" + ) - network.madd("Load", - nodes, - suffix=" land transport oil", - bus="EU oil", - carrier="land transport oil", - p_set=ice_share/options['transport_internal_combustion_efficiency']*transport[nodes]) + ice_efficiency = 
options['transport_internal_combustion_efficiency'] - co2 = ice_share/options['transport_internal_combustion_efficiency']*transport[nodes].sum().sum()/8760.*costs.at["oil",'CO2 intensity'] + n.madd("Load", + nodes, + suffix=" land transport oil", + bus="EU oil", + carrier="land transport oil", + p_set=ice_share / ice_efficiency * transport[nodes] + ) - network.madd("Load", - ["land transport oil emissions"], - bus="co2 atmosphere", - carrier="land transport oil emissions", - p_set=-co2) + co2 = ice_share / ice_efficiency * transport[nodes].sum().sum() / 8760 * costs.at["oil", 'CO2 intensity'] + + n.madd("Load", + ["land transport oil emissions"], + bus="co2 atmosphere", + carrier="land transport oil emissions", + p_set=-co2 + ) -def add_heat(network): +def add_heat(n, costs): + # TODO options? + # TODO pop_layout? print("adding heat") @@ -1186,203 +1183,224 @@ def add_heat(network): #NB: must add costs of central heating afterwards (EUR 400 / kWpeak, 50a, 1% FOM from Fraunhofer ISE) - urban_fraction = options['central_fraction']*pop_layout["urban"]/(pop_layout[["urban","rural"]].sum(axis=1)) + urban_fraction = options['central_fraction'] * pop_layout["urban"] / pop_layout[["urban", "rural"]].sum(axis=1) # exogenously reduce space heat demand if options["reduce_space_heat_exogenously"]: - dE = get_parameter(options["reduce_space_heat_exogenously_factor"]) - print("assumed space heat reduction of {} %".format(dE*100)) + dE = get(options["reduce_space_heat_exogenously_factor"], investment_year) + print(f"assumed space heat reduction of {dE*100} %") for sector in sectors: - heat_demand[sector + " space"] = (1-dE)*heat_demand[sector + " space"] + heat_demand[sector + " space"] = (1 - dE) * heat_demand[sector + " space"] - heat_systems = ["residential rural", "services rural", - "residential urban decentral","services urban decentral", - "urban central"] + heat_systems = [ + "residential rural", + "services rural", + "residential urban decentral", + "services urban 
decentral", + "urban central" + ] + for name in heat_systems: name_type = "central" if name == "urban central" else "decentral" - network.add("Carrier",name + " heat") + n.add("Carrier", name + " heat") - network.madd("Bus", - nodes[name] + " " + name + " heat", - location=nodes[name], - carrier=name + " heat") + n.madd("Bus", + nodes[name] + f" {name} heat", + location=nodes[name], + carrier=name + " heat" + ) ## Add heat load for sector in sectors: if "rural" in name: - factor = 1-urban_fraction[nodes[name]] + factor = 1 - urban_fraction[nodes[name]] elif "urban" in name: factor = urban_fraction[nodes[name]] - else: - factor = None if sector in name: heat_load = heat_demand[[sector + " water",sector + " space"]].groupby(level=1,axis=1).sum()[nodes[name]].multiply(factor) - if name == "urban central": - heat_load = heat_demand.groupby(level=1,axis=1).sum()[nodes[name]].multiply(urban_fraction[nodes[name]]*(1+options['district_heating_loss'])) - - network.madd("Load", - nodes[name], - suffix=" " + name + " heat", - bus=nodes[name] + " " + name + " heat", - carrier=name + " heat", - p_set=heat_load) + heat_load = heat_demand.groupby(level=1,axis=1).sum()[nodes[name]].multiply(urban_fraction[nodes[name]] * (1 + options['district_heating_loss'])) + n.madd("Load", + nodes[name], + suffix=f" {name} heat", + bus=nodes[name] + f" {name} heat", + carrier=name + " heat", + p_set=heat_load + ) ## Add heat pumps heat_pump_type = "air" if "urban" in name else "ground" - costs_name = "{} {}-sourced heat pump".format(name_type,heat_pump_type) + costs_name = f"{name_type} {heat_pump_type}-sourced heat pump" cop = {"air" : ashp_cop, "ground" : gshp_cop} - efficiency = cop[heat_pump_type][nodes[name]] if options["time_dep_hp_cop"] else costs.at[costs_name,'efficiency'] - - network.madd("Link", - nodes[name], - suffix=" {} {} heat pump".format(name,heat_pump_type), - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", - carrier="{} {} heat pump".format(name,heat_pump_type), 
- efficiency=efficiency, - capital_cost=costs.at[costs_name,'efficiency']*costs.at[costs_name,'fixed'], - p_nom_extendable=True, - lifetime=costs.at[costs_name,'lifetime']) + efficiency = cop[heat_pump_type][nodes[name]] if options["time_dep_hp_cop"] else costs.at[costs_name, 'efficiency'] + n.madd("Link", + nodes[name], + suffix=f" {name} {heat_pump_type} heat pump", + bus0=nodes[name], + bus1=nodes[name] + f" {name} heat", + carrier=f"{name} {heat_pump_type} heat pump", + efficiency=efficiency, + capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'], + p_nom_extendable=True, + lifetime=costs.at[costs_name, 'lifetime'] + ) if options["tes"]: - network.add("Carrier",name + " water tanks") + n.add("Carrier", name + " water tanks") - network.madd("Bus", - nodes[name] + " " + name + " water tanks", - location=nodes[name], - carrier=name + " water tanks") + n.madd("Bus", + nodes[name] + f" {name} water tanks", + location=nodes[name], + carrier=name + " water tanks" + ) - network.madd("Link", - nodes[name] + " " + name + " water tanks charger", - bus0=nodes[name] + " " + name + " heat", - bus1=nodes[name] + " " + name + " water tanks", - efficiency=costs.at['water tank charger','efficiency'], - carrier=name + " water tanks charger", - p_nom_extendable=True) + n.madd("Link", + nodes[name] + f" {name} water tanks charger", + bus0=nodes[name] + f" {name} heat", + bus1=nodes[name] + f" {name} water tanks", + efficiency=costs.at['water tank charger', 'efficiency'], + carrier=name + " water tanks charger", + p_nom_extendable=True + ) - network.madd("Link", - nodes[name] + " " + name + " water tanks discharger", - bus0=nodes[name] + " " + name + " water tanks", - bus1=nodes[name] + " " + name + " heat", - carrier=name + " water tanks discharger", - efficiency=costs.at['water tank discharger','efficiency'], - p_nom_extendable=True) + n.madd("Link", + nodes[name] + f" {name} water tanks discharger", + bus0=nodes[name] + f" {name} water tanks", + 
bus1=nodes[name] + f" {name} heat", + carrier=name + " water tanks discharger", + efficiency=costs.at['water tank discharger', 'efficiency'], + p_nom_extendable=True + ) - # [HP] 180 day time constant for centralised, 3 day for decentralised - tes_time_constant_days = options["tes_tau"] if name_type == "decentral" else 180. + + if isinstance(options["tes_tau"], dict): + tes_time_constant_days = options["tes_tau"][name_type] + else: + logger.warning("Deprecated: a future version will require you to specify 'tes_tau' " + "for 'decentral' and 'central' separately.") + tes_time_constant_days = options["tes_tau"] if name_type == "decentral" else 180. + + # conversion from EUR/m^3 to EUR/MWh for 40 K diff and 1.17 kWh/m^3/K + capital_cost = costs.at[name_type + ' water tank storage', 'fixed'] / 0.00117 / 40 - network.madd("Store", - nodes[name] + " " + name + " water tanks", - bus=nodes[name] + " " + name + " water tanks", - e_cyclic=True, - e_nom_extendable=True, - carrier=name + " water tanks", - standing_loss=1-np.exp(-1/(24.*tes_time_constant_days)), - capital_cost=costs.at[name_type + ' water tank storage','fixed']/(1.17e-3*40), #conversion from EUR/m^3 to EUR/MWh for 40 K diff and 1.17 kWh/m^3/K - lifetime=costs.at[name_type + ' water tank storage','lifetime']) + n.madd("Store", + nodes[name] + f" {name} water tanks", + bus=nodes[name] + f" {name} water tanks", + e_cyclic=True, + e_nom_extendable=True, + carrier=name + " water tanks", + standing_loss=1 - np.exp(- 1 / 24 / tes_time_constant_days), + capital_cost=capital_cost, + lifetime=costs.at[name_type + ' water tank storage', 'lifetime'] + ) if options["boilers"]: - network.madd("Link", - nodes[name] + " " + name + " resistive heater", - bus0=nodes[name], - bus1=nodes[name] + " " + name + " heat", - carrier=name + " resistive heater", - efficiency=costs.at[name_type + ' resistive heater','efficiency'], - capital_cost=costs.at[name_type + ' resistive heater','efficiency']*costs.at[name_type + ' resistive 
heater','fixed'], - p_nom_extendable=True, - lifetime=costs.at[name_type + ' resistive heater','lifetime']) + key = f"{name_type} resistive heater" - network.madd("Link", - nodes[name] + " " + name + " gas boiler", - p_nom_extendable=True, - bus0=["EU gas"]*len(nodes[name]), - bus1=nodes[name] + " " + name + " heat", - bus2="co2 atmosphere", - carrier=name + " gas boiler", - efficiency=costs.at[name_type + ' gas boiler','efficiency'], - efficiency2=costs.at['gas','CO2 intensity'], - capital_cost=costs.at[name_type + ' gas boiler','efficiency']*costs.at[name_type + ' gas boiler','fixed'], - lifetime=costs.at[name_type + ' gas boiler','lifetime']) + n.madd("Link", + nodes[name] + f" {name} resistive heater", + bus0=nodes[name], + bus1=nodes[name] + f" {name} heat", + carrier=name + " resistive heater", + efficiency=costs.at[key, 'efficiency'], + capital_cost=costs.at[key, 'efficiency'] * costs.at[key, 'fixed'], + p_nom_extendable=True, + lifetime=costs.at[key, 'lifetime'] + ) + key = f"{name_type} gas boiler" + n.madd("Link", + nodes[name] + f" {name} gas boiler", + p_nom_extendable=True, + bus0="EU gas", + bus1=nodes[name] + f" {name} heat", + bus2="co2 atmosphere", + carrier=name + " gas boiler", + efficiency=costs.at[key, 'efficiency'], + efficiency2=costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at[key, 'efficiency'] * costs.at[key, 'fixed'], + lifetime=costs.at[key, 'lifetime'] + ) if options["solar_thermal"]: - network.add("Carrier",name + " solar thermal") + n.add("Carrier", name + " solar thermal") - network.madd("Generator", - nodes[name], - suffix=" " + name + " solar thermal collector", - bus=nodes[name] + " " + name + " heat", - carrier=name + " solar thermal", - p_nom_extendable=True, - capital_cost=costs.at[name_type + ' solar thermal','fixed'], - p_max_pu=solar_thermal[nodes[name]], - lifetime=costs.at[name_type + ' solar thermal','lifetime']) + n.madd("Generator", + nodes[name], + suffix=f" {name} solar thermal collector", + bus=nodes[name] + 
f" {name} heat", + carrier=name + " solar thermal", + p_nom_extendable=True, + capital_cost=costs.at[name_type + ' solar thermal', 'fixed'], + p_max_pu=solar_thermal[nodes[name]], + lifetime=costs.at[name_type + ' solar thermal', 'lifetime'] + ) + if options["chp"] and name == "urban central": - if options["chp"]: + # add gas CHP; biomass CHP is added in biomass section + n.madd("Link", + nodes[name] + " urban central gas CHP", + bus0="EU gas", + bus1=nodes[name], + bus2=nodes[name] + " urban central heat", + bus3="co2 atmosphere", + carrier="urban central gas CHP", + p_nom_extendable=True, + capital_cost=costs.at['central gas CHP', 'fixed'] * costs.at['central gas CHP', 'efficiency'], + marginal_cost=costs.at['central gas CHP', 'VOM'], + efficiency=costs.at['central gas CHP', 'efficiency'], + efficiency2=costs.at['central gas CHP', 'efficiency'] / costs.at['central gas CHP', 'c_b'], + efficiency3=costs.at['gas', 'CO2 intensity'], + lifetime=costs.at['central gas CHP', 'lifetime'] + ) - if name == "urban central": - #add gas CHP; biomass CHP is added in biomass section - network.madd("Link", - nodes[name] + " urban central gas CHP", - bus0="EU gas", - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", - bus3="co2 atmosphere", - carrier="urban central gas CHP", - p_nom_extendable=True, - capital_cost=costs.at['central gas CHP','fixed']*costs.at['central gas CHP','efficiency'], - marginal_cost=costs.at['central gas CHP','VOM'], - efficiency=costs.at['central gas CHP','efficiency'], - efficiency2=costs.at['central gas CHP','efficiency']/costs.at['central gas CHP','c_b'], - efficiency3=costs.at['gas','CO2 intensity'], - lifetime=costs.at['central gas CHP','lifetime']) + n.madd("Link", + nodes[name] + " urban central gas CHP CC", + bus0="EU gas", + bus1=nodes[name], + bus2=nodes[name] + " urban central heat", + bus3="co2 atmosphere", + bus4="co2 stored", + carrier="urban central gas CHP CC", + p_nom_extendable=True, + capital_cost=costs.at['central gas CHP', 
'fixed']*costs.at['central gas CHP', 'efficiency'] + costs.at['biomass CHP capture', 'fixed']*costs.at['gas', 'CO2 intensity'], + marginal_cost=costs.at['central gas CHP', 'VOM'], + efficiency=costs.at['central gas CHP', 'efficiency'] - costs.at['gas', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'electricity-input'] + costs.at['biomass CHP capture', 'compression-electricity-input']), + efficiency2=costs.at['central gas CHP', 'efficiency'] / costs.at['central gas CHP', 'c_b'] + costs.at['gas', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'heat-output'] + costs.at['biomass CHP capture', 'compression-heat-output'] - costs.at['biomass CHP capture', 'heat-input']), + efficiency3=costs.at['gas', 'CO2 intensity'] * (1-costs.at['biomass CHP capture', 'capture_rate']), + efficiency4=costs.at['gas', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], + lifetime=costs.at['central gas CHP', 'lifetime'] + ) - network.madd("Link", - nodes[name] + " urban central gas CHP CC", - bus0="EU gas", - bus1=nodes[name], - bus2=nodes[name] + " urban central heat", - bus3="co2 atmosphere", - bus4="co2 stored", - carrier="urban central gas CHP CC", - p_nom_extendable=True, - capital_cost=costs.at['central gas CHP','fixed']*costs.at['central gas CHP','efficiency'] + costs.at['biomass CHP capture','fixed']*costs.at['gas','CO2 intensity'], - marginal_cost=costs.at['central gas CHP','VOM'], - efficiency=costs.at['central gas CHP','efficiency'] - costs.at['gas','CO2 intensity']*(costs.at['biomass CHP capture','electricity-input'] + costs.at['biomass CHP capture','compression-electricity-input']), - efficiency2=costs.at['central gas CHP','efficiency']/costs.at['central gas CHP','c_b'] + costs.at['gas','CO2 intensity']*(costs.at['biomass CHP capture','heat-output'] + costs.at['biomass CHP capture','compression-heat-output'] - costs.at['biomass CHP capture','heat-input']), - efficiency3=costs.at['gas','CO2 intensity']*(1-costs.at['biomass CHP 
capture','capture_rate']), - efficiency4=costs.at['gas','CO2 intensity']*costs.at['biomass CHP capture','capture_rate'], - lifetime=costs.at['central gas CHP','lifetime']) + if options["chp"] and options["micro_chp"] and name != "urban central": - else: - if options["micro_chp"]: - network.madd("Link", - nodes[name] + " " + name + " micro gas CHP", - p_nom_extendable=True, - bus0="EU gas", - bus1=nodes[name], - bus2=nodes[name] + " " + name + " heat", - bus3="co2 atmosphere", - carrier=name + " micro gas CHP", - efficiency=costs.at['micro CHP','efficiency'], - efficiency2=costs.at['micro CHP','efficiency-heat'], - efficiency3=costs.at['gas','CO2 intensity'], - capital_cost=costs.at['micro CHP','fixed'], - lifetime=costs.at['micro CHP','lifetime']) + n.madd("Link", + nodes[name] + f" {name} micro gas CHP", + p_nom_extendable=True, + bus0="EU gas", + bus1=nodes[name], + bus2=nodes[name] + f" {name} heat", + bus3="co2 atmosphere", + carrier=name + " micro gas CHP", + efficiency=costs.at['micro CHP', 'efficiency'], + efficiency2=costs.at['micro CHP', 'efficiency-heat'], + efficiency3=costs.at['gas', 'CO2 intensity'], + capital_cost=costs.at['micro CHP', 'fixed'], + lifetime=costs.at['micro CHP', 'lifetime'] + ) if options['retrofitting']['retro_endogen']: @@ -1405,7 +1423,7 @@ def add_heat(network): # heated floor area [10^6 * m^2] per country floor_area = pd.read_csv(snakemake.input.floor_area, index_col=[0, 1]) - network.add("Carrier", "retrofitting") + n.add("Carrier", "retrofitting") # share of space heat demand 'w_space' of total heat demand w_space = {} @@ -1417,9 +1435,9 @@ def add_heat(network): heat_demand_r.groupby(level=[1], axis=1).sum()) - for name in network.loads[network.loads.carrier.isin([x + " heat" for x in heat_systems])].index: + for name in n.loads[n.loads.carrier.isin([x + " heat" for x in heat_systems])].index: - node = network.buses.loc[name, "location"] + node = n.buses.loc[name, "location"] ct = pop_layout.loc[node, "ct"] # weighting 'f' 
depending on the size of the population at the node @@ -1433,7 +1451,7 @@ def add_heat(network): floor_area_node = ((pop_layout.loc[node].fraction * floor_area.loc[ct, "value"] * 10**6).loc[sec] * f) # total heat demand at node [MWh] - demand = (network.loads_t.p_set[name].resample(hours[0]) + demand = (n.loads_t.p_set[name].resample(hours[0]) .mean()) # space heat demand at node [MWh] @@ -1456,503 +1474,507 @@ def add_heat(network): # check that ambitious retrofitting has higher costs per MWh than moderate retrofitting if (capital_cost.diff() < 0).sum(): - print( - "warning, costs are not linear for ", ct, " ", sec) + print(f"Warning: costs are not linear for {ct} {sec}") s = capital_cost[(capital_cost.diff() < 0)].index strengths = strengths.drop(s) # reindex normed time profile of space heat demand back to hourly resolution - space_pu = (space_pu.reindex(index=heat_demand.index) - .fillna(method="ffill")) + space_pu = space_pu.reindex(index=heat_demand.index).fillna(method="ffill") # add for each retrofitting strength a generator with heat generation profile following the profile of the heat demand for strength in strengths: - network.madd('Generator', - [node], - suffix=' retrofitting ' + strength + " " + name[6::], - bus=name, - carrier="retrofitting", - p_nom_extendable=True, - p_nom_max=dE_diff[strength] * space_heat_demand.max(), # maximum energy savings for this renovation strength - p_max_pu=space_pu, - p_min_pu=space_pu, - country=ct, - capital_cost=capital_cost[strength] * options['retrofitting']['cost_factor']) - + n.madd('Generator', + [node], + suffix=' retrofitting ' + strength + " " + name[6::], + bus=name, + carrier="retrofitting", + p_nom_extendable=True, + p_nom_max=dE_diff[strength] * space_heat_demand.max(), # maximum energy savings for this renovation strength + p_max_pu=space_pu, + p_min_pu=space_pu, + country=ct, + capital_cost=capital_cost[strength] * options['retrofitting']['cost_factor'] + ) def create_nodes_for_heat_sector(): - sectors 
= ["residential", "services"] - # stores the different groups of nodes - nodes = {} + # TODO pop_layout + # rural are areas with low heating density and individual heating # urban are areas with high heating density # urban can be split into district heating (central) and individual heating (decentral) + + sectors = ["residential", "services"] + + nodes = {} for sector in sectors: nodes[sector + " rural"] = pop_layout.index if options["central"]: + # TODO: this looks hardcoded, move to config urban_decentral_ct = pd.Index(["ES", "GR", "PT", "IT", "BG"]) nodes[sector + " urban decentral"] = pop_layout.index[pop_layout.ct.isin(urban_decentral_ct)] else: nodes[sector + " urban decentral"] = pop_layout.index + # for central nodes, residential and services are aggregated nodes["urban central"] = pop_layout.index.symmetric_difference(nodes["residential urban decentral"]) + return nodes -def add_biomass(network): +def add_biomass(n, costs): print("adding biomass") - nodes = pop_layout.index + # biomass distributed at country level - i.e. transport within country allowed + countries = n.buses.country.dropna().unique() - #biomass distributed at country level - i.e. 
transport within country allowed - cts = pop_layout.ct.value_counts().index + biomass_potentials = pd.read_csv(snakemake.input.biomass_potentials, index_col=0) - biomass_potentials = pd.read_csv(snakemake.input.biomass_potentials, - index_col=0) + n.add("Carrier", "biogas") - network.add("Carrier","biogas") - network.add("Carrier","solid biomass") + n.add("Carrier", "solid biomass") - network.madd("Bus", - ["EU biogas"], - location="EU", - carrier="biogas") + n.add("Bus", + "EU biogas", + location="EU", + carrier="biogas" + ) - network.madd("Bus", - ["EU solid biomass"], - location="EU", - carrier="solid biomass") + n.add("Bus", + "EU solid biomass", + location="EU", + carrier="solid biomass" + ) - network.madd("Store", - ["EU biogas"], - bus="EU biogas", - carrier="biogas", - e_nom=biomass_potentials.loc[cts,"biogas"].sum(), - marginal_cost=costs.at['biogas','fuel'], - e_initial=biomass_potentials.loc[cts,"biogas"].sum()) + n.add("Store", + "EU biogas", + bus="EU biogas", + carrier="biogas", + e_nom=biomass_potentials.loc[countries, "biogas"].sum(), + marginal_cost=costs.at['biogas', 'fuel'], + e_initial=biomass_potentials.loc[countries, "biogas"].sum() + ) - network.madd("Store", - ["EU solid biomass"], - bus="EU solid biomass", - carrier="solid biomass", - e_nom=biomass_potentials.loc[cts,"solid biomass"].sum(), - marginal_cost=costs.at['solid biomass','fuel'], - e_initial=biomass_potentials.loc[cts,"solid biomass"].sum()) + n.add("Store", + "EU solid biomass", + bus="EU solid biomass", + carrier="solid biomass", + e_nom=biomass_potentials.loc[countries, "solid biomass"].sum(), + marginal_cost=costs.at['solid biomass', 'fuel'], + e_initial=biomass_potentials.loc[countries, "solid biomass"].sum() + ) - network.madd("Link", - ["biogas to gas"], - bus0="EU biogas", - bus1="EU gas", - bus2="co2 atmosphere", - carrier="biogas to gas", - capital_cost=costs.loc["biogas upgrading", "fixed"], - marginal_cost=costs.loc["biogas upgrading", "VOM"], - 
efficiency2=-costs.at['gas','CO2 intensity'], - p_nom_extendable=True) + n.add("Link", + "biogas to gas", + bus0="EU biogas", + bus1="EU gas", + bus2="co2 atmosphere", + carrier="biogas to gas", + capital_cost=costs.loc["biogas upgrading", "fixed"], + marginal_cost=costs.loc["biogas upgrading", "VOM"], + efficiency2=-costs.at['gas', 'CO2 intensity'], + p_nom_extendable=True + ) #AC buses with district heating - urban_central = network.buses.index[network.buses.carrier == "urban central heat"] + urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty and options["chp"]: urban_central = urban_central.str[:-len(" urban central heat")] - network.madd("Link", - urban_central + " urban central solid biomass CHP", - bus0="EU solid biomass", - bus1=urban_central, - bus2=urban_central + " urban central heat", - carrier="urban central solid biomass CHP", - p_nom_extendable=True, - capital_cost=costs.at['central solid biomass CHP','fixed']*costs.at['central solid biomass CHP','efficiency'], - marginal_cost=costs.at['central solid biomass CHP','VOM'], - efficiency=costs.at['central solid biomass CHP','efficiency'], - efficiency2=costs.at['central solid biomass CHP','efficiency-heat'], - lifetime=costs.at['central solid biomass CHP','lifetime']) + key = 'central solid biomass CHP' - network.madd("Link", - urban_central + " urban central solid biomass CHP CC", - bus0="EU solid biomass", - bus1=urban_central, - bus2=urban_central + " urban central heat", - bus3="co2 atmosphere", - bus4="co2 stored", - carrier="urban central solid biomass CHP CC", - p_nom_extendable=True, - capital_cost=costs.at['central solid biomass CHP','fixed']*costs.at['central solid biomass CHP','efficiency'] + costs.at['biomass CHP capture','fixed']*costs.at['solid biomass','CO2 intensity'], - marginal_cost=costs.at['central solid biomass CHP','VOM'], - efficiency=costs.at['central solid biomass CHP','efficiency'] - costs.at['solid biomass','CO2 
intensity']*(costs.at['biomass CHP capture','electricity-input'] + costs.at['biomass CHP capture','compression-electricity-input']), - efficiency2=costs.at['central solid biomass CHP','efficiency-heat'] + costs.at['solid biomass','CO2 intensity']*(costs.at['biomass CHP capture','heat-output'] + costs.at['biomass CHP capture','compression-heat-output'] - costs.at['biomass CHP capture','heat-input']), - efficiency3=-costs.at['solid biomass','CO2 intensity']*costs.at['biomass CHP capture','capture_rate'], - efficiency4=costs.at['solid biomass','CO2 intensity']*costs.at['biomass CHP capture','capture_rate'], - lifetime=costs.at['central solid biomass CHP','lifetime']) + n.madd("Link", + urban_central + " urban central solid biomass CHP", + bus0="EU solid biomass", + bus1=urban_central, + bus2=urban_central + " urban central heat", + carrier="urban central solid biomass CHP", + p_nom_extendable=True, + capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'], + marginal_cost=costs.at[key, 'VOM'], + efficiency=costs.at[key, 'efficiency'], + efficiency2=costs.at[key, 'efficiency-heat'], + lifetime=costs.at[key, 'lifetime'] + ) + + n.madd("Link", + urban_central + " urban central solid biomass CHP CC", + bus0="EU solid biomass", + bus1=urban_central, + bus2=urban_central + " urban central heat", + bus3="co2 atmosphere", + bus4="co2 stored", + carrier="urban central solid biomass CHP CC", + p_nom_extendable=True, + capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'] + costs.at['biomass CHP capture', 'fixed'] * costs.at['solid biomass', 'CO2 intensity'], + marginal_cost=costs.at[key, 'VOM'], + efficiency=costs.at[key, 'efficiency'] - costs.at['solid biomass', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'electricity-input'] + costs.at['biomass CHP capture', 'compression-electricity-input']), + efficiency2=costs.at[key, 'efficiency-heat'] + costs.at['solid biomass', 'CO2 intensity'] * (costs.at['biomass CHP capture', 'heat-output'] + 
costs.at['biomass CHP capture', 'compression-heat-output'] - costs.at['biomass CHP capture', 'heat-input']), + efficiency3=-costs.at['solid biomass', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], + efficiency4=costs.at['solid biomass', 'CO2 intensity'] * costs.at['biomass CHP capture', 'capture_rate'], + lifetime=costs.at[key, 'lifetime'] + ) - -def add_industry(network): +def add_industry(n, costs): print("adding industrial demand") nodes = pop_layout.index - #1e6 to convert TWh to MWh - industrial_demand = 1e6*pd.read_csv(snakemake.input.industrial_demand, - index_col=0) + # 1e6 to convert TWh to MWh + industrial_demand = pd.read_csv(snakemake.input.industrial_demand, index_col=0) * 1e6 solid_biomass_by_country = industrial_demand["solid biomass"].groupby(pop_layout.ct).sum() - countries = solid_biomass_by_country.index - network.madd("Bus", - ["solid biomass for industry"], - location="EU", - carrier="solid biomass for industry") + n.add("Bus", + "solid biomass for industry", + location="EU", + carrier="solid biomass for industry" + ) - network.madd("Load", - ["solid biomass for industry"], - bus="solid biomass for industry", - carrier="solid biomass for industry", - p_set=solid_biomass_by_country.sum()/8760.) + n.add("Load", + "solid biomass for industry", + bus="solid biomass for industry", + carrier="solid biomass for industry", + p_set=solid_biomass_by_country.sum() / 8760 + ) - network.madd("Link", - ["solid biomass for industry"], - bus0="EU solid biomass", - bus1="solid biomass for industry", - carrier="solid biomass for industry", - p_nom_extendable=True, - efficiency=1.) + n.add("Link", + "solid biomass for industry", + bus0="EU solid biomass", + bus1="solid biomass for industry", + carrier="solid biomass for industry", + p_nom_extendable=True, + efficiency=1. 
+ ) - network.madd("Link", - ["solid biomass for industry CC"], - bus0="EU solid biomass", - bus1="solid biomass for industry", - bus2="co2 atmosphere", - bus3="co2 stored", - carrier="solid biomass for industry CC", - p_nom_extendable=True, - capital_cost=costs.at["cement capture","fixed"]*costs.at['solid biomass','CO2 intensity'], - efficiency=0.9, - efficiency2=-costs.at['solid biomass','CO2 intensity']*costs.at["cement capture","capture_rate"], - efficiency3=costs.at['solid biomass','CO2 intensity']*costs.at["cement capture","capture_rate"], - lifetime=costs.at['cement capture','lifetime']) + n.add("Link", + "solid biomass for industry CC", + bus0="EU solid biomass", + bus1="solid biomass for industry", + bus2="co2 atmosphere", + bus3="co2 stored", + carrier="solid biomass for industry CC", + p_nom_extendable=True, + capital_cost=costs.at["cement capture", "fixed"] * costs.at['solid biomass', 'CO2 intensity'], + efficiency=0.9, # TODO: make config option + efficiency2=-costs.at['solid biomass', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], + efficiency3=costs.at['solid biomass', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], + lifetime=costs.at['cement capture', 'lifetime'] + ) + + n.add("Bus", + "gas for industry", + location="EU", + carrier="gas for industry") + + n.add("Load", + "gas for industry", + bus="gas for industry", + carrier="gas for industry", + p_set=industrial_demand.loc[nodes, "methane"].sum() / 8760 + ) + + n.add("Link", + "gas for industry", + bus0="EU gas", + bus1="gas for industry", + bus2="co2 atmosphere", + carrier="gas for industry", + p_nom_extendable=True, + efficiency=1., + efficiency2=costs.at['gas', 'CO2 intensity'] + ) + + n.add("Link", + "gas for industry CC", + bus0="EU gas", + bus1="gas for industry", + bus2="co2 atmosphere", + bus3="co2 stored", + carrier="gas for industry CC", + p_nom_extendable=True, + capital_cost=costs.at["cement capture", "fixed"] * costs.at['gas', 'CO2 intensity'], + 
efficiency=0.9, + efficiency2=costs.at['gas', 'CO2 intensity'] * (1 - costs.at["cement capture", "capture_rate"]), + efficiency3=costs.at['gas', 'CO2 intensity'] * costs.at["cement capture", "capture_rate"], + lifetime=costs.at['cement capture', 'lifetime'] + ) - network.madd("Bus", - ["gas for industry"], - location="EU", - carrier="gas for industry") + n.madd("Load", + nodes, + suffix=" H2 for industry", + bus=nodes + " H2", + carrier="H2 for industry", + p_set=industrial_demand.loc[nodes, "hydrogen"] / 8760 + ) - network.madd("Load", - ["gas for industry"], - bus="gas for industry", - carrier="gas for industry", - p_set=industrial_demand.loc[nodes,"methane"].sum()/8760.) + all_navigation = ["total international navigation", "total domestic navigation"] + efficiency = options['shipping_average_efficiency'] / costs.at["fuel cell", "efficiency"] + p_set = nodal_energy_totals.loc[nodes, all_navigation].sum(axis=1) * 1e6 * efficiency / 8760 - network.madd("Link", - ["gas for industry"], - bus0="EU gas", - bus1="gas for industry", - bus2="co2 atmosphere", - carrier="gas for industry", - p_nom_extendable=True, - efficiency=1., - efficiency2=costs.at['gas','CO2 intensity']) + n.madd("Load", + nodes, + suffix=" H2 for shipping", + bus=nodes + " H2", + carrier="H2 for shipping", + p_set=p_set + ) - network.madd("Link", - ["gas for industry CC"], - bus0="EU gas", - bus1="gas for industry", - bus2="co2 atmosphere", - bus3="co2 stored", - carrier="gas for industry CC", - p_nom_extendable=True, - capital_cost=costs.at["cement capture","fixed"]*costs.at['gas','CO2 intensity'], - efficiency=0.9, - efficiency2=costs.at['gas','CO2 intensity']*(1-costs.at["cement capture","capture_rate"]), - efficiency3=costs.at['gas','CO2 intensity']**costs.at["cement capture","capture_rate"], - lifetime=costs.at['cement capture','lifetime']) + if "EU oil" not in n.buses.index: + n.add("Bus", + "EU oil", + location="EU", + carrier="oil" + ) - network.madd("Load", - nodes, - suffix=" H2 for 
industry", - bus=nodes + " H2", - carrier="H2 for industry", - p_set=industrial_demand.loc[nodes,"hydrogen"]/8760.) + if "EU oil Store" not in n.stores.index: + + #could correct to e.g. 0.001 EUR/kWh * annuity and O&M + n.add("Store", + "EU oil Store", + bus="EU oil", + e_nom_extendable=True, + e_cyclic=True, + carrier="oil", + ) + if "EU oil" not in n.generators.index: - network.madd("Load", - nodes, - suffix=" H2 for shipping", - bus=nodes + " H2", - carrier="H2 for shipping", - p_set = nodal_energy_totals.loc[nodes,["total international navigation","total domestic navigation"]].sum(axis=1)*1e6*options['shipping_average_efficiency']/costs.at["fuel cell","efficiency"]/8760.) - - if "EU oil" not in network.buses.index: - network.madd("Bus", - ["EU oil"], - location="EU", - carrier="oil") - - #use madd to get carrier inserted - if "EU oil Store" not in network.stores.index: - network.madd("Store", - ["EU oil Store"], - bus="EU oil", - e_nom_extendable=True, - e_cyclic=True, - carrier="oil", - capital_cost=0.) #could correct to e.g. 
0.001 EUR/kWh * annuity and O&M - - if "EU oil" not in network.generators.index: - network.add("Generator", - "EU oil", - bus="EU oil", - p_nom_extendable=True, - carrier="oil", - capital_cost=0., - marginal_cost=costs.at["oil",'fuel']) + n.add("Generator", + "EU oil", + bus="EU oil", + p_nom_extendable=True, + carrier="oil", + marginal_cost=costs.at["oil", 'fuel'] + ) if options["oil_boilers"]: nodes_heat = create_nodes_for_heat_sector() for name in ["residential rural", "services rural", "residential urban decentral", "services urban decentral"]: - network.madd("Link", - nodes_heat[name] + " " + name + " oil boiler", - p_nom_extendable=True, - bus0="EU oil", - bus1=nodes_heat[name] + " " + name + " heat", - bus2="co2 atmosphere", - carrier=name + " oil boiler", - efficiency=costs.at['decentral oil boiler', 'efficiency'], - efficiency2=costs.at['oil', 'CO2 intensity'], - capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at[ - 'decentral oil boiler', 'fixed'], - lifetime=costs.at['decentral oil boiler','lifetime']) - network.madd("Link", - nodes + " Fischer-Tropsch", - bus0=nodes + " H2", - bus1="EU oil", - bus2="co2 stored", - carrier="Fischer-Tropsch", - efficiency=costs.at["Fischer-Tropsch",'efficiency'], - capital_cost=costs.at["Fischer-Tropsch",'fixed'], - efficiency2=-costs.at["oil",'CO2 intensity']*costs.at["Fischer-Tropsch",'efficiency'], - p_nom_extendable=True, - lifetime=costs.at['Fischer-Tropsch','lifetime']) + n.madd("Link", + nodes_heat[name] + f" {name} oil boiler", + p_nom_extendable=True, + bus0="EU oil", + bus1=nodes_heat[name] + f" {name} heat", + bus2="co2 atmosphere", + carrier=f"{name} oil boiler", + efficiency=costs.at['decentral oil boiler', 'efficiency'], + efficiency2=costs.at['oil', 'CO2 intensity'], + capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'], + lifetime=costs.at['decentral oil boiler', 'lifetime'] + ) - network.madd("Load", - ["naphtha for industry"], - 
bus="EU oil", - carrier="naphtha for industry", - p_set = industrial_demand.loc[nodes,"naphtha"].sum()/8760.) + n.madd("Link", + nodes + " Fischer-Tropsch", + bus0=nodes + " H2", + bus1="EU oil", + bus2="co2 stored", + carrier="Fischer-Tropsch", + efficiency=costs.at["Fischer-Tropsch", 'efficiency'], + capital_cost=costs.at["Fischer-Tropsch", 'fixed'], + efficiency2=-costs.at["oil", 'CO2 intensity'] * costs.at["Fischer-Tropsch", 'efficiency'], + p_nom_extendable=True, + lifetime=costs.at['Fischer-Tropsch', 'lifetime'] + ) - network.madd("Load", - ["kerosene for aviation"], - bus="EU oil", - carrier="kerosene for aviation", - p_set = nodal_energy_totals.loc[nodes,["total international aviation","total domestic aviation"]].sum(axis=1).sum()*1e6/8760.) + n.add("Load", + "naphtha for industry", + bus="EU oil", + carrier="naphtha for industry", + p_set=industrial_demand.loc[nodes, "naphtha"].sum() / 8760 + ) + + all_aviation = ["total international aviation", "total domestic aviation"] + p_set = nodal_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() * 1e6 / 8760 + + n.add("Load", + "kerosene for aviation", + bus="EU oil", + carrier="kerosene for aviation", + p_set=p_set + ) #NB: CO2 gets released again to atmosphere when plastics decay or kerosene is burned #except for the process emissions when naphtha is used for petrochemicals, which can be captured with other industry process emissions #tco2 per hour - co2 = network.loads.loc[["naphtha for industry","kerosene for aviation"],"p_set"].sum()*costs.at["oil",'CO2 intensity'] - industrial_demand.loc[nodes,"process emission from feedstock"].sum()/8760. 
+ co2_release = ["naphtha for industry", "kerosene for aviation"] + co2 = n.loads.loc[co2_release, "p_set"].sum() * costs.at["oil", 'CO2 intensity'] - industrial_demand.loc[nodes, "process emission from feedstock"].sum() / 8760 - network.madd("Load", - ["oil emissions"], - bus="co2 atmosphere", - carrier="oil emissions", - p_set=-co2) + n.add("Load", + "oil emissions", + bus="co2 atmosphere", + carrier="oil emissions", + p_set=-co2 + ) - network.madd("Load", - nodes, - suffix=" low-temperature heat for industry", - bus=[node + " urban central heat" if node + " urban central heat" in network.buses.index else node + " services urban decentral heat" for node in nodes], - carrier="low-temperature heat for industry", - p_set=industrial_demand.loc[nodes,"low-temperature heat"]/8760.) + # TODO simplify bus expression + n.madd("Load", + nodes, + suffix=" low-temperature heat for industry", + bus=[node + " urban central heat" if node + " urban central heat" in n.buses.index else node + " services urban decentral heat" for node in nodes], + carrier="low-temperature heat for industry", + p_set=industrial_demand.loc[nodes, "low-temperature heat"] / 8760 + ) - #remove today's industrial electricity demand by scaling down total electricity demand - for ct in n.buses.country.unique(): - loads = n.loads.index[(n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity")] - if n.loads_t.p_set[loads].empty: continue - factor = 1 - industrial_demand.loc[loads,"current electricity"].sum()/n.loads_t.p_set[loads].sum().sum() - n.loads_t.p_set[loads] *= factor + # remove today's industrial electricity demand by scaling down total electricity demand + for ct in n.buses.country.dropna().unique(): + # TODO map onto n.bus.country + loads_i = n.loads.index[(n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity")] + if n.loads_t.p_set[loads_i].empty: continue + factor = 1 - industrial_demand.loc[loads_i, "current electricity"].sum() / n.loads_t.p_set[loads_i].sum().sum() + 
n.loads_t.p_set[loads_i] *= factor - network.madd("Load", - nodes, - suffix=" industry electricity", - bus=nodes, - carrier="industry electricity", - p_set=industrial_demand.loc[nodes,"electricity"]/8760.) + n.madd("Load", + nodes, + suffix=" industry electricity", + bus=nodes, + carrier="industry electricity", + p_set=industrial_demand.loc[nodes, "electricity"] / 8760 + ) - network.madd("Bus", - ["process emissions"], - location="EU", - carrier="process emissions") + n.add("Bus", + "process emissions", + location="EU", + carrier="process emissions" + ) - #this should be process emissions fossil+feedstock - #then need load on atmosphere for feedstock emissions that are currently going to atmosphere via Link Fischer-Tropsch demand - network.madd("Load", - ["process emissions"], - bus="process emissions", - carrier="process emissions", - p_set = -industrial_demand.loc[nodes,["process emission","process emission from feedstock"]].sum(axis=1).sum()/8760.) + # this should be process emissions fossil+feedstock + # then need load on atmosphere for feedstock emissions that are currently going to atmosphere via Link Fischer-Tropsch demand + n.add("Load", + "process emissions", + bus="process emissions", + carrier="process emissions", + p_set=-industrial_demand.loc[nodes,["process emission", "process emission from feedstock"]].sum(axis=1).sum() / 8760 + ) - network.madd("Link", - ["process emissions"], - bus0="process emissions", - bus1="co2 atmosphere", - carrier="process emissions", - p_nom_extendable=True, - efficiency=1.) + n.add("Link", + "process emissions", + bus0="process emissions", + bus1="co2 atmosphere", + carrier="process emissions", + p_nom_extendable=True, + efficiency=1. 
+ ) #assume enough local waste heat for CC - network.madd("Link", - ["process emissions CC"], - bus0="process emissions", - bus1="co2 atmosphere", - bus2="co2 stored", - carrier="process emissions CC", - p_nom_extendable=True, - capital_cost=costs.at["cement capture","fixed"], - efficiency=(1-costs.at["cement capture","capture_rate"]), - efficiency2=costs.at["cement capture","capture_rate"], - lifetime=costs.at['cement capture','lifetime']) + n.add("Link", + "process emissions CC", + bus0="process emissions", + bus1="co2 atmosphere", + bus2="co2 stored", + carrier="process emissions CC", + p_nom_extendable=True, + capital_cost=costs.at["cement capture", "fixed"], + efficiency=1 - costs.at["cement capture", "capture_rate"], + efficiency2=costs.at["cement capture", "capture_rate"], + lifetime=costs.at['cement capture', 'lifetime'] + ) - -def add_waste_heat(network): +def add_waste_heat(n): + # TODO options? print("adding possibility to use industrial waste heat in district heating") #AC buses with district heating - urban_central = network.buses.index[network.buses.carrier == "urban central heat"] + urban_central = n.buses.index[n.buses.carrier == "urban central heat"] if not urban_central.empty: urban_central = urban_central.str[:-len(" urban central heat")] + # TODO what is the 0.95 and should it be a config option? 
if options['use_fischer_tropsch_waste_heat']: - network.links.loc[urban_central + " Fischer-Tropsch","bus3"] = urban_central + " urban central heat" - network.links.loc[urban_central + " Fischer-Tropsch","efficiency3"] = 0.95 - network.links.loc[urban_central + " Fischer-Tropsch","efficiency"] + n.links.loc[urban_central + " Fischer-Tropsch", "bus3"] = urban_central + " urban central heat" + n.links.loc[urban_central + " Fischer-Tropsch", "efficiency3"] = 0.95 - n.links.loc[urban_central + " Fischer-Tropsch", "efficiency"] if options['use_fuel_cell_waste_heat']: - network.links.loc[urban_central + " H2 Fuel Cell","bus2"] = urban_central + " urban central heat" - network.links.loc[urban_central + " H2 Fuel Cell","efficiency2"] = 0.95 - network.links.loc[urban_central + " H2 Fuel Cell","efficiency"] + n.links.loc[urban_central + " H2 Fuel Cell", "bus2"] = urban_central + " urban central heat" + n.links.loc[urban_central + " H2 Fuel Cell", "efficiency2"] = 0.95 - n.links.loc[urban_central + " H2 Fuel Cell", "efficiency"] + def decentral(n): - n.lines.drop(n.lines.index,inplace=True) - n.links.drop(n.links.index[n.links.carrier.isin(["DC","B2B"])],inplace=True) + """Removes the electricity transmission system.""" + n.lines.drop(n.lines.index, inplace=True) + n.links.drop(n.links.index[n.links.carrier.isin(["DC", "B2B"])], inplace=True) + def remove_h2_network(n): - nodes = pop_layout.index + n.links.drop(n.links.index[n.links.carrier == "H2 pipeline"], inplace=True) - n.links.drop(n.links.index[n.links.carrier.isin(["H2 pipeline"])],inplace=True) + if "EU H2 Store" in n.stores.index: + n.stores.drop("EU H2 Store", inplace=True) - n.stores.drop(["EU H2 Store"],inplace=True) - if options['hydrogen_underground_storage']: - h2_capital_cost = costs.at["gas storage","fixed"] - #h2_capital_cost = costs.at["hydrogen underground storage","fixed"] - else: - h2_capital_cost = costs.at["hydrogen storage","fixed"] +def maybe_adjust_costs_and_potentials(n, opts): - #put back nodal 
H2 storage - n.madd("Store", - nodes + " H2 Store", - bus=nodes + " H2", - e_nom_extendable=True, - e_cyclic=True, - carrier="H2 Store", - capital_cost=h2_capital_cost) + for o in opts: + if "+" not in o: continue + oo = o.split("+") + carrier_list = np.hstack((n.generators.carrier.unique(), n.links.carrier.unique(), + n.stores.carrier.unique(), n.storage_units.carrier.unique())) + suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) + if oo[0].startswith(tuple(suptechs)): + carrier = oo[0] + attr_lookup = {"p": "p_nom_max", "c": "capital_cost"} + attr = attr_lookup[oo[1][0]] + factor = float(oo[1][1:]) + #beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan + if carrier == "AC": # lines do not have carrier + n.lines[attr] *= factor + else: + comps = {"Generator", "Link", "StorageUnit"} if attr == 'p_nom_max' else {"Generator", "Link", "StorageUnit", "Store"} + for c in n.iterate_components(comps): + if carrier=='solar': + sel = c.df.carrier.str.contains(carrier) & ~c.df.carrier.str.contains("solar rooftop") + else: + sel = c.df.carrier.str.contains(carrier) + c.df.loc[sel,attr] *= factor + print("changing", attr , "for", carrier, "by factor", factor) -def get_parameter(item): - """Check whether it depends on investment year""" - if type(item) is dict: - return item[investment_year] - else: - return item +# TODO this should rather be a config no wildcard +def limit_individual_line_extension(n, maxext): + print(f"limiting new HVAC and HVDC extensions to {maxext} MW") + n.lines['s_nom_max'] = n.lines['s_nom'] + maxext + hvdc = n.links.index[n.links.carrier == 'DC'] + n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils.snakemake import MockSnakemake - snakemake = MockSnakemake( - wildcards=dict(network='elec', simpl='', clusters='37', lv='1.0', - opts='', planning_horizons='2020', - 
sector_opts='120H-T-H-B-I-onwind+p3-dist1-cb48be3'), - - input=dict( network='../pypsa-eur/networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc', - energy_totals_name='resources/energy_totals.csv', - co2_totals_name='resources/co2_totals.csv', - transport_name='resources/transport_data.csv', - traffic_data = "data/emobility/", - biomass_potentials='resources/biomass_potentials.csv', - timezone_mappings='data/timezone_mappings.csv', - heat_profile="data/heat_load_profile_BDEW.csv", - costs="../technology-data/outputs/costs_{planning_horizons}.csv", - h2_cavern = "data/hydrogen_salt_cavern_potentials.csv", - profile_offwind_ac="../pypsa-eur/resources/profile_offwind-ac.nc", - profile_offwind_dc="../pypsa-eur/resources/profile_offwind-dc.nc", - busmap_s="../pypsa-eur/resources/busmap_elec_s{simpl}.csv", - busmap="../pypsa-eur/resources/busmap_elec_s{simpl}_{clusters}.csv", - clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", - simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv", - industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv", - heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc", - heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc", - heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc", - temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc", - temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc", - temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc", - temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", - temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", - temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc", - cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", - cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc", - 
cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc", - cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc", - cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc", - solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc", - solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", - solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc", - retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv", - floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv" - ), - output=['results/version-cb48be3/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] + from helper import mock_snakemake + snakemake = mock_snakemake( + 'prepare_sector_network', + simpl='', + clusters=48, + lv=1.0, + sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', + planning_horizons=2020, ) - import yaml - with open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) - logging.basicConfig(level=snakemake.config['logging_level']) - timezone_mappings = pd.read_csv(snakemake.input.timezone_mappings,index_col=0,squeeze=True,header=None) - options = snakemake.config["sector"] opts = snakemake.wildcards.sector_opts.split('-') - investment_year=int(snakemake.wildcards.planning_horizons[-4:]) + investment_year = int(snakemake.wildcards.planning_horizons[-4:]) - n = pypsa.Network(snakemake.input.network, - override_component_attrs=override_component_attrs) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) - Nyears = n.snapshot_weightings.sum()/8760. 
- - pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout,index_col=0) - pop_layout["ct"] = pop_layout.index.str[:2] - ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() - pop_layout["ct_total"] = pop_layout["ct"].map(ct_total.get) - pop_layout["fraction"] = pop_layout["total"]/pop_layout["ct_total"] - - simplified_pop_layout = pd.read_csv(snakemake.input.simplified_pop_layout,index_col=0) + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + Nyears = n.snapshot_weightings.generators.sum() / 8760 costs = prepare_costs(snakemake.input.costs, snakemake.config['costs']['USD2013_to_EUR2013'], @@ -1960,57 +1982,53 @@ if __name__ == "__main__": Nyears, snakemake.config['costs']['lifetime']) - remove_elec_base_techs(n) + patch_electricity_network(n) - n.loads["carrier"] = "electricity" + if snakemake.config["foresight"] == 'myopic': + + add_lifetime_wind_solar(n, costs) - remove_non_electric_buses(n) + conventional = snakemake.config['existing_capacities']['conventional_carriers'] + add_carrier_buses(n, conventional) - n.buses["location"] = n.buses.index + add_co2_tracking(n, options) - update_wind_solar_costs(n, costs) + add_generation(n, costs) - if snakemake.config["foresight"]=='myopic': - add_lifetime_wind_solar(n) - add_carrier_buses(n,snakemake.config['existing_capacities']['conventional_carriers']) - - add_co2_tracking(n) - - add_generation(n) - - add_storage(n) + add_storage(n, costs) + # TODO merge with opts cost adjustment below for o in opts: if o[:4] == "wave": - wave_cost_factor = float(o[4:].replace("p",".").replace("m","-")) + wave_cost_factor = float(o[4:].replace("p", ".").replace("m", "-")) print("Including wave generators with cost factor of", wave_cost_factor) add_wave(n, wave_cost_factor) if o[:4] == "dist": - snakemake.config["sector"]['electricity_distribution_grid'] = True - snakemake.config["sector"]['electricity_distribution_grid_cost_factor'] = float(o[4:].replace("p",".").replace("m","-")) + 
options['electricity_distribution_grid'] = True + options['electricity_distribution_grid_cost_factor'] = float(o[4:].replace("p", ".").replace("m", "-")) - nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, co2_totals, nodal_transport_data = prepare_data(n) + nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, nodal_transport_data = prepare_data(n) if "nodistrict" in opts: options["central"] = False if "T" in opts: - add_land_transport(n) + add_land_transport(n, costs) if "H" in opts: - add_heat(n) + add_heat(n, costs) if "B" in opts: - add_biomass(n) + add_biomass(n, costs) if "I" in opts: - add_industry(n) + add_industry(n, costs) if "I" in opts and "H" in opts: add_waste_heat(n) if options['dac']: - add_dac(n) + add_dac(n, costs) if "decentral" in opts: decentral(n) @@ -2023,78 +2041,42 @@ if __name__ == "__main__": if m is not None: n = average_every_nhours(n, m.group(0)) break - else: - logger.info("No resampling") - - #process CO2 limit - limit = get_parameter(snakemake.config["co2_budget"]) - print("CO2 limit set to",limit) + limit_type = "config" + limit = get(snakemake.config["co2_budget"], investment_year) for o in opts: - - if "cb" in o: - path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/' - if not os.path.exists(path_cb): - os.makedirs(path_cb) - try: - CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv', index_col=0) - except: - build_carbon_budget(o) - CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv', index_col=0) - - limit=CO2_CAP.loc[investment_year] - print("overriding CO2 limit with scenario limit",limit) - - + if not "cb" in o: continue + limit_type = "carbon budget" + fn = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/carbon_budget_distribution.csv' + if not os.path.exists(fn): + build_carbon_budget(o, fn) + co2_cap = pd.read_csv(fn, index_col=0, squeeze=True) + 
limit = co2_cap[investment_year] + break for o in opts: - if "Co2L" in o: - limit = o[o.find("Co2L")+4:] - limit = float(limit.replace("p",".").replace("m","-")) - print("overriding CO2 limit with scenario limit",limit) - - print("adding CO2 budget limit as per unit of 1990 levels of",limit) + if not "Co2L" in o: continue + limit_type = "wildcard" + limit = o[o.find("Co2L")+4:] + limit = float(limit.replace("p", ".").replace("m", "-")) + break + print("add CO2 limit from", limit_type) add_co2limit(n, Nyears, limit) for o in opts: + if not o[:10] == 'linemaxext': continue + maxext = float(o[10:]) * 1e3 + limit_individual_line_extension(n, maxext) + break - if o[:10] == 'linemaxext': - maxext = float(o[10:])*1e3 - print("limiting new HVAC and HVDC extensions to",maxext,"MW") - n.lines['s_nom_max'] = n.lines['s_nom'] + maxext - hvdc = n.links.index[n.links.carrier == 'DC'] - n.links.loc[hvdc,'p_nom_max'] = n.links.loc[hvdc,'p_nom'] + maxext + if options['electricity_distribution_grid']: + insert_electricity_distribution_grid(n, costs) + maybe_adjust_costs_and_potentials(n, opts) - if snakemake.config["sector"]['electricity_distribution_grid']: - insert_electricity_distribution_grid(n) - for o in opts: - if "+" in o: - oo = o.split("+") - carrier_list=np.hstack((n.generators.carrier.unique(), n.links.carrier.unique(), - n.stores.carrier.unique(), n.storage_units.carrier.unique())) - suptechs = map(lambda c: c.split("-", 2)[0], carrier_list) - if oo[0].startswith(tuple(suptechs)): - carrier = oo[0] - attr_lookup = {"p": "p_nom_max", "c": "capital_cost"} - attr = attr_lookup[oo[1][0]] - factor = float(oo[1][1:]) - #beware if factor is 0 and p_nom_max is np.inf, 0*np.inf is nan - if carrier == "AC": # lines do not have carrier - n.lines[attr] *= factor - else: - comps = {"Generator", "Link", "StorageUnit"} if attr=='p_nom_max' else {"Generator", "Link", "StorageUnit", "Store"} - for c in n.iterate_components(comps): - if carrier=='solar': - sel = 
c.df.carrier.str.contains(carrier) & ~c.df.carrier.str.contains("solar rooftop") - else: - sel = c.df.carrier.str.contains(carrier) - c.df.loc[sel,attr] *= factor - print("changing", attr ,"for",carrier,"by factor",factor) + if options['gas_distribution_grid']: + insert_gas_distribution_costs(n, costs) - - if snakemake.config["sector"]['gas_distribution_grid']: - insert_gas_distribution_costs(n) - if snakemake.config["sector"]['electricity_grid_connection']: - add_electricity_grid_connection(n) + if options['electricity_grid_connection']: + add_electricity_grid_connection(n, costs) n.export_to_netcdf(snakemake.output[0]) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 1e8f7a64..8c0313f1 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -1,55 +1,35 @@ - -import numpy as np -import pandas as pd -import logging -logger = logging.getLogger(__name__) -import gc -import os +"""Solve network.""" import pypsa +import numpy as np + from pypsa.linopt import get_var, linexpr, define_constraints -from pypsa.descriptors import free_output_series_dataframes - -# Suppress logging of the slack bus choices -pypsa.pf.logger.setLevel(logging.WARNING) +from pypsa.linopf import network_lopf, ilopf from vresutils.benchmark import memory_logger +from helper import override_component_attrs + +import logging +logger = logging.getLogger(__name__) +pypsa.pf.logger.setLevel(logging.WARNING) -#First tell PyPSA that links can have multiple outputs by -#overriding the component_attrs. This can be done for -#as many buses as you need with format busi for i = 2,3,4,5,.... 
-#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs +def add_land_use_constraint(n): + #warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' + for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']: + existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum() + existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons + n.generators.loc[existing.index, "p_nom_max"] -= existing -override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()}) -override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"] -override_component_attrs["Link"].loc["bus4"] = ["string",np.nan,np.nan,"4th bus","Input (optional)"] -override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["efficiency4"] = ["static or series","per unit",1.,"4th bus efficiency","Input (optional)"] -override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"] -override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"] -override_component_attrs["Link"].loc["p4"] = ["series","MW",0.,"4th bus output","Output"] + n.generators.p_nom_max.clip(lower=0, inplace=True) - -def patch_pyomo_tmpdir(tmpdir): - # PYOMO should write its lp files into tmp here - import os - if not os.path.isdir(tmpdir): - os.mkdir(tmpdir) - from pyutilib.services import TempfileManager - TempfileManager.tempdir = tmpdir - def prepare_network(n, solve_opts=None): - if solve_opts is None: - solve_opts = 
snakemake.config['solving']['options'] - + if 'clip_p_max_pu' in solve_opts: for df in (n.generators_t.p_max_pu, n.generators_t.p_min_pu, n.storage_units_t.inflow): df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True) @@ -73,50 +53,31 @@ def prepare_network(n, solve_opts=None): # t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5) if 'marginal_cost' in t.df: np.random.seed(174) - t.df['marginal_cost'] += 1e-2 + 2e-3*(np.random.random(len(t.df)) - 0.5) + t.df['marginal_cost'] += 1e-2 + 2e-3 * (np.random.random(len(t.df)) - 0.5) for t in n.iterate_components(['Line', 'Link']): np.random.seed(123) - t.df['capital_cost'] += (1e-1 + 2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length'] + t.df['capital_cost'] += (1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)) * t.df['length'] if solve_opts.get('nhours'): nhours = solve_opts['nhours'] n.set_snapshots(n.snapshots[:nhours]) n.snapshot_weightings[:] = 8760./nhours - if snakemake.config['foresight']=='myopic': + if snakemake.config['foresight'] == 'myopic': add_land_use_constraint(n) return n -def add_opts_constraints(n, opts=None): - if opts is None: - opts = snakemake.wildcards.opts.split('-') - - if 'BAU' in opts: - mincaps = snakemake.config['electricity']['BAU_mincapacities'] - def bau_mincapacities_rule(model, carrier): - gens = n.generators.index[n.generators.p_nom_extendable & (n.generators.carrier == carrier)] - return sum(model.generator_p_nom[gen] for gen in gens) >= mincaps[carrier] - n.model.bau_mincapacities = pypsa.opt.Constraint(list(mincaps), rule=bau_mincapacities_rule) - - if 'SAFE' in opts: - peakdemand = (1. 
+ snakemake.config['electricity']['SAFE_reservemargin']) * n.loads_t.p_set.sum(axis=1).max() - conv_techs = snakemake.config['plotting']['conv_techs'] - exist_conv_caps = n.generators.loc[n.generators.carrier.isin(conv_techs) & ~n.generators.p_nom_extendable, 'p_nom'].sum() - ext_gens_i = n.generators.index[n.generators.carrier.isin(conv_techs) & n.generators.p_nom_extendable] - n.model.safe_peakdemand = pypsa.opt.Constraint(expr=sum(n.model.generator_p_nom[gen] for gen in ext_gens_i) >= peakdemand - exist_conv_caps) - -def add_eps_storage_constraint(n): - if not hasattr(n, 'epsilon'): - n.epsilon = 1e-5 - fix_sus_i = n.storage_units.index[~ n.storage_units.p_nom_extendable] - n.model.objective.expr += sum(n.epsilon * n.model.state_of_charge[su, n.snapshots[0]] for su in fix_sus_i) def add_battery_constraints(n): - chargers = n.links.index[n.links.carrier.str.contains("battery charger") & n.links.p_nom_extendable] - dischargers = chargers.str.replace("charger","discharger") + chargers_b = n.links.carrier.str.contains("battery charger") + chargers = n.links.index[chargers_b & n.links.p_nom_extendable] + dischargers = chargers.str.replace("charger", "discharger") + + if chargers.empty or ('Link', 'p_nom') not in n.variables.index: + return link_p_nom = get_var(n, "Link", "p_nom") @@ -138,44 +99,28 @@ def add_chp_constraints(n): electric = n.links.index[electric_bool] heat = n.links.index[heat_bool] + electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable] heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable] + electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable] heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable] + link_p = get_var(n, "Link", "p") if not electric_ext.empty: link_p_nom = get_var(n, "Link", "p_nom") #ratio of output heat to electricity set by p_nom_ratio - lhs = linexpr((n.links.loc[electric_ext,"efficiency"] - *n.links.loc[electric_ext,'p_nom_ratio'], + lhs = linexpr((n.links.loc[electric_ext, 
"efficiency"] + *n.links.loc[electric_ext, "p_nom_ratio"], link_p_nom[electric_ext]), - (-n.links.loc[heat_ext,"efficiency"].values, + (-n.links.loc[heat_ext, "efficiency"].values, link_p_nom[heat_ext].values)) + define_constraints(n, lhs, "=", 0, 'chplink', 'fix_p_nom_ratio') - - if not electric.empty: - - link_p = get_var(n, "Link", "p") - - #backpressure - lhs = linexpr((n.links.loc[electric,'c_b'].values - *n.links.loc[heat,"efficiency"], - link_p[heat]), - (-n.links.loc[electric,"efficiency"].values, - link_p[electric].values)) - - define_constraints(n, lhs, "<=", 0, 'chplink', 'backpressure') - - - if not electric_ext.empty: - - link_p_nom = get_var(n, "Link", "p_nom") - link_p = get_var(n, "Link", "p") - #top_iso_fuel_line for extendable lhs = linexpr((1,link_p[heat_ext]), (1,link_p[electric_ext].values), @@ -183,222 +128,93 @@ def add_chp_constraints(n): define_constraints(n, lhs, "<=", 0, 'chplink', 'top_iso_fuel_line_ext') - if not electric_fix.empty: - link_p = get_var(n, "Link", "p") - #top_iso_fuel_line for fixed lhs = linexpr((1,link_p[heat_fix]), (1,link_p[electric_fix].values)) - define_constraints(n, lhs, "<=", n.links.loc[electric_fix,"p_nom"].values, 'chplink', 'top_iso_fuel_line_fix') + rhs = n.links.loc[electric_fix, "p_nom"].values -def add_land_use_constraint(n): + define_constraints(n, lhs, "<=", rhs, 'chplink', 'top_iso_fuel_line_fix') - #warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind' - for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']: - existing_capacities = n.generators.loc[n.generators.carrier==carrier,"p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum() - existing_capacities.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons - n.generators.loc[existing_capacities.index,"p_nom_max"] -= existing_capacities + if not electric.empty: + + #backpressure + lhs = linexpr((n.links.loc[electric, "c_b"].values + *n.links.loc[heat, "efficiency"], + 
link_p[heat]), + (-n.links.loc[electric, "efficiency"].values, + link_p[electric].values)) + + define_constraints(n, lhs, "<=", 0, 'chplink', 'backpressure') - n.generators.p_nom_max[n.generators.p_nom_max<0]=0. def extra_functionality(n, snapshots): - #add_opts_constraints(n, opts) - #add_eps_storage_constraint(n) add_chp_constraints(n) add_battery_constraints(n) -def fix_branches(n, lines_s_nom=None, links_p_nom=None): - if lines_s_nom is not None and len(lines_s_nom) > 0: - n.lines.loc[lines_s_nom.index,"s_nom"] = lines_s_nom.values - n.lines.loc[lines_s_nom.index,"s_nom_extendable"] = False - if links_p_nom is not None and len(links_p_nom) > 0: - n.links.loc[links_p_nom.index,"p_nom"] = links_p_nom.values - n.links.loc[links_p_nom.index,"p_nom_extendable"] = False - -def solve_network(n, config=None, solver_log=None, opts=None): - if config is None: - config = snakemake.config['solving'] - solve_opts = config['options'] - - solver_options = config['solver'].copy() - if solver_log is None: - solver_log = snakemake.log.solver +def solve_network(n, config, opts='', **kwargs): + solver_options = config['solving']['solver'].copy() solver_name = solver_options.pop('name') + cf_solving = config['solving']['options'] + track_iterations = cf_solving.get('track_iterations', False) + min_iterations = cf_solving.get('min_iterations', 4) + max_iterations = cf_solving.get('max_iterations', 6) - def run_lopf(n, allow_warning_status=False, fix_zero_lines=False, fix_ext_lines=False): - free_output_series_dataframes(n) - - if fix_zero_lines: - fix_lines_b = (n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable - fix_links_b = (n.links.carrier=='DC') & (n.links.p_nom_opt == 0.) 
& n.links.p_nom_extendable - fix_branches(n, - lines_s_nom=pd.Series(0., n.lines.index[fix_lines_b]), - links_p_nom=pd.Series(0., n.links.index[fix_links_b])) - - if fix_ext_lines: - fix_branches(n, - lines_s_nom=n.lines.loc[n.lines.s_nom_extendable, 's_nom_opt'], - links_p_nom=n.links.loc[(n.links.carrier=='DC') & n.links.p_nom_extendable, 'p_nom_opt']) - if "line_volume_constraint" in n.global_constraints.index: - n.global_constraints.drop("line_volume_constraint",inplace=True) - else: - if "line_volume_constraint" not in n.global_constraints.index: - line_volume = getattr(n, 'line_volume_limit', None) - if line_volume is not None and not np.isinf(line_volume): - n.add("GlobalConstraint", - "line_volume_constraint", - type="transmission_volume_expansion_limit", - carrier_attribute="AC,DC", - sense="<=", - constant=line_volume) - - - # Firing up solve will increase memory consumption tremendously, so - # make sure we freed everything we can - gc.collect() - - #from pyomo.opt import ProblemFormat - #print("Saving model to MPS") - #n.model.write('/home/ka/ka_iai/ka_kc5996/projects/pypsa-eur/128-B-I.mps', format=ProblemFormat.mps) - #print("Model is saved to MPS") - #sys.exit() - - - status, termination_condition = n.lopf(pyomo=False, - solver_name=solver_name, - solver_logfile=solver_log, - solver_options=solver_options, - solver_dir=tmpdir, - extra_functionality=extra_functionality, - formulation=solve_opts['formulation']) - #extra_postprocessing=extra_postprocessing - #keep_files=True - #free_memory={'pypsa'} - - assert status == "ok" or allow_warning_status and status == 'warning', \ - ("network_lopf did abort with status={} " - "and termination_condition={}" - .format(status, termination_condition)) - - if not fix_ext_lines and "line_volume_constraint" in n.global_constraints.index: - n.line_volume_limit_dual = n.global_constraints.at["line_volume_constraint","mu"] - print("line volume limit dual:",n.line_volume_limit_dual) - - return status, 
termination_condition - - lines_ext_b = n.lines.s_nom_extendable - if lines_ext_b.any(): - # puh: ok, we need to iterate, since there is a relation - # between s/p_nom and r, x for branches. - msq_threshold = 0.01 - lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']]) - - lines['s_nom'] = ( - np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * - n.lines.bus0.map(n.buses.v_nom) - ).where(n.lines.type != '', n.lines['s_nom']) - - lines_ext_typed_b = (n.lines.type != '') & lines_ext_b - lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b - - def update_line_parameters(n, zero_lines_below=10, fix_zero_lines=False): - if zero_lines_below > 0: - n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0. - n.links.loc[(n.links.carrier=='DC') & (n.links.p_nom_opt < zero_lines_below), 'p_nom_opt'] = 0. - - if lines_ext_untyped_b.any(): - for attr in ('r', 'x'): - n.lines.loc[lines_ext_untyped_b, attr] = ( - lines[attr].multiply(lines['s_nom']/n.lines['s_nom_opt']) - ) - - if lines_ext_typed_b.any(): - n.lines.loc[lines_ext_typed_b, 'num_parallel'] = ( - n.lines['s_nom_opt']/lines['s_nom'] - ) - logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel'])) - - iteration = 1 - - lines['s_nom_opt'] = lines['s_nom'] * n.lines['num_parallel'].where(n.lines.type != '', 1.) - status, termination_condition = run_lopf(n, allow_warning_status=True) - - def msq_diff(n): - lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean())/lines['s_nom_opt'].mean() - logger.info("Mean square difference after iteration {} is {}".format(iteration, lines_err)) - return lines_err - - min_iterations = solve_opts.get('min_iterations', 2) - max_iterations = solve_opts.get('max_iterations', 999) - - while msq_diff(n) > msq_threshold or iteration < min_iterations: - if iteration >= max_iterations: - logger.info("Iteration {} beyond max_iterations {}. 
Stopping ...".format(iteration, max_iterations)) - break - - update_line_parameters(n) - lines['s_nom_opt'] = n.lines['s_nom_opt'] - iteration += 1 - - status, termination_condition = run_lopf(n, allow_warning_status=True) - - update_line_parameters(n, zero_lines_below=100) - - logger.info("Starting last run with fixed extendable lines") - - # Not really needed, could also be taken out - # if 'snakemake' in globals(): - # fn = os.path.basename(snakemake.output[0]) - # n.export_to_netcdf('/home/vres/data/jonas/playground/pypsa-eur/' + fn) - - status, termination_condition = run_lopf(n, allow_warning_status=True, fix_ext_lines=True) - - # Drop zero lines from network - # zero_lines_i = n.lines.index[(n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable] - # if len(zero_lines_i): - # n.mremove("Line", zero_lines_i) - # zero_links_i = n.links.index[(n.links.p_nom_opt == 0.) & n.links.p_nom_extendable] - # if len(zero_links_i): - # n.mremove("Link", zero_links_i) - + # add to network for extra_functionality + n.config = config + n.opts = opts + if cf_solving.get('skip_iterations', False): + network_lopf(n, solver_name=solver_name, solver_options=solver_options, + extra_functionality=extra_functionality, **kwargs) + else: + ilopf(n, solver_name=solver_name, solver_options=solver_options, + track_iterations=track_iterations, + min_iterations=min_iterations, + max_iterations=max_iterations, + extra_functionality=extra_functionality, **kwargs) return n + if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): - from vresutils.snakemake import MockSnakemake, Dict - snakemake = MockSnakemake( - wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0', - sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', - co2_budget_name='b30b3', planning_horizons='2050'), - 
input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"), - output=["results/networks/s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}-test.nc"], - log=dict(gurobi="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log", - python="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log") + from helper import mock_snakemake + snakemake = mock_snakemake( + 'solve_network', + simpl='', + clusters=48, + lv=1.0, + sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', + planning_horizons=2050, ) - import yaml - with open('config.yaml', encoding='utf8') as f: - snakemake.config = yaml.safe_load(f) - tmpdir = snakemake.config['solving'].get('tmpdir') - if tmpdir is not None: - patch_pyomo_tmpdir(tmpdir) logging.basicConfig(filename=snakemake.log.python, level=snakemake.config['logging_level']) - with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem: + tmpdir = snakemake.config['solving'].get('tmpdir') + if tmpdir is not None: + Path(tmpdir).mkdir(parents=True, exist_ok=True) + opts = snakemake.wildcards.opts.split('-') + solve_opts = snakemake.config['solving']['options'] - n = pypsa.Network(snakemake.input.network, - override_component_attrs=override_component_attrs) + fn = getattr(snakemake.log, 'memory', None) + with memory_logger(filename=fn, interval=30.) 
as mem: - n = prepare_network(n) + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) - n = solve_network(n) + n = prepare_network(n, solve_opts) + + n = solve_network(n, config=snakemake.config, opts=opts, + solver_dir=tmpdir, + solver_logfile=snakemake.log.solver) + + if "lv_limit" in n.global_constraints.index: + n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"] + n.line_volume_limit_dual = n.global_constraints.at["lv_limit", "mu"] n.export_to_netcdf(snakemake.output[0])