diff --git a/.gitignore b/.gitignore index 3909265b..a55300e2 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ gurobi.log /data/switzerland* /data/.nfs* /data/Industrial_Database.csv +/data/retro/tabula-calculator-calcsetbuilding.csv *.org diff --git a/Snakefile b/Snakefile index a7c1814d..af506cd5 100644 --- a/Snakefile +++ b/Snakefile @@ -51,9 +51,9 @@ rule build_clustered_population_layouts: pop_layout_total="resources/pop_layout_total.nc", pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc", - regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson') + regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}_{clusters}.geojson') output: - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv" + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv" resources: mem_mb=10000 script: "scripts/build_clustered_population_layouts.py" @@ -63,9 +63,9 @@ rule build_simplified_population_layouts: pop_layout_total="resources/pop_layout_total.nc", pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc", - regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}.geojson') + regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}.geojson') output: - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}.csv" + clustered_pop_layout="resources/pop_layout_elec_s{simpl}.csv" resources: mem_mb=10000 script: "scripts/build_clustered_population_layouts.py" @@ -84,11 +84,11 @@ rule build_heat_demands: pop_layout_total="resources/pop_layout_total.nc", pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc", - regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson") + regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson") output: - heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc", - heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc", - heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc" + heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc", + heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc", + heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 script: "scripts/build_heat_demand.py" @@ -97,33 +97,33 @@ rule build_temperature_profiles: pop_layout_total="resources/pop_layout_total.nc", pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc", - regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson") + regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson") output: - temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc", - temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc", - temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", - temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc", - temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc" + temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc", + temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc", + 
temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc", + temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", + temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 script: "scripts/build_temperature_profiles.py" rule build_cop_profiles: input: - temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc", - temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc", - temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", - temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc", - temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc" + temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc", + temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc", + temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc", + temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", + temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc" output: - cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc", - cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc", - cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc" + cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc", + cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc", + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc", + cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 script: "scripts/build_cop_profiles.py" @@ -133,11 +133,11 @@ rule build_solar_thermal_profiles: pop_layout_total="resources/pop_layout_total.nc", pop_layout_urban="resources/pop_layout_urban.nc", pop_layout_rural="resources/pop_layout_rural.nc", - regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson") + regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson") output: - solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc", - solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc", - solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc" + solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc", + solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", + solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc" resources: mem_mb=20000 script: "scripts/build_solar_thermal_profiles.py" @@ -208,12 +208,12 @@ rule build_industrial_production_per_country_tomorrow: rule build_industrial_distribution_key: input: - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", 
europe_shape=pypsaeur('resources/europe_shape.geojson'), hotmaps_industrial_database="data/Industrial_Database.csv", - network=pypsaeur('networks/{network}_s{simpl}_{clusters}.nc') + network=pypsaeur('networks/elec_s{simpl}_{clusters}.nc') output: - industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv" + industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 script: 'scripts/build_industrial_distribution_key.py' @@ -222,10 +222,10 @@ rule build_industrial_distribution_key: rule build_industrial_production_per_node: input: - industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv", + industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv", industrial_production_per_country_tomorrow="resources/industrial_production_per_country_tomorrow.csv" output: - industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv" + industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 script: 'scripts/build_industrial_production_per_node.py' @@ -234,10 +234,10 @@ rule build_industrial_production_per_node: rule build_industrial_energy_demand_per_node: input: industry_sector_ratios="resources/industry_sector_ratios.csv", - industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv", - industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv" + industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv", + industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" output: - industrial_energy_demand_per_node="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv" + industrial_energy_demand_per_node="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 script: 'scripts/build_industrial_energy_demand_per_node.py' @@ -256,10 +256,10 @@ rule build_industrial_energy_demand_per_country_today: rule build_industrial_energy_demand_per_node_today: input: - industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv", + industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv", industrial_energy_demand_per_country_today="resources/industrial_energy_demand_per_country_today.csv" output: - industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv" + industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 script: 'scripts/build_industrial_energy_demand_per_node_today.py' @@ -279,10 +279,10 @@ rule build_industrial_energy_demand_per_country: rule build_industrial_demand: input: - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", industrial_demand_per_country="resources/industrial_energy_demand_per_country.csv" output: - industrial_demand="resources/industrial_demand_{network}_s{simpl}_{clusters}.csv" + industrial_demand="resources/industrial_demand_elec_s{simpl}_{clusters}.csv" threads: 1 resources: mem_mb=1000 script: 
'scripts/build_industrial_demand.py' @@ -290,24 +290,25 @@ rule build_industrial_demand: rule build_retro_cost: input: building_stock="data/retro/data_building_stock.csv", + data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv", + air_temperature="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", u_values_PL="data/retro/u_values_poland.csv", tax_w="data/retro/electricity_taxes_eu.csv", construction_index="data/retro/comparative_level_investment.csv", - average_surface="data/retro/average_surface_components.csv", floor_area_missing="data/retro/floor_area_missing.csv", - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", cost_germany="data/retro/retro_cost_germany.csv", - window_assumptions="data/retro/window_assumptions.csv" + window_assumptions="data/retro/window_assumptions.csv", output: - retro_cost="resources/retro_cost_{network}_s{simpl}_{clusters}.csv", - floor_area="resources/floor_area_{network}_s{simpl}_{clusters}.csv" + retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv", + floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv" resources: mem_mb=1000 script: "scripts/build_retro_cost.py" rule prepare_sector_network: input: - network=pypsaeur('networks/{network}_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'), + network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'), energy_totals_name='resources/energy_totals.csv', co2_totals_name='resources/co2_totals.csv', transport_name='resources/transport_data.csv', @@ -319,35 +320,35 @@ rule prepare_sector_network: h2_cavern = "data/hydrogen_salt_cavern_potentials.csv", profile_offwind_ac=pypsaeur("resources/profile_offwind-ac.nc"), profile_offwind_dc=pypsaeur("resources/profile_offwind-dc.nc"), - busmap_s=pypsaeur("resources/busmap_{network}_s{simpl}.csv"), - busmap=pypsaeur("resources/busmap_{network}_s{simpl}_{clusters}.csv"), - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", - simplified_pop_layout="resources/pop_layout_{network}_s{simpl}.csv", - industrial_demand="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv", - heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc", - heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc", - heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc", - temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc", - temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", - temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc", - temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc", - cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc", - cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc", - cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc", - solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc", -
solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc", - solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc", - retro_cost_energy = "resources/retro_cost_{network}_s{simpl}_{clusters}.csv", - floor_area = "resources/floor_area_{network}_s{simpl}_{clusters}.csv" - output: config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' + busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"), + busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", + simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv", + industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv", + heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc", + heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc", + heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc", + temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc", + temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc", + temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc", + temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", + temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc", + cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc", + cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc", + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc", + cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc", + solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc", + solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", + solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc", + retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv", + floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv" + output: config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' threads: 1 resources: mem_mb=2000 - benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" script: "scripts/prepare_sector_network.py" @@ -420,16 +421,16 @@ if config["foresight"] == "overnight": rule solve_network: input: - network=config['results_dir'] + config['run'] + "/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + network=config['results_dir'] + config['run'] + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", costs=config['costs_dir'] + "costs_{planning_horizons}.csv", config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml' - output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + output: 
config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" shadow: "shallow" log: - solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" - benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", + memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" + benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" threads: 4 resources: mem_mb=config['solving']['mem'] # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain @@ -440,15 +441,15 @@ if config["foresight"] == "myopic": rule add_existing_baseyear: input: - network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', + network=config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', powerplants=pypsaeur('resources/powerplants.csv'), - busmap_s=pypsaeur("resources/busmap_{network}_s{simpl}.csv"), - busmap=pypsaeur("resources/busmap_{network}_s{simpl}_{clusters}.csv"), - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", + busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"), + busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"), + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", costs=config['costs_dir'] + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]), - cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc" - output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' + cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc" + output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc' wildcard_constraints: planning_horizons=config['scenario']['planning_horizons'][0] #only applies to baseyear threads: 1 @@ -457,18 +458,18 @@ if config["foresight"] == "myopic": def process_input(wildcards): i = config["scenario"]["planning_horizons"].index(int(wildcards.planning_horizons)) - return config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + str(config["scenario"]["planning_horizons"][i-1]) + 
".nc" + return config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + str(config["scenario"]["planning_horizons"][i-1]) + ".nc" rule add_brownfield: input: - network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', + network=config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc', network_p=process_input, #solved network at previous time step costs=config['costs_dir'] + "costs_{planning_horizons}.csv", - cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc" + cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc" - output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" threads: 4 resources: mem_mb=10000 script: "scripts/add_brownfield.py" @@ -477,16 +478,16 @@ if config["foresight"] == "myopic": rule solve_network_myopic: input: - network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", + network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc", costs=config['costs_dir'] + "costs_{planning_horizons}.csv", config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml' - output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" + output: config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc" shadow: "shallow" log: - solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", - python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" - benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" + solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log", + python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log", + memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log" + benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}" threads: 4 resources: mem_mb=config['solving']['mem'] script: "scripts/solve_network.py" diff --git a/config.default.yaml b/config.default.yaml index 6b4a20f3..74a61b85 100644 --- a/config.default.yaml +++ 
b/config.default.yaml @@ -119,20 +119,25 @@ sector: 'time_dep_hp_cop' : True #time dependent heat pump coefficient of performance 'heat_pump_sink_T' : 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py # conservatively high to cover hot water and space heating in poorly-insulated buildings - 'retrofitting' : - 'retro_exogen': True # space heat demand savings exogenously - 'dE': # reduction of space heat demand (applied before losses in DH) - 2020 : 0. - 2030 : 0.15 - 2040 : 0.3 - 2050 : 0.4 + 'reduce_space_heat_exogenously': True # reduces space heat demand by a given factor (applied before losses in DH) + # this can represent e.g. building renovation, building demolition, or if + # the factor is negative: increasing floor area, increased thermal comfort, population growth + 'reduce_space_heat_exogenously_factor': # per unit reduction in space heat demand + # the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221 + 2020: 0.10 # this results in a space heat demand reduction of 10% + 2025: 0.09 # heat demand first increases compared to 2020 because of the larger floor area per capita + 2030: 0.09 + 2035: 0.11 + 2040: 0.16 + 2045: 0.21 + 2050: 0.29 + 'retrofitting' : # co-optimises building renovation to reduce space heat demand 'retro_endogen': False # co-optimise space heat savings - 'cost_factor' : 1.0 + 'cost_factor' : 1.0 # weight costs for building renovation 'interest_rate': 0.04 # for investment in building components 'annualise_cost': True # annualise the investment costs 'tax_weighting': False # weight costs depending on taxes in countries - 'construction_index': True # weight costs depending on labour/material costs per ct - 'l_strength': ["0.076", "0.197"] # additional insulation thickness[m], determines number of retro steps(=generators per bus) and maximum possible savings + 'construction_index': True # weight costs depending on labour/material costs per country 'tes' : True 'tes_tau' : 3.
'boilers' : True diff --git a/data/retro/average_surface_components.csv b/data/retro/average_surface_components.csv deleted file mode 100644 index de72edde..00000000 --- a/data/retro/average_surface_components.csv +++ /dev/null @@ -1,7 +0,0 @@ -,Dwelling,Ceilling,Standard component surfaces (m2),component,surfaces,(m2),, -Building type,Space(m²),Height(m),Roof,Facade,Floor,Windows,, -Single/two family house,120,2.5,90,166,63,29,, -Large apartment house,1457,2.5,354,1189,354,380,, -Apartment house,5276,,598.337,2992.1,598.337,756,tabula ,http://webtool.building-typology.eu/#pdfes -,,,,,,,, -"Source: https://link.springer.com/article/10.1007/s12053-010-9090-6 ,p.4",,,,,,,, diff --git a/doc/data.csv b/doc/data.csv index e8c19518..8e316281 100644 --- a/doc/data.csv +++ b/doc/data.csv @@ -22,5 +22,5 @@ U-values Poland,u_values_poland.csv,unknown,https://data.europa.eu/euodp/de/data Floor area missing in hotmaps building stock data,floor_area_missing.csv,unknown,https://data.europa.eu/euodp/de/data/dataset/building-stock-observatory Comparative level investment,comparative_level_investment.csv,Eurostat,https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Comparative_price_levels_for_investment Electricity taxes,electricity_taxes_eu.csv,Eurostat,https://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=nrg_pc_204&lang=en -Average surface components,average_surface_components.csv,unknown,http://webtool.building-typology.eu/#bm +Building topologies and corresponding standard values,tabula-calculator-calcsetbuilding.csv,unknown,https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unkown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/ diff --git a/doc/installation.rst b/doc/installation.rst index 728061c7..3ab3d328 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -73,8 +73,8 @@ To download and extract the data bundle on the command line: .. code:: bash - projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210125.tar.gz" - projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210125.tar.gz + projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210418.tar.gz" + projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210418.tar.gz The data licences and sources are given in the following table. diff --git a/doc/release_notes.rst b/doc/release_notes.rst index 56c65669..a9561857 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -6,6 +6,7 @@ Future release =================== * For the myopic investment option, a carbon budget and a type of decay (exponential or beta) can be selected in the ``config.yaml`` file to distribute the budget across the ``planning_horizons``. For example, ``cb40ex0`` in the ``{sector_opts}`` wildcard will distribute a carbon budget of 40 GtCO2 following an exponential decay with initial growth rate 0. +* The cost database for retrofitting of the thermal envelope of buildings has been updated. Now, for calculating the space heat savings of a building, losses by thermal bridges and ventilation are included as well as heat gains (internal and by solar radiation). See the section :ref:`retro` for more details on the retrofitting module. * Added an option to alter the capital cost or maximum capacity of carriers by a factor via ``carrier+factor`` in the ``{sector_opts}`` wildcard. 
This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the ``capital_cost`` of solar to 50\% of original values. Similarly ``solar+p3`` multiplies the ``p_nom_max`` by 3. * Rename the bus for European liquid hydrocarbons from ``Fischer-Tropsch`` to ``EU oil``, since it can be supplied not just with the Fischer-Tropsch process, but also with fossil oil. * Bugfix: The new separation of land transport by carrier in Version 0.4.0 failed to account for the carbon dioxide emissions from internal combustion engines. This is now treated as a negative load on the atmospheric carbon dioxide bus, just like aviation emissions. @@ -137,4 +138,4 @@ To make a new release of the data bundle, make an archive of the files in ``data .. code:: bash - data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv + data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv retro/tabula-calculator-calcsetbuilding.csv diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst index 002bd16c..77317094 100644 --- a/doc/supply_demand.rst +++ b/doc/supply_demand.rst @@ -108,6 +108,43 @@ Small for decentral applications. Big water pit storage for district heating. +.. _retro: + +Retrofitting of the thermal envelope of buildings +=================================================== +Co-optimising building renovation is only enabled if the option +``retro_endogen: True`` is set in the ``config.yaml``. To reduce the +computational burden, the default setting is + +.. literalinclude:: ../config.default.yaml + :language: yaml + :lines: 134-135 + +Renovation of the thermal envelope reduces the space heating demand and is +optimised at each node for every heat bus. Renovation measures through additional +insulation material and replacement of energy-inefficient windows are considered. + +In a first step, costs per energy saving are estimated in :mod:`build_retro_cost.py`. +They depend on the insulation condition of the building stock and the costs for +renovation of the building elements. +In a second step, two possible renovation strengths are determined from these +costs per energy saving: a moderate renovation with lower costs and lower +maximum possible space heat savings, and an ambitious renovation with +higher costs and higher efficiency gains. Both are added by step-wise +linearisation in the form of two additional generators in +:mod:`prepare_sector_network.py`. + +The settings in the ``config.yaml`` concerning the endogenous optimisation of +building renovation are + +.. literalinclude:: ../config.default.yaml + :language: yaml + :lines: 136-140 + +Further information is given in the publication + +`Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) `_.
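As a rough illustration of this step-wise linearisation, the sketch below shows how two such retrofitting generators could be attached to a heat bus in PyPSA. This is not the actual implementation in :mod:`prepare_sector_network.py`; the bus name, cost values and saving potentials are placeholders.

.. code:: python

    import pypsa

    n = pypsa.Network()
    n.add("Bus", "DE0 0 heat", carrier="heat")

    # placeholder values: annualised cost per MW of saved space heat demand
    # and the maximum share of demand each renovation strength can save
    strengths = {"moderate": dict(capital_cost=90e3, max_saving=0.3),
                 "ambitious": dict(capital_cost=140e3, max_saving=0.6)}

    peak_space_heat_demand = 100.0  # MW at this bus, placeholder

    for name, s in strengths.items():
        n.add("Generator",
              f"DE0 0 retrofitting {name}",
              bus="DE0 0 heat",
              carrier="retrofitting",
              p_nom_extendable=True,
              # capped at the maximum possible savings of this strength
              p_nom_max=s["max_saving"] * peak_space_heat_demand,
              capital_cost=s["capital_cost"])

The optimisation can then trade off the annualised renovation costs of each strength against the costs of supplying the heat demand that would otherwise remain.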
+ Hydrogen demand ================== diff --git a/scripts/add_brownfield.py b/scripts/add_brownfield.py index a0b8d97b..20677498 100644 --- a/scripts/add_brownfield.py +++ b/scripts/add_brownfield.py @@ -90,12 +90,12 @@ if __name__ == "__main__": sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', co2_budget_name='go', planning_horizons='2030'), - input=dict(network='pypsa-eur-sec/results/test/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc', - network_p='pypsa-eur-sec/results/test/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc', + input=dict(network='pypsa-eur-sec/results/test/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc', + network_p='pypsa-eur-sec/results/test/postnetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc', costs='pypsa-eur-sec/data/costs/costs_{planning_horizons}.csv', - cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc"), - output=['pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] + cop_air_total="pypsa-eur-sec/resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_elec_s{simpl}_{clusters}.nc"), + output=['pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] ) import yaml with open('config.yaml', encoding='utf8') as f: diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 09b47da6..1b5451c4 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -411,15 +411,15 @@ if __name__ == "__main__": wildcards=dict(network='elec', simpl='', clusters='45', lv='1.0', sector_opts='Co2L0-3H-T-H-B-I-solar3-dist1', planning_horizons='2020'), - input=dict(network='pypsa-eur-sec/results/version-2/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc', + input=dict(network='pypsa-eur-sec/results/version-2/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc', powerplants='pypsa-eur/resources/powerplants.csv', - busmap_s='pypsa-eur/resources/busmap_{network}_s{simpl}.csv', - busmap='pypsa-eur/resources/busmap_{network}_s{simpl}_{clusters}.csv', + busmap_s='pypsa-eur/resources/busmap_elec_s{simpl}.csv', + busmap='pypsa-eur/resources/busmap_elec_s{simpl}_{clusters}.csv', costs='technology_data/outputs/costs_{planning_horizons}.csv', - cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - clustered_pop_layout="pypsa-eur-sec/resources/pop_layout_{network}_s{simpl}_{clusters}.csv",), - output=['pypsa-eur-sec/results/version-2/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'], + cop_air_total="pypsa-eur-sec/resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + clustered_pop_layout="pypsa-eur-sec/resources/pop_layout_elec_s{simpl}_{clusters}.csv",), + output=['pypsa-eur-sec/results/version-2/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'], ) import yaml with 
open('config.yaml', encoding='utf8') as f: diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py index 4d80abb9..c5f0d297 100644 --- a/scripts/build_energy_totals.py +++ b/scripts/build_energy_totals.py @@ -1,4 +1,3 @@ - import pandas as pd import geopandas as gpd @@ -51,7 +50,6 @@ country_to_code = { 'Switzerland' : 'CH', } - non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL'] rename = {"GR" : "EL", @@ -73,7 +71,6 @@ def build_eurostat(year): fns = {2016: "data/eurostat-energy_balances-june_2016_edition/{year}-Energy-Balances-June2016edition.xlsx", 2017: "data/eurostat-energy_balances-june_2017_edition/{year}-ENERGY-BALANCES-June2017edition.xlsx"} - #2016 includes BA, 2017 doesn't #with sheet as None, an ordered dictionary of all sheets is returned @@ -82,7 +79,6 @@ def build_eurostat(year): skiprows=1, index_col=list(range(4))) - #sorted_index necessary for slicing df = pd.concat({country_to_code[df.columns[0]] : df for ct,df in dfs.items()},sort=True).sort_index() @@ -91,15 +87,12 @@ def build_eurostat(year): def build_swiss(year): - fn = "data/switzerland-sfoe/switzerland-new_format.csv" #convert PJ/a to TWh/a return (pd.read_csv(fn,index_col=list(range(2)))/3.6).loc["CH",str(year)] - - def build_idees(year): base_dir = "data/jrc-idees-2015" @@ -275,7 +268,7 @@ def build_idees(year): return totals -def build_energy_totals(): +def build_energy_totals(eurostat, swiss, idees): clean_df = idees.reindex(population.index).drop(["passenger cars","passenger car efficiency"],axis=1) @@ -316,7 +309,6 @@ def build_energy_totals(): + avg*(clean_df.loc[missing_in_eurostat,"{} {}".format("total",sector)] - clean_df.loc[missing_in_eurostat,"{} {}".format("electricity",sector)]) - #Fix Norway space and water heating fractions #http://www.ssb.no/en/energi-og-industri/statistikker/husenergi/hvert-3-aar/2014-07-14 #The main heating source for about 73 per cent of the households is based on electricity @@ -458,14 +450,12 @@ def build_eurostat_co2(year=1990): #Residual oil (No. 
6) 0.298 #https://www.eia.gov/electricity/annual/html/epa_a_03.html - - eurostat_co2 = eurostat_for_co2.multiply(se).sum(axis=1) return eurostat_co2 -def build_co2_totals(eea_co2, eurostat_co2, year=1990): +def build_co2_totals(eea_co2, eurostat_co2): co2 = eea_co2.reindex(["EU28","NO","CH","BA","RS","AL","ME","MK"] + eu28) @@ -530,7 +520,6 @@ def build_transport_data(): if __name__ == "__main__": - # Detect running outside of snakemake and mock snakemake for testing if 'snakemake' not in globals(): from vresutils import Dict @@ -546,21 +535,19 @@ if __name__ == "__main__": nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index') population = nuts3['pop'].groupby(nuts3.country).sum() - year = 2011 + data_year = 2011 + eurostat = build_eurostat(data_year) + swiss = build_swiss(data_year) + idees = build_idees(data_year) - eurostat = build_eurostat(year) + build_energy_totals(eurostat, swiss, idees) - swiss = build_swiss(year) - idees = build_idees(year) - - build_energy_totals() - - eea_co2 = build_eea_co2() - - eurostat_co2 = build_eurostat_co2() - - co2=build_co2_totals(eea_co2, eurostat_co2, year) + base_year_emissions = 1990 + eea_co2 = build_eea_co2(base_year_emissions) + eurostat_co2 = build_eurostat_co2(base_year_emissions) + + co2 = build_co2_totals(eea_co2, eurostat_co2) co2.to_csv(snakemake.output.co2_name) build_transport_data() diff --git a/scripts/build_heat_demand.py b/scripts/build_heat_demand.py index 865f12bd..169e81f4 100644 --- a/scripts/build_heat_demand.py +++ b/scripts/build_heat_demand.py @@ -11,7 +11,7 @@ if 'snakemake' not in globals(): import yaml snakemake = Dict() with open('config.yaml') as f: - snakemake.config = yaml.load(f) + snakemake.config = yaml.safe_load(f) snakemake.input = Dict() snakemake.output = Dict() diff --git a/scripts/build_industrial_energy_demand_per_country_today.py b/scripts/build_industrial_energy_demand_per_country_today.py index 7593477b..4de5d2c1 100644 --- a/scripts/build_industrial_energy_demand_per_country_today.py +++ b/scripts/build_industrial_energy_demand_per_country_today.py @@ -98,7 +98,7 @@ for ct in eu28: for fuel in fuels: summary.at[fuel,sub] = s[fuels[fuel]].sum() - summary.at['other',sub] = summary.at['all',sub] - summary.loc[summary.index^['all','other'],sub].sum() + summary.at['other',sub] = summary.at['all',sub] - summary.loc[summary.index.symmetric_difference(['all','other']),sub].sum() summary['Other Industrial Sectors'] = summary[ois_subs].sum(axis=1) summary.drop(columns=ois_subs,inplace=True) @@ -128,7 +128,7 @@ output = pd.read_csv(snakemake.input.industrial_production_per_country, eu28_averages = final_summary.groupby(level=1,axis=1).sum().divide(output.loc[eu28].sum(),axis=1) -non_eu28 = output.index^eu28 +non_eu28 = output.index.symmetric_difference(eu28) for ct in non_eu28: print(ct) diff --git a/scripts/build_industrial_production_per_country.py b/scripts/build_industrial_production_per_country.py index c8fa6910..eed59d29 100644 --- a/scripts/build_industrial_production_per_country.py +++ b/scripts/build_industrial_production_per_country.py @@ -196,7 +196,7 @@ ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0) there = ammonia.index.intersection(countries_demand.index) -missing = countries_demand.index^there +missing = countries_demand.index.symmetric_difference(there) print("Following countries have no ammonia demand:", missing) diff --git a/scripts/build_population_layouts.py b/scripts/build_population_layouts.py index 5093e1b4..497e399a 100644 --- 
a/scripts/build_population_layouts.py +++ b/scripts/build_population_layouts.py @@ -15,7 +15,7 @@ if 'snakemake' not in globals(): import yaml snakemake = Dict() with open('config.yaml') as f: - snakemake.config = yaml.load(f) + snakemake.config = yaml.safe_load(f) snakemake.input = Dict() snakemake.output = Dict() @@ -46,7 +46,7 @@ urban_fraction = pd.read_csv(snakemake.input.urban_percent, #fill missing Balkans values missing = ["AL","ME","MK"] reference = ["RS","BA"] -urban_fraction = urban_fraction.reindex(urban_fraction.index|missing) +urban_fraction = urban_fraction.reindex(urban_fraction.index.union(missing)) urban_fraction.loc[missing] = urban_fraction[reference].mean() diff --git a/scripts/build_retro_cost.py b/scripts/build_retro_cost.py index af44c91a..cb0acf41 100644 --- a/scripts/build_retro_cost.py +++ b/scripts/build_retro_cost.py @@ -1,30 +1,456 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ -Created on Mon Jan 20 14:57:21 2020 +Created on Fri Jan 22 10:36:39 2021 -@author: bw0928 +This script calculates the space heating savings achieved through better +insulation of the thermal envelope of a building and the corresponding costs +for different building types in different countries. -***************************************************************************** -This script calculates cost-energy_saving-curves for retrofitting -for the EU-37 countries, based on the building stock data from hotmaps and -the EU building stock database -***************************************************************************** +-----------------METHODOLOGY ------------------------------------------------ +The energy savings calculations are based on the -Structure: + EN ISO 13790 / seasonal method https://www.iso.org/obp/ui/#iso:std:iso:13790:ed-2:v1:en: - (1) set assumptions and parameters - (2) read and prepare data - (3) calculate (€-dE-curves) - (4) save in csv + - calculations closely follow the TABULA WebTool + http://webtool.building-typology.eu/ + http://www.episcope.eu/fileadmin/tabula/public/docs/report/TABULA_CommonCalculationMethod.pdf + which follows the EN ISO 13790 / seasonal method -***************************************************************************** + - building stock data: + mainly: hotmaps project https://gitlab.com/hotmaps/building-stock + missing: EU building observatory https://ec.europa.eu/energy/en/eu-buildings-database + + - building types with typical surfaces/ standard values: + - tabula https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx + + +---------------------BASIC EQUATIONS ------------------------------------------- +The basic equations: + + The energy needed for space heating E_space [W/m²] is calculated as the + sum of heat losses and heat gains: + + E_space = H_losses - H_gains + + Heat losses consist of the losses through heat transmission (H_tr [W/m²K]) + (this includes heat transfer through building elements and thermal bridges) + and the losses by ventilation (H_ve [W/m²K]): + + H_losses = (H_tr + H_ve) * F_red * (T_threshold - T_averaged_d_heat) * d_heat * 1/365 + + F_red : reduction factor, considering non-uniform heating, p.16 chapter 2.6 [-] + T_threshold : heating temperature threshold, assumed 15 °C + d_heat : length of heating season, number of days with daily averaged temperature below T_threshold + T_averaged_d_heat : mean daily averaged temperature of the days within heating season d_heat + + Heat gains consist of the gains by solar radiation (H_solar) and + internal heat gains (H_int), weighted
by a gain utilisation factor nu: + + H_gains = nu * (H_solar + H_int) + +---------------- STRUCTURE OF THE SCRIPT -------------------------------------- + +The script has the following structure: + + (i) fixed parameters are set + (ii) functions + (1) prepare data, bring to same format + (2) calculate space heat demand depending on additional insulation material + (3) calculate costs for corresponding additional insulation material + (4) get cost savings per retrofitting measure for each sector by weighting + with heated floor area + +------------------------------------------------------------------------------- +@author: Lisa """ - import pandas as pd -import matplotlib.pyplot as plt +import xarray as xr -#%% ************ FUCNTIONS *************************************************** + +# (i) --- FIXED PARAMETERS / STANDARD VALUES ----------------------------------- + +# thermal conductivity standard value [W/(m.K)] +k = 0.035 +# strength of relative retrofitting depending on the component, +# determined by historical data of insulation thickness for retrofitting +l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]}, + index=["Roof", "Wall", "Floor"]) + +# standard room height [m], used to calculate heat transfer by ventilation +h_room = 2.5 +# volume specific heat capacity of air [Wh/m^3K] +c_p_air = 0.34 +# internal heat capacity per m² A_c_ref [Wh/(m^2K)] +c_m = 45 +# average thermal output of the internal heat sources per m^2 reference area [W/m^2] +phi_int = 3 +# constant parameter tau_H_0 [h] according to EN 13790 seasonal method +tau_H_0 = 30 +# constant parameter alpha_H_0 [-] according to EN 13790 seasonal method +alpha_H_0 = 0.8 + +# parameters for solar heat load during heating season ------------------------- +# TABULA standard values, table p.8 in the documentation +external_shading = 0.6 # vertical orientation: fraction of window area shaded [-] +frame_area_fraction = 0.3 # fraction of frame area of window [-] +non_perpendicular = 0.9 # reduction factor, considering radiation non-perpendicular to the glazing [-] +solar_energy_transmittance = 0.5 # solar energy transmittance for radiation perpendicular to the glazing [-] +# solar global radiation [kWh/(m^2a)] +solar_global_radiation = pd.Series([246, 401, 246, 148], + index=["east", "south", "west", "north"], + name="solar_global_radiation [kWh/(m^2a)]") + +# threshold temperature for heating [Celsius] -------------------------------- +t_threshold = 15 + +# rename residential sub-sectors +rename_sectors = {'Single family- Terraced houses': "SFH", + 'Multifamily houses': "MFH", + 'Appartment blocks': "AB"} + + +# additional insulation thickness, determines maximum possible savings [m] +l_strength = [ "0.07","0.075", "0.08", "0.1", "0.15", "0.22", "0.24", "0.26" ] + + +# (ii) --- FUNCTIONS ---------------------------------------------------------- + +def get_average_temperature_during_heating_season(temperature, t_threshold=15): + """ + returns average temperature during heating season + input: + temperature : pd.Series(index=time, values=temperature) + t_threshold : threshold temperature for heating degree days (HDD) + returns: + average temperature + """ + t_average_daily = temperature.resample("1D").mean() + return t_average_daily.loc[t_average_daily < t_threshold].mean() + + +def prepare_building_stock_data(): + """ + reads building stock data and cleans up the format, returns + -------- + u_values: pd.DataFrame current U-values + area_tot: heated floor area per country and sector [Mm²] + area: heated floor area [Mm²] for
country, sector, building + type and period + + """ + + building_data = pd.read_csv(snakemake.input.building_stock, + usecols=list(range(13))) + + # standardize data + building_data["type"].replace( + {'Covered area: heated [Mm²]': 'Heated area [Mm²]', + 'Windows ': 'Window', + 'Windows': 'Window', + 'Walls ': 'Wall', + 'Walls': 'Wall', + 'Roof ': 'Roof', + 'Floor ': 'Floor', + }, inplace=True) + + building_data.country_code = building_data.country_code.str.upper() + building_data["subsector"].replace({'Hotels and Restaurants': + 'Hotels and restaurants'}, inplace=True) + building_data["sector"].replace({'Residential sector': 'residential', + 'Service sector': 'services'}, + inplace=True) + + # extract u-values + u_values = building_data[(building_data.feature.str.contains("U-values")) + & (building_data.subsector != "Total")] + + components = list(u_values.type.unique()) + + country_iso_dic = building_data.set_index("country")["country_code"].to_dict() + + # add missing / rename countries + country_iso_dic.update({'Norway': 'NO', + 'Iceland': 'IS', + 'Montenegro': 'ME', + 'Serbia': 'RS', + 'Albania': 'AL', + 'United Kingdom': 'GB', + 'Bosnia and Herzegovina': 'BA', + 'Switzerland': 'CH'}) + + # heated floor area ---------------------------------------------------------- + area = building_data[(building_data.type == 'Heated area [Mm²]') & + (building_data.subsector != "Total")] + area_tot = area.groupby(["country", "sector"]).sum() + area = pd.concat([area, area.apply(lambda x: x.value / + area_tot.value.loc[(x.country, x.sector)], + axis=1).rename("weight")],axis=1) + area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum() + area_tot.rename(index=country_iso_dic, inplace=True) + + # add floor area for some missing countries from other data sources + area_missing = pd.read_csv(snakemake.input.floor_area_missing, + index_col=[0, 1], usecols=[0, 1, 2, 3], + encoding='ISO-8859-1') + area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack()) + area_tot = area_tot.loc[~area_tot.index.duplicated(keep='last')] + + # for countries still missing, estimate floor area from population size + pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0) + pop_layout["ct"] = pop_layout.index.str[:2] + ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum() + + area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply(lambda x: x / ct_total[x.index]) + missing_area_ct = ct_total.index.difference(area_tot.index.levels[0]) + for ct in missing_area_ct.intersection(ct_total.index): + averaged_data = pd.DataFrame( + area_per_pop.value.reindex(map_for_missings[ct]).mean() + * ct_total[ct], + columns=["value"]) + index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()]) + averaged_data.index = index + averaged_data["estimated"] = 1 + if ct not in area_tot.index.levels[0]: + area_tot = area_tot.append(averaged_data, sort=True) + else: + area_tot.loc[averaged_data.index] = averaged_data + + # u_values for Poland are missing -> take them from eurostat ----------- + u_values_PL = pd.read_csv(snakemake.input.u_values_PL) + u_values_PL.component.replace({"Walls":"Wall", "Windows": "Window"}, + inplace=True) + area_PL = area.loc["Poland"].reset_index() + data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index) + data_PL["country"] = "Poland" + data_PL["country_code"] = "PL" + # data from area + for col in ["sector", "subsector", "bage"]: + data_PL[col] = area_PL[col] + data_PL["btype"] = area_PL["subsector"] + + data_PL_final =
pd.DataFrame() + for component in components: + data_PL["type"] = component + data_PL["value"] = data_PL.apply(lambda x: u_values_PL[(u_values_PL.component==component) + & (u_values_PL.sector==x["sector"])] + [x["bage"]].iloc[0], axis=1) + data_PL_final = data_PL_final.append(data_PL) + + u_values = pd.concat([u_values, + data_PL_final]).reset_index(drop=True) + + # clean data --------------------------------------------------------------- + # the smallest possible U-value for windows today is 0.8 (passive house standard); + # lower values in the dataset are presumably for the glass only, not the whole + # window including the frame + u_values.loc[(u_values.type=="Window") & (u_values.value<0.8), "value"] = 0.8 + # drop unnecessary columns + u_values.drop(['topic', 'feature','detail', 'estimated','unit'], + axis=1, inplace=True, errors="ignore") + + + u_values = u_values.apply(lambda x: x.replace(rename_sectors)) + + # for missing weighting of surfaces of building types assume MFH + u_values["assumed_subsector"] = u_values.subsector + u_values.loc[~u_values.subsector.isin(rename_sectors.values()), + "assumed_subsector"] = 'MFH' + + u_values.country_code.replace({"UK":"GB"}, inplace=True) + u_values.bage.replace({'Berfore 1945':'Before 1945'}, inplace=True) + u_values = u_values[~u_values.bage.isna()] + + u_values.set_index(["country_code", "subsector", "bage", "type"], + inplace=True) + + # only take countries specified in the config.yaml into account + countries = ct_total.index + area_tot = area_tot.loc[countries] + + return u_values, country_iso_dic, countries, area_tot, area + + + +def prepare_building_topology(u_values, same_building_topology=True): + """ + reads in typical building topologies (e.g. average surface of building elements) + and typical losses through thermal bridging and air ventilation + """ + + data_tabula = pd.read_csv(snakemake.input.data_tabula, + skiprows=lambda x: x in range(1,11), + low_memory=False).iloc[:2974] + + parameters = ["Code_Country", + # building type (SFH/MFH/AB) + "Code_BuildingSizeClass", + # time period of build year + "Year1_Building", "Year2_Building", + # areas [m^2] + "A_C_Ref", # conditioned area, internal + "A_Roof_1", "A_Roof_2", "A_Wall_1", "A_Wall_2", + "A_Floor_1", "A_Floor_2", "A_Window_1", "A_Window_2", + # for air ventilation losses [1/h] + "n_air_use", "n_air_infiltration", + # for losses due to thermal bridges, standard values [W/(m^2K)] + "delta_U_ThermalBridging", + # floor area related heat transfer coefficient by transmission [-] + "F_red_temp", + # refurbishment state [1: not refurbished, 2: moderate, 3: strong refurbishment] + 'Number_BuildingVariant', + ] + + data_tabula = data_tabula[parameters] + + building_elements = ["Roof", "Wall", "Floor", "Window"] + + # get total area of building components + for element in building_elements: + elements = ["A_{}_1".format(element), + "A_{}_2".format(element)] + data_tabula = pd.concat([data_tabula.drop(elements, axis=1), + data_tabula[elements].sum(axis=1).rename("A_{}".format(element))], + axis=1) + + # clean data + data_tabula = data_tabula.loc[pd.concat([data_tabula[col]!=0 for col in + ["A_Wall", "A_Floor", "A_Window", "A_Roof", "A_C_Ref"]], + axis=1).all(axis=1)] + data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1,2,3])] + data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(["AB", "SFH", "MFH", "TH"])] + + + + # map tabula building periods to hotmaps building periods + def map_periods(build_year1, build_year2): + periods = {(0, 1945): 'Before 1945',
+ (1945,1969) : '1945 - 1969', + (1970, 1979) : '1970 - 1979', + (1980, 1989) : '1980 - 1989', + (1990, 1999) : '1990 - 1999', + (2000, 2010) : '2000 - 2010', + (2010, 10000) : 'Post 2010'} + minimum = 1e5 + for key in periods: + diff = abs(build_year1-key[0]) + abs(build_year2-key[1]) + if diff < minimum: + minimum = diff + searched_period = periods[key] + return searched_period + + data_tabula["bage"] = data_tabula.apply(lambda x: map_periods(x.Year1_Building, x.Year2_Building), + axis=1) + + # set new index + data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', + 'bage', 'Number_BuildingVariant']) + + # get typical building topology + area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window'] + typical_building = (data_tabula.groupby(level=[1,2]).mean() + .rename(index={"TH": "SFH"}).groupby(level=[0,1]).mean()) + + # drop duplicates + data_tabula = data_tabula[~data_tabula.index.duplicated(keep="first")] + + # fill missing values + hotmaps_data_i = u_values.reset_index().set_index(["country_code", "assumed_subsector", + "bage"]).index + # missing countries in tabula + missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique()) + # areas should stay constant for different retrofitting measures + cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref','A_Roof', + 'A_Wall', 'A_Floor', 'A_Window'] + for col in cols_constant: + missing_ct[col] = missing_ct[col].combine_first(missing_ct[col] + .groupby(level=[0,1,2]).mean()) + missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack() + .unstack().mean()) + data_tabula = missing_ct.stack(level=[-1,-2, -3],dropna=False) + + # set for all countries the same building topology, which only depends on + # build year and subsector (MFH, SFH, AB) + if same_building_topology: + typical_building = ((typical_building.reindex(data_tabula.droplevel(0).index)) + .set_index(data_tabula.index)) + data_tabula.update(typical_building[area_cols]) + + # total building envelope surface [m^2] + data_tabula["A_envelope"] = data_tabula[["A_{}".format(element) for + element in building_elements]].sum(axis=1) + + return data_tabula + + +def prepare_cost_retro(country_iso_dic): + """ + read and prepare retro costs, annualise them if annualise_cost=True + """ + cost_retro = pd.read_csv(snakemake.input.cost_germany, + nrows=4, index_col=0, usecols=[0, 1, 2, 3]) + cost_retro.rename(lambda x: x.capitalize(), inplace=True) + + window_assumptions = pd.read_csv(snakemake.input.window_assumptions, + skiprows=[1], usecols=[0,1,2,3], nrows=2) + + if annualise_cost: + cost_retro[["cost_fix", "cost_var"]] = (cost_retro[["cost_fix", "cost_var"]] + .apply(lambda x: x * interest_rate / + (1 - (1 + interest_rate) + ** -cost_retro.loc[x.index, + "life_time"]))) + + # weightings of costs --------------------------------------------- + if construction_index: + cost_w = pd.read_csv(snakemake.input.construction_index, + skiprows=3, nrows=32, index_col=0) + # since German retrofitting costs are assumed + cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"]) + .rename(index=country_iso_dic)) + else: + cost_w = None + + if tax_weighting: + tax_w = pd.read_csv(snakemake.input.tax_w, + header=12, nrows=39, index_col=0, usecols=[0, 4]) + tax_w.rename(index=country_iso_dic, inplace=True) + tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0] + tax_w.dropna(inplace=True) + else: + tax_w = None + + + return cost_retro, window_assumptions, cost_w, tax_w + + +def prepare_temperature_data(): + """ + returns the
+def prepare_temperature_data():
+    """
+    returns the temperature-dependent data for each country:
+
+        d_heat : length of heating season pd.Series(index=countries) [days/year]
+                 on those days, the daily average temperature is below the
+                 threshold temperature t_threshold
+        temperature_factor : accumulated difference between internal and
+                             external temperature pd.Series(index=countries) ([K]) * [days/year]
+
+    temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
+
+    """
+    temperature = xr.open_dataarray(snakemake.input.air_temperature).T.to_pandas()
+    d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean()
+              .resample("1D").mean() < t_threshold).sum()
+    temperature_average_d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean()
+                                  .apply(lambda x: x[x < t_threshold].mean()))
+    temperature_factor = ((t_threshold - temperature_average_d_heat) * d_heat * 1 / 365)
+
+    return d_heat, temperature_factor
+
+
+# ---------------------------------------------------------------------------
+def calculate_costs(u_values, l, cost_retro, window_assumptions):
+    """
+    returns costs for a given retrofitting strength weighted by the surface
+    share of the component in the building envelope
+    """
+    return u_values.apply(lambda x: (cost_retro.loc[x.name[3], "cost_var"] *
+                                     100 * float(l) * l_weight.loc[x.name[3]][0]
+                                     + cost_retro.loc[x.name[3], "cost_fix"]) *
+                          x.A_element / x.A_C_Ref
+                          if x.name[3] != "Window"
+                          else (window_cost(x["new_U_{}".format(l)], cost_retro,
+                                            window_assumptions) * x.A_element / x.A_C_Ref
+                                if x.value > window_limit(float(l), window_assumptions) else 0),
+                          axis=1)
+
+
+def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
     """
     calculate U-values after building retrofitting, depending on the old
-    U-values (u_values).
+    U-values (u_values). This is for simple insulation measures, adding
+    an additional layer of insulation.
+
+    For the components Roof, Wall and Floor, the new U-values depend on the
+    additional insulation thickness (l) and the weighting for the
+    corresponding component (l_weight).
+
+    Windows are renovated to new ones with U-value (function: u_retro_window(l))
+    only if they are worse insulated than a certain limit value
+    (function: window_limit).
@@ -81,280 +525,304 @@
     """
     return u_values.apply(lambda x: k / ((k / x.value) +
-                                         (float(l) * l_weight.loc[x.type][0]))
-                          if x.type!="Windows"
+                                         (float(l) * l_weight.loc[x.name[3]]))
+                          if x.name[3]!="Window"
                           else (min(x.value, u_retro_window(float(l), window_assumptions))
                                 if x.value>window_limit(float(l), window_assumptions) else x.value),
                           axis=1)
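The series-resistance formula in calculate_new_u can be checked by hand: adding an insulation layer of thickness l (scaled by the component weighting) with conductivity k to a component of old value U_old gives 1/U_new = 1/U_old + l*weight/k. With illustrative numbers (the weighting 1.48 for walls is taken from the script's l_weight table):

k = 0.035                # thermal conductivity of the insulation [W/(mK)]
u_old = 1.0              # U-value of an unrefurbished wall [W/(m^2K)]
l, weight = 0.12, 1.48   # 12 cm additional insulation, wall weighting

u_new = k / (k / u_old + l * weight)
print(round(u_new, 3))   # -> 0.165 W/(m^2K)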
 
 
-def calculate_dE(u_values, l, average_surface_w):
-    """
-    returns energy demand after retrofit (per unit of unrefurbished energy
-    demand) depending on current and retrofitted U-values, this energy demand
-    is weighted depending on the average surface of each component for the
-    building type of the assumend subsector
-    """
-    return u_values.apply(lambda x: x[l] / x.value *
-                          average_surface_w.loc[x.assumed_subsector,
-                                                x.type],
-                          axis=1)
-
-
-def calculate_costs(u_values, l, cost_retro, average_surface):
+def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
     """
-    returns costs for a given retrofitting strength weighted by the average
-    surface/volume ratio of the component for each building type
-    """
-    return u_values.apply(lambda x: (cost_retro.loc[x.type, "cost_var"] *
-                                     100 * float(l) * l_weight.loc[x.type][0]
-                                     + cost_retro.loc[x.type, "cost_fix"]) *
-                          average_surface.loc[x.assumed_subsector, x.type] /
-                          average_surface.loc[x.assumed_subsector, "surface"]
-                          if x.type!="Windows"
-                          else (window_cost(x[l], cost_retro, window_assumptions) *
-                                average_surface.loc[x.assumed_subsector, x.type] /
-                                average_surface.loc[x.assumed_subsector, "surface"]
-                                if x.value>window_limit(float(l), window_assumptions) else 0),
-                          axis=1)
+    maps tabula data to hotmaps data with the desired column name prefix
 
+    Parameters
+    ----------
+    df_tabula : pd.Series
+        tabula data with pd.MultiIndex
+    df_hotmaps : pd.DataFrame
+        dataframe with hotmaps pd.MultiIndex
+    column_prefix : string
+        column prefix to rename column names of df_tabula
 
-# ---------------------------------------------------------------------------
-def prepare_building_stock_data():
-    """
-    reads building stock data and cleans up the format, returns
-    --------
-    u_values:          pd.DataFrame current U-values
-    average_surface:   pd.DataFrame (index= building type,
-                                     columns = [surface [m],height [m],
-                                                components area [m^2]])
-    average_surface_w: pd.DataFrame weighted share of the components per
-                       building type
-    area_tot:          heated floor area per country and sector [Mm²]
-    area:              heated floor area [Mm²] for country, sector, building
-                       type and period
+    Returns
+    -------
+    pd.DataFrame (index=df_hotmaps.index)
+        returns df_tabula with hotmaps index
     """
-
-    building_data = pd.read_csv(snakemake.input.building_stock,
-                                usecols=list(range(13)))
-
-    # standardize data
-    building_data["type"].replace(
-        {'Covered area: heated [Mm²]': 'Heated area [Mm²]',
-         'Windows ': 'Windows',
-         'Walls ': 'Walls',
-         'Roof ': 'Roof',
-         'Floor ': 'Floor'}, inplace=True)
-
-    building_data.country_code = building_data.country_code.str.upper()
-    building_data["subsector"].replace({'Hotels and Restaurants':
-                                        'Hotels and restaurants'}, inplace=True)
-    building_data["sector"].replace({'Residential sector': 'residential',
-                                     'Service sector': 'services'},
-                                    inplace=True)
-    # extract u-values
-    u_values = building_data[(building_data.feature.str.contains("U-values"))
-                             & (building_data.subsector != "Total")]
-
-    components = list(u_values.type.unique())
-
-    country_iso_dic = building_data.set_index("country")["country_code"].to_dict()
-
-    # add missing /rename countries
-    country_iso_dic.update({'Norway': 'NO',
-                            'Iceland': 'IS',
-                            'Montenegro': 'ME',
-                            'Serbia': 'RS',
-                            'Albania': 'AL',
-                            'United Kingdom': 'GB',
-                            'Bosnia and Herzegovina': 'BA',
-                            'Switzerland': 'CH'})
-
-    # heated floor area ----------------------------------------------------------
-    area = building_data[(building_data.type == 'Heated area [Mm²]') &
-                         (building_data.subsector != "Total")]
-    area_tot = area.groupby(["country", "sector"]).sum()
-    area = pd.concat([area, area.apply(lambda x: x.value /
-                                       area_tot.value.loc[(x.country, x.sector)],
-                                       axis=1).rename("weight")],axis=1)
-    area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum()
-    area_tot.rename(index=country_iso_dic, inplace=True)
-
-    # add for some missing countries floor area from other data sources
-    area_missing = pd.read_csv(snakemake.input.floor_area_missing,
-                               index_col=[0, 1], usecols=[0, 1, 2, 3])
-    area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack())
-    area_tot = area_tot.loc[~area_tot.index.duplicated(keep='last')]
-
-    # for still missing countries calculate floor area by population size
-    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
-    pop_layout["ct"] = pop_layout.index.str[:2]
-    ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum()
-
-    area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply(lambda x: x / ct_total[x.index])
-    missing_area_ct = ct_total.index.difference(area_tot.index.levels[0])
-    for ct in (missing_area_ct & ct_total.index):
-        averaged_data = pd.DataFrame(
-            area_per_pop.value.reindex(map_for_missings[ct]).mean()
-            * ct_total[ct],
-            columns=["value"])
-        index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
-        averaged_data.index = index
-        averaged_data["estimated"] = 1
-        if ct not in area_tot.index.levels[0]:
-            area_tot = area_tot.append(averaged_data, sort=True)
-        else:
-            area_tot.loc[averaged_data.index] = averaged_data
-
-    # u_values for Poland are missing -> take them from eurostat -----------
-    u_values_PL = pd.read_csv(snakemake.input.u_values_PL)
-    area_PL = area.loc["Poland"].reset_index()
-    data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index)
-    data_PL["country"] = "Poland"
-    data_PL["country_code"] = "PL"
-    # data from area
-    for col in ["sector", "subsector", "bage"]:
-        data_PL[col] = area_PL[col]
-    data_PL["btype"] = area_PL["subsector"]
-
-    data_PL_final = pd.DataFrame()
-    for component in components:
-        data_PL["type"] = component
-        data_PL["value"] = data_PL.apply(lambda x: u_values_PL[(u_values_PL.component==component)
-                                                               & (u_values_PL.sector==x["sector"])]
-                                         [x["bage"]].iloc[0], axis=1)
-        data_PL_final = data_PL_final.append(data_PL)
-
-    u_values = pd.concat([u_values,
-                          data_PL_final]).reset_index(drop=True)
-
-    # clean data ---------------------------------------------------------------
-    # smallest possible today u values for windows 0.8 (passive house standard)
-    # maybe the u values for the glass and not the whole window including frame
-    # for those types assumed in the dataset
-    u_values.loc[(u_values.type=="Windows") & (u_values.value<0.8), "value"] = 0.8
-    # drop unnecessary columns
-    u_values.drop(['topic', 'feature','detail', 'estimated','unit'],
-                  axis=1, inplace=True, errors="ignore")
-    # only take in config.yaml specified countries into account
-    countries = ct_total.index
-    area_tot = area_tot.loc[countries]
-
-    # average component surface --------------------------------------------------
-    average_surface = (pd.read_csv(snakemake.input.average_surface,
-                                   nrows=3,
-                                   header=1,
-                                   index_col=0).rename(
-                                       {'Single/two family house': 'Single family- Terraced houses',
-                                        'Large apartment house': 'Multifamily houses',
-                                        'Apartment house': 'Appartment blocks'},
-                                       axis="index")).iloc[:, :6]
-    average_surface.columns = ["surface", "height", "Roof",
-                               "Walls", "Floor", "Windows"]
-    # get area share of component
-    average_surface_w = average_surface[components].apply(lambda x: x / x.sum(),
-                                                          axis=1)
-
-    return (u_values, average_surface,
-            average_surface_w, area_tot, area, country_iso_dic, countries)
+    values = (df_tabula.unstack()
+              .reindex(df_hotmaps.rename(index = lambda x: "MFH" if x not in rename_sectors.values()
+                                         else x, level=1).index))
+    values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns])
+    values.index = df_hotmaps.index
+    return values
 
 
-def prepare_cost_retro():
+def get_solar_gains_per_year(window_area):
     """
-    read and prepare retro costs, annualises them if annualise_cost=True
+    returns solar heat gains during the heating season in [kWh/a] depending on
+    the window area [m^2] of the building, assuming an equally distributed
+    window orientation (east, south, north, west)
     """
-    cost_retro = pd.read_csv(snakemake.input.cost_germany,
-                             nrows=4, index_col=0, usecols=[0, 1, 2, 3])
-    cost_retro.index = cost_retro.index.str.capitalize()
-    cost_retro.rename(index={"Window": "Windows", "Wall": "Walls"}, inplace=True)
-
-    window_assumptions = pd.read_csv(snakemake.input.window_assumptions,
-                                     skiprows=[1], usecols=[0,1,2,3], nrows=2)
-
-    if annualise_cost:
-        cost_retro[["cost_fix", "cost_var"]] = (cost_retro[["cost_fix", "cost_var"]]
-                                                .apply(lambda x: x * interest_rate /
-                                                       (1 - (1 + interest_rate)
-                                                        ** -cost_retro.loc[x.index,
-                                                                           "life_time"])))
-
-    return cost_retro, window_assumptions
+    return sum(external_shading * frame_area_fraction * non_perpendicular
+               * 0.25 * window_area * solar_global_radiation)
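get_solar_gains_per_year spreads the window area equally over the four orientations (the factor 0.25) and scales each share by shading, frame and incidence-angle factors. A standalone sketch with stand-in parameter values; the script defines its own module-level constants for these names, so every number below is an assumption for illustration only:

external_shading = 0.6        # assumed shading factor [-]
frame_area_fraction = 0.3     # assumed effective glazed fraction [-]
non_perpendicular = 0.9       # assumed correction for non-perpendicular radiation [-]
solar_global_radiation = [246, 401, 246, 148]  # assumed east/south/west/north [kWh/(m^2 a)]

window_area = 10.0  # [m^2]
gains = sum(external_shading * frame_area_fraction * non_perpendicular
            * 0.25 * window_area * r for r in solar_global_radiation)
print(round(gains, 1))  # -> 421.6 kWh/a during the heating season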
 
 
-def calculate_cost_energy_curve(u_values, l_strength, l_weight, average_surface_w,
-                                average_surface, area, country_iso_dic,
-                                countries):
+def map_to_lstrength(l_strength, df):
     """
-    returns energy demand per unit of unrefurbished (dE) and cost for given
-    renovation strength (l_strength), data for missing countries is
-    approximated by countries with similar building stock (dict:map_for_missings)
-
-    parameter
-    -------- input -----------
-    u_values:          pd.DataFrame current U-values
-    l_strength:        list of strings (strength of retrofitting)
-    l_weight:          pd.DataFrame (component, weight)
-    average_surface:   pd.DataFrame (index= building type,
-                                     columns = [surface [m],height [m],
-                                                components area [m^2]])
-    average_surface_w: pd.DataFrame weighted share of the components per
-                       building type
-    area:              heated floor area [Mm²] for country, sector, building
-                       type and period
-    country_iso_dic:   dict (maps country name to 2-letter-iso-code)
-    countries:         pd.Index (specified countries in config.yaml)
-    -------- output ----------
-    res:               pd.DataFrame(index=pd.MultiIndex([country, sector]),
-                                    columns=pd.MultiIndex([(dE/cost), l_strength]))
+    renames the columns of a pandas dataframe to map the tabula retrofitting
+    strengths [2 = moderate, 3 = ambitious] to l_strength
     """
+    middle = len(l_strength) // 2
+    map_to_l = pd.MultiIndex.from_arrays([middle*[2] + len(l_strength[middle:])*[3],l_strength])
+    l_strength_df = (df.stack(-2).reindex(map_to_l, axis=1, level=0)
+                     .droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1))
+    return pd.concat([df.drop([2,3], axis=1, level=1), l_strength_df], axis=1)
 
-    energy_saved = u_values[['country', 'sector', 'subsector', 'bage', 'type']]
-    costs = u_values[['country', 'sector', 'subsector', 'bage', 'type']]
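The first two lines of map_to_lstrength split the list of insulation strengths in half, labelling the weaker half as TABULA variant 2 ("moderate") and the stronger half as variant 3 ("ambitious"). The mapping can be inspected in isolation (the thickness values below are illustrative):

import pandas as pd

l_strength = ["0.07", "0.075", "0.26", "0.30"]  # illustrative insulation thicknesses [m]
middle = len(l_strength) // 2
map_to_l = pd.MultiIndex.from_arrays(
    [middle * [2] + len(l_strength[middle:]) * [3], l_strength])
print(map_to_l.to_list())  # [(2, '0.07'), (2, '0.075'), (3, '0.26'), (3, '0.30')]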
+
+
+def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
+    """
+    calculates total annual heat losses Q_ht for different insulation thicknesses
+    (l_strength), depending on the current insulation state (u_values), standard
+    building topologies and air ventilation from TABULA (data_tabula) and
+    the accumulated difference between internal and external temperature
+    during the heating season (temperature_factor).
+
+    Total annual heat losses Q_ht consist of losses by:
+        (1) transmission (H_tr_e)
+        (2) thermal bridges (H_tb)
+        (3) ventilation (H_ve)
+    weighted by a factor (F_red_temp) which takes account of non-uniform heating,
+    and by the temperature factor of the heating season
+
+    Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
+
+    returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],
+                                 columns=[current (1.) + retrofitted (l_strength)])
+
+    """
+    # (1) by transmission
+    # calculate new U-values of building elements due to additional insulation
     for l in l_strength:
-        u_values[l] = calculate_new_u(u_values, l, l_weight)
-        energy_saved = pd.concat([energy_saved,
-                                  calculate_dE(u_values, l, average_surface_w).rename(l)],
-                                 axis=1)
-        costs = pd.concat([costs,
-                           calculate_costs(u_values, l, cost_retro, average_surface).rename(l)],
-                          axis=1)
+        u_values["new_U_{}".format(l)] = calculate_new_u(u_values,
+                                                         l, l_weight, window_assumptions)
+    # surface area of building components [m^2]
+    area_element = (data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]]
+                    .rename(columns=lambda x: x[2:]).stack().unstack(-2).stack())
+    u_values["A_element"] = map_tabula_to_hotmaps(area_element,
+                                                  u_values, "A_element").xs(1, level=1, axis=1)
+
+    # heat transfer H_tr_e [W/m^2K] through building element
+    # U_e * A_e / A_C_Ref
+    columns = ["value"] + ["new_U_{}".format(l) for l in l_strength]
+    heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0),
+                               u_values.A_element], axis=1)
+    # get real subsector back in index
+    heat_transfer.index = u_values.index
+    heat_transfer = heat_transfer.groupby(level=[0,1,2]).sum()
+
+    # rename columns of heat transfer H_tr_e [W/K] and envelope surface A_envelope [m^2]
+    heat_transfer.rename(columns={"A_element":"A_envelope",
+                                  },inplace=True)
+
+    # map reference area
+    heat_transfer["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref,
+                                                     heat_transfer,
+                                                     "A_C_Ref").xs(1.,level=1,axis=1)
+    u_values["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref,
+                                                u_values,
+                                                "A_C_Ref").xs(1.,level=1,axis=1)
+
+    # get heat transfer by transmission through building element [W/(m^2K)]
+    heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0)
+    heat_transfer_perm2.columns = pd.MultiIndex.from_product([["H_tr_e"], [1.] + l_strength])
+
+    # (2) heat transfer by thermal bridges H_tb [W/(m^2K)]
+    # H_tb = delta_U [W/(m^2K)] * A_envelope [m^2] / A_C_Ref [m^2]
+    H_tb_tabula = data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope / data_tabula.A_C_Ref
+    heat_transfer_perm2 = pd.concat([heat_transfer_perm2,
+                                     map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, "H_tb")], axis=1)
+
+
+    # (3) by ventilation H_ve [W/(m²K)]
+    # = c_p_air [Wh/(m^3K)] * (n_air_use + n_air_infiltration) [1/h] * h_room [m]
+    H_ve_tabula = (data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air * h_room
+    heat_transfer_perm2 = pd.concat([heat_transfer_perm2,
+                                     map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, "H_ve")],
+                                    axis=1)
+
+
+    # F_red_temp: factor which takes account of non-uniform heating, e.g.
+    # a lower heating set point or switch-off during night times/weekends;
+    # the effect is significant for buildings with poor insulation, while for
+    # well insulated buildings/passive houses it has nearly no effect;
+    # based on tabula values depending on the building type
+    F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp,
+                                       heat_transfer_perm2,
+                                       "F_red_temp")
+    # total heat transfer Q_ht [W/m^2] =
+    # (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
+    # temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
+    heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2)
+    F_red_temp = map_to_lstrength(l_strength, F_red_temp)
+
+    Q_ht = (heat_transfer_perm2.groupby(level=1,axis=1).sum()
+            .mul(F_red_temp.droplevel(0, axis=1))
+            .mul(temperature_factor.reindex(heat_transfer_perm2.index,level=0), axis=0))
+
+    return Q_ht, heat_transfer_perm2
+
+
+def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
+    """
+    calculates heat gains Q_gain [W/m^2], which consist of gains by:
+        (1) solar radiation
+        (2) internal heat gains
+
+    """
+    # (1) by solar radiation H_solar [W/m^2]
+    # solar radiation [kWh/a] / A_C_Ref [m^2] * 1e3 [Wh/kWh] / 8760 [h/a]
+    H_solar = (data_tabula.A_Window.apply(lambda x: get_solar_gains_per_year(x))
+               / data_tabula.A_C_Ref * 1e3 / 8760)
+
+    Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, "H_solar").xs(1.,level=1, axis=1)
+
+    # (2) by internal heat gains H_int
+    # phi_int [W/m^2] * d_heat [d/a] * 1/365 [a/d] -> W/m^2
+    Q_gain["H_int"] = (phi_int * d_heat * 1/365).reindex(index=heat_transfer_perm2.index, level=0)
+
+    return Q_gain
+
+
+def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
+    """
+    calculates the gain utilisation factor nu
+    """
+    # time constant of the building tau [h] = c_m [Wh/(m^2K)] / (H_tr_e+H_tb+H_ve) [W/(m^2K)]
+    tau = c_m / heat_transfer_perm2.groupby(level=1,axis=1).sum()
+    alpha = alpha_H_0 + (tau/tau_H_0)
+    # heat balance ratio
+    gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
+    # gain utilisation factor
+    nu = (1 - gamma**alpha) / (1 - gamma**(alpha+1))
+
+    return nu
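The gain utilisation factor follows the seasonal utilisation-factor form of EN ISO 13790: the larger the heat-balance ratio gamma = Q_gain/Q_ht and the lighter the building (small time constant tau), the smaller the share of gains that actually offsets losses. A numeric sketch; the constants stand in for the module-level parameters c_m, alpha_H_0 and tau_H_0 and are assumptions for this example:

c_m, alpha_H_0, tau_H_0 = 45.0, 0.8, 30.0  # assumed stand-in values
H_total = 1.5        # (H_tr_e + H_tb + H_ve) [W/(m^2K)]
Q_ht, Q_gain = 40.0, 15.0  # illustrative heat losses and gains [W/m^2]

tau = c_m / H_total              # building time constant [h]
alpha = alpha_H_0 + tau / tau_H_0
gamma = Q_gain / Q_ht            # heat balance ratio
nu = (1 - gamma**alpha) / (1 - gamma**(alpha + 1))
print(round(nu, 3))              # -> 0.886: most of the gains offset losses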
+def calculate_space_heat_savings(u_values, data_tabula, l_strength,
+                                 temperature_factor, d_heat):
+    """
+    calculates space heat savings (dE_space [per unit of unrefurbished state])
+    through retrofitting of the thermal envelope with additional insulation
+    material (l_strength [m])
+    """
+    # heat losses Q_ht [W/m^2]
+    Q_ht, heat_transfer_perm2 = calculate_heat_losses(u_values, data_tabula,
+                                                      l_strength, temperature_factor)
+    # heat gains Q_gain [W/m^2]
+    Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat)
+
+    # calculate gain utilisation factor nu [dimensionless]
+    nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain)
+
+    # total space heating demand E_space
+    E_space = Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0)
+    dE_space = E_space.div(E_space[1.], axis=0).iloc[:, 1:]
+    dE_space.columns = pd.MultiIndex.from_product([["dE"], l_strength])
+
+    return dE_space
+
+
+def calculate_retro_costs(u_values, l_strength, cost_retro):
+    """
+    returns the costs of the different retrofitting measures
+    """
+    costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l)
+                       for l in l_strength], axis=1)
 
     # energy and costs per country, sector, subsector and year
-    e_tot = energy_saved.groupby(['country', 'sector', 'subsector', 'bage']).sum()
-    cost_tot = costs.groupby(['country', 'sector', 'subsector', 'bage']).sum()
+    cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum()
+    cost_tot.columns = pd.MultiIndex.from_product([["cost"], cost_tot.columns])
 
-    # weighting by area -> energy and costs per country and sector
-    # in case of missing data first concat
-    energy_saved = pd.concat([e_tot, area.weight], axis=1)
-    cost_res = pd.concat([cost_tot, area.weight], axis=1)
-    energy_saved = (energy_saved.apply(lambda x: x * x.weight, axis=1)
-                    .groupby(level=[0, 1]).sum())
-    cost_res = (cost_res.apply(lambda x: x * x.weight, axis=1)
-                .groupby(level=[0, 1]).sum())
+    return cost_tot
 
-    res = pd.concat([energy_saved[l_strength], cost_res[l_strength]],
-                    axis=1, keys=["dE", "cost"])
-    res.rename(index=country_iso_dic, inplace=True)
-    res = res.reindex(index=countries, level=0)
-    # reset index because otherwise not considered countries still in index.levels[0]
-    res = res.reset_index().set_index(["country", "sector"])
 
+def sample_dE_costs_area(area, area_tot, costs, dE_space, countries,
+                         construction_index, tax_weighting):
+    """
+    brings costs and energy savings together, fills in area and
+    cost-per-energy-savings for missing countries, weights costs, and
+    determines "moderate" and "ambitious" retrofitting
+    """
+    sub_to_sector_dict = (area.reset_index().replace(rename_sectors)
+                          .set_index("subsector")["sector"].to_dict())
+
+    area_reordered = ((area.rename(index=country_iso_dic, level=0)
+                       .rename(index=rename_sectors, level=2)
+                       .reset_index()).rename(columns={"country":"country_code"})
+                      .set_index(["country_code", "subsector", "bage"]))
+
+    cost_dE = (pd.concat([costs, dE_space], axis=1)
+               .mul(area_reordered.weight, axis=0)
+               .rename(sub_to_sector_dict,level=1).groupby(level=[0,1]).sum())
 
     # map missing countries
-    for ct in pd.Index(map_for_missings.keys()) & countries:
-        averaged_data = res.reindex(index=map_for_missings[ct], level=0).mean(level=1)
-        index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
-        averaged_data.index = index
-        if ct not in res.index.levels[0]:
-            res = res.append(averaged_data)
-        else:
-            res.loc[averaged_data.index] = averaged_data
-
-    return res
+    for ct in countries.difference(cost_dE.index.levels[0]):
+        averaged_data = (cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1)
+                         .set_index(pd.MultiIndex
+                                    .from_product([[ct], cost_dE.index.levels[1]])))
+        cost_dE = cost_dE.append(averaged_data)
 
-# %% **************** MAIN ************************************************
+    # weight costs by the construction index
+    if construction_index:
+        for ct in list(map_for_missings.keys() - cost_w.index):
+            cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
+        cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0)
+
+    # weight costs depending on country taxes
+    if tax_weighting:
+        for ct in list(map_for_missings.keys() - tax_w.index):
+            tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
+        cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0)
+
+    # drop countries that are not considered
+    cost_dE = cost_dE.reindex(countries,level=0)
+
+    # get the share of residential and service floor area
+    sec_w = area_tot.value / area_tot.value.groupby(level=0).sum()
+    # get the total cost-energy-savings weighted by sector area
+    tot = (cost_dE.mul(sec_w, axis=0).groupby(level="country_code").sum()
+           .set_index(pd.MultiIndex
+                      .from_product([cost_dE.index.unique(level="country_code"), ["tot"]])))
+    cost_dE = cost_dE.append(tot).unstack().stack()
+
+    summed_area = (pd.DataFrame(area_tot.groupby("country").sum())
+                   .set_index(pd.MultiIndex.from_product(
+                       [area_tot.index.unique(level="country"), ["tot"]])))
+    area_tot = area_tot.append(summed_area).unstack().stack()
+
+    # cost per unit of energy saved
+    cost_per_saving = (cost_dE["cost"] / (1-cost_dE["dE"]))  #.diff(axis=1).dropna(axis=1)
+
+    # "moderate": the strength with the lowest cost per unit of energy saved
+    moderate_min = cost_per_saving.idxmin(axis=1)
+    moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1)
+                                  for i in moderate_min.index], axis=1).T
+    moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns,
+                                                           ["moderate"]])
+
+    # "ambitious": the strongest insulation considered
+    ambitious_dE_cost = cost_dE.xs("0.26", level=1,axis=1)
+    ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns,
+                                                            ["ambitious"]])
+
+    cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)
+
+    return cost_dE_new, area_tot
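The selection above ranks each retrofitting strength by its cost per unit of energy saved, cost / (1 - dE), and picks the cheapest as "moderate", while "ambitious" is pinned to the strongest insulation column. In miniature, with one hypothetical country-sector row and made-up numbers:

import pandas as pd

cost_dE = pd.DataFrame({("cost", "0.1"): [10.0], ("cost", "0.26"): [18.0],
                        ("dE", "0.1"): [0.7], ("dE", "0.26"): [0.5]},
                       index=["DE residential"])

cost_per_saving = cost_dE["cost"] / (1 - cost_dE["dE"])
print(cost_per_saving)                 # 0.1 -> 33.3, 0.26 -> 36.0
print(cost_per_saving.idxmin(axis=1))  # -> "0.1" picked as "moderate"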
 
 
+#%% --- MAIN --------------------------------------------------------------
 if __name__ == "__main__":
     # for testing
     if 'snakemake' not in globals():
@@ -365,42 +833,35 @@ if __name__ == "__main__":
             wildcards=dict(
                 network='elec',
                 simpl='',
-                clusters='37',
+                clusters='48',
                 lv='1',
                 opts='Co2L-3H',
                 sector_opts="[Co2L0p0-168H-T-H-B-I]"),
             input=dict(
                 building_stock="data/retro/data_building_stock.csv",
+                data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv",
                 u_values_PL="data/retro/u_values_poland.csv",
+                air_temperature = "resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
                 tax_w="data/retro/electricity_taxes_eu.csv",
                 construction_index="data/retro/comparative_level_investment.csv",
-                average_surface="data/retro/average_surface_components.csv",
                 floor_area_missing="data/retro/floor_area_missing.csv",
-                clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
+                clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
                 cost_germany="data/retro/retro_cost_germany.csv",
                 window_assumptions="data/retro/window_assumptions.csv"),
             output=dict(
-                retro_cost="resources/retro_cost_{network}_s{simpl}_{clusters}.csv",
-                floor_area="resources/floor_area_{network}_s{simpl}_{clusters}.csv")
+                retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv",
+                floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv")
         )
     with open('config.yaml', encoding='utf8') as f:
         snakemake.config = yaml.safe_load(f)
 
-# ******** (1) ASSUMPTIONS - PARAMETERS **********************************
+# ******** config *********************************************************
+
     retro_opts = snakemake.config["sector"]["retrofitting"]
     interest_rate = retro_opts["interest_rate"]
     annualise_cost = retro_opts["annualise_cost"]  # annualise the investment costs
     tax_weighting = retro_opts["tax_weighting"]  # weight costs depending on taxes in countries
     construction_index = retro_opts["construction_index"]  # weight costs depending on labour/material costs per ct
 
-    # additional insulation thickness, determines maximum possible savings
-    l_strength = retro_opts["l_strength"]
-
-    k = 0.035  # thermal conductivity standard value
-    # strenght of relative retrofitting depending on the component
-    # determined by historical data of insulation thickness for retrofitting
-    l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]},
-                            index=["Roof", "Walls", "Floor"])
-
     # mapping missing countries by neighbours
     map_for_missings = {
@@ -413,72 +874,31 @@ if __name__ == "__main__":
         "NO": ["SE"],
     }
 
-# %% ************ (2) DATA ***************************************************
+# (1) prepare data **********************************************************
 
     # building stock data -----------------------------------------------------
-    (u_values, average_surface, average_surface_w,
-     area_tot, area, country_iso_dic, countries) = prepare_building_stock_data()
-
+    # hotmaps u_values, heated floor areas per sector
+    u_values, country_iso_dic, countries, area_tot, area = prepare_building_stock_data()
+    # building topology, thermal bridges, ventilation losses
+    data_tabula = prepare_building_topology(u_values)
     # costs for retrofitting -------------------------------------------------
-    cost_retro, window_assumptions = prepare_cost_retro()
+    cost_retro, window_assumptions, cost_w, tax_w = prepare_cost_retro(country_iso_dic)
+    # temperature-dependent parameters
+    d_heat, temperature_factor = prepare_temperature_data()
 
-    # weightings of costs
-    if construction_index:
-        cost_w = pd.read_csv(snakemake.input.construction_index,
-                             skiprows=3, nrows=32, index_col=0)
-        # since German retrofitting costs are assumed
-        cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"])
-                  .rename(index=country_iso_dic))
 
-    if tax_weighting:
-        tax_w = pd.read_csv(snakemake.input.tax_w,
-                            header=12, nrows=39, index_col=0, usecols=[0, 4])
-        tax_w.rename(index=country_iso_dic, inplace=True)
-        tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0]
-        tax_w.dropna(inplace=True)
+# (2) space heat savings ****************************************************
+    dE_space = calculate_space_heat_savings(u_values, data_tabula, l_strength,
+                                            temperature_factor, d_heat)
 
-# %% ********** (3) CALCULATE COST-ENERGY-CURVES ****************************
+# (3) costs *****************************************************************
+    costs = calculate_retro_costs(u_values, l_strength, cost_retro)
 
-    # for missing weighting of surfaces of building types assume MultiFamily houses
-    u_values["assumed_subsector"] = u_values.subsector
-    u_values.loc[~u_values.subsector.isin(average_surface.index),
-                 "assumed_subsector"] = 'Multifamily houses'
+# (4) cost-dE and area per sector *******************************************
+    cost_dE, area_tot = sample_dE_costs_area(area, area_tot, costs, dE_space, countries,
                                             construction_index, tax_weighting)
 
-    dE_and_cost = calculate_cost_energy_curve(u_values, l_strength, l_weight,
-                                              average_surface_w, average_surface, area,
-                                              country_iso_dic, countries)
-    # reset index because otherwise not considered countries still in index.levels[0]
-    dE_and_cost = dE_and_cost.reset_index().set_index(["country", "sector"])
-
-    # weights costs after construction index
-    if construction_index:
-        for ct in list(map_for_missings.keys() - cost_w.index):
-            cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
-        dE_and_cost.cost = dE_and_cost.cost.apply(lambda x: x * cost_w[x.index.levels[0]])
-
-    # weights cost depending on country taxes
-    if tax_weighting:
-        for ct in list(map_for_missings.keys() - tax_w.index):
-            tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
-        dE_and_cost.cost = dE_and_cost.cost.apply(lambda x: x * tax_w[x.index.levels[0]])
-
-    # get share of residential and sevice floor area
-    sec_w = (area_tot / area_tot.groupby(["country"]).sum())["value"]
-    # get the total cost-energy-savings weight by sector area
-    tot = dE_and_cost.apply(lambda col: col * sec_w, axis=0).groupby(level=0).sum()
-    tot.set_index(pd.MultiIndex.from_product([list(tot.index), ["tot"]]),
-                  inplace=True)
-    dE_and_cost = dE_and_cost.append(tot).unstack().stack()
-
-    summed_area = pd.DataFrame(area_tot.groupby("country").sum())
-    summed_area.set_index(pd.MultiIndex.from_product(
-        [list(summed_area.index), ["tot"]]), inplace=True)
-    area_tot = area_tot.append(summed_area).unstack().stack()
-
-# %% ******* (4) SAVE
************************************************ - - dE_and_cost.to_csv(snakemake.output.retro_cost) +# save ********************************************************************* + cost_dE.to_csv(snakemake.output.retro_cost) area_tot.to_csv(snakemake.output.floor_area) - - diff --git a/scripts/build_solar_thermal_profiles.py b/scripts/build_solar_thermal_profiles.py index c26266aa..be5af409 100644 --- a/scripts/build_solar_thermal_profiles.py +++ b/scripts/build_solar_thermal_profiles.py @@ -11,7 +11,7 @@ if 'snakemake' not in globals(): import yaml snakemake = Dict() with open('config.yaml') as f: - snakemake.config = yaml.load(f) + snakemake.config = yaml.safe_load(f) snakemake.input = Dict() snakemake.output = Dict() diff --git a/scripts/build_temperature_profiles.py b/scripts/build_temperature_profiles.py index a55bd606..82ff431e 100644 --- a/scripts/build_temperature_profiles.py +++ b/scripts/build_temperature_profiles.py @@ -11,7 +11,7 @@ if 'snakemake' not in globals(): import yaml snakemake = Dict() with open('config.yaml') as f: - snakemake.config = yaml.load(f) + snakemake.config = yaml.safe_load(f) snakemake.input = Dict() snakemake.output = Dict() diff --git a/scripts/make_summary.py b/scripts/make_summary.py index 43f64b2c..518874ef 100644 --- a/scripts/make_summary.py +++ b/scripts/make_summary.py @@ -79,7 +79,7 @@ def calculate_nodal_cfs(n,label,nodal_cfs): cf_c = p_c/capacities_c index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()]) - nodal_cfs = nodal_cfs.reindex(index|nodal_cfs.index) + nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index)) nodal_cfs.loc[index,label] = cf_c.values return nodal_cfs @@ -106,7 +106,7 @@ def calculate_cfs(n,label,cfs): cf_c = pd.concat([cf_c], keys=[c.list_name]) - cfs = cfs.reindex(cf_c.index|cfs.index) + cfs = cfs.reindex(cf_c.index.union(cfs.index)) cfs.loc[cf_c.index,label] = cf_c @@ -121,7 +121,7 @@ def calculate_nodal_costs(n,label,nodal_costs): c.df["capital_costs"] = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] capital_costs = c.df.groupby(["location","carrier"])["capital_costs"].sum() index = pd.MultiIndex.from_tuples([(c.list_name,"capital") + t for t in capital_costs.index.to_list()]) - nodal_costs = nodal_costs.reindex(index|nodal_costs.index) + nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index)) nodal_costs.loc[index,label] = capital_costs.values if c.name == "Link": @@ -143,7 +143,7 @@ def calculate_nodal_costs(n,label,nodal_costs): c.df["marginal_costs"] = p*c.df.marginal_cost marginal_costs = c.df.groupby(["location","carrier"])["marginal_costs"].sum() index = pd.MultiIndex.from_tuples([(c.list_name,"marginal") + t for t in marginal_costs.index.to_list()]) - nodal_costs = nodal_costs.reindex(index|nodal_costs.index) + nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index)) nodal_costs.loc[index,label] = marginal_costs.values return nodal_costs @@ -158,7 +158,7 @@ def calculate_costs(n,label,costs): capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"]) capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name]) - costs = costs.reindex(capital_costs_grouped.index|costs.index) + costs = costs.reindex(capital_costs_grouped.index.union(costs.index)) costs.loc[capital_costs_grouped.index,label] = capital_costs_grouped @@ -185,7 +185,7 @@ def calculate_costs(n,label,costs): marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"]) marginal_costs_grouped = pd.concat([marginal_costs_grouped], 
keys=[c.list_name]) - costs = costs.reindex(marginal_costs_grouped.index|costs.index) + costs = costs.reindex(marginal_costs_grouped.index.union(costs.index)) costs.loc[marginal_costs_grouped.index,label] = marginal_costs_grouped @@ -196,31 +196,31 @@ def calculate_costs(n,label,costs): return costs -def calculate_cumulative_cost(): +def calculate_cumulative_cost(): planning_horizons = snakemake.config['scenario']['planning_horizons'] cumulative_cost = pd.DataFrame(index = df["costs"].sum().index, columns=pd.Series(data=np.arange(0,0.1, 0.01), name='social discount rate')) - + #discount cost and express them in money value of planning_horizons[0] for r in cumulative_cost.columns: cumulative_cost[r]=[df["costs"].sum()[index]/((1+r)**(index[-1]-planning_horizons[0])) for index in cumulative_cost.index] - + #integrate cost throughout the transition path - for r in cumulative_cost.columns: + for r in cumulative_cost.columns: for cluster in cumulative_cost.index.get_level_values(level=0).unique(): for lv in cumulative_cost.index.get_level_values(level=1).unique(): for sector_opts in cumulative_cost.index.get_level_values(level=2).unique(): cumulative_cost.loc[(cluster, lv, sector_opts,'cumulative cost'),r] = np.trapz(cumulative_cost.loc[idx[cluster, lv, sector_opts,planning_horizons],r].values, x=planning_horizons) - return cumulative_cost - + return cumulative_cost + def calculate_nodal_capacities(n,label,nodal_capacities): #Beware this also has extraneous locations for country (e.g. biomass) or continent-wide (e.g. fossil gas/oil) stuff for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum() index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]) - nodal_capacities = nodal_capacities.reindex(index|nodal_capacities.index) + nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index)) nodal_capacities.loc[index,label] = nodal_capacities_c.values return nodal_capacities @@ -234,7 +234,7 @@ def calculate_capacities(n,label,capacities): capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum() capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name]) - capacities = capacities.reindex(capacities_grouped.index|capacities.index) + capacities = capacities.reindex(capacities_grouped.index.union(capacities.index)) capacities.loc[capacities_grouped.index,label] = capacities_grouped @@ -267,7 +267,7 @@ def calculate_energy(n,label,energy): c_energies = pd.concat([c_energies], keys=[c.list_name]) - energy = energy.reindex(c_energies.index|energy.index) + energy = energy.reindex(c_energies.index.union(energy.index)) energy.loc[c_energies.index,label] = c_energies @@ -285,7 +285,7 @@ def calculate_supply(n,label,supply): for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map)] + items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue @@ -294,7 +294,7 @@ def calculate_supply(n,label,supply): s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) - supply = supply.reindex(s.index|supply.index) + supply = supply.reindex(s.index.union(supply.index)) supply.loc[s.index,label] = s @@ -313,7 +313,7 @@ def calculate_supply(n,label,supply): s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) - supply = supply.reindex(s.index|supply.index) + supply = 
supply.reindex(s.index.union(supply.index)) supply.loc[s.index,label] = s return supply @@ -330,7 +330,7 @@ def calculate_supply_energy(n,label,supply_energy): for c in n.iterate_components(n.one_port_components): - items = c.df.index[c.df.bus.map(bus_map)] + items = c.df.index[c.df.bus.map(bus_map).fillna(False)] if len(items) == 0: continue @@ -339,7 +339,7 @@ def calculate_supply_energy(n,label,supply_energy): s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) - supply_energy = supply_energy.reindex(s.index|supply_energy.index) + supply_energy = supply_energy.reindex(s.index.union(supply_energy.index)) supply_energy.loc[s.index,label] = s @@ -357,7 +357,7 @@ def calculate_supply_energy(n,label,supply_energy): s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[i]) - supply_energy = supply_energy.reindex(s.index|supply_energy.index) + supply_energy = supply_energy.reindex(s.index.union(supply_energy.index)) supply_energy.loc[s.index,label] = s @@ -366,7 +366,7 @@ def calculate_supply_energy(n,label,supply_energy): def calculate_metrics(n,label,metrics): - metrics = metrics.reindex(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])|metrics.index) + metrics = metrics.reindex(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]).union(metrics.index)) metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum() metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum() @@ -384,7 +384,7 @@ def calculate_metrics(n,label,metrics): def calculate_prices(n,label,prices): - prices = prices.reindex(prices.index|n.buses.carrier.unique()) + prices = prices.reindex(prices.index.union(n.buses.carrier.unique())) #WARNING: this is time-averaged, see weighted_prices for load-weighted average prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean() @@ -467,7 +467,7 @@ def calculate_market_values(n, label, market_values): techs = n.generators.loc[generators,"carrier"].value_counts().index - market_values = market_values.reindex(market_values.index | techs) + market_values = market_values.reindex(market_values.index.union(techs)) for tech in techs: @@ -488,7 +488,7 @@ def calculate_market_values(n, label, market_values): techs = n.links.loc[all_links,"carrier"].value_counts().index - market_values = market_values.reindex(market_values.index | techs) + market_values = market_values.reindex(market_values.index.union(techs)) for tech in techs: links = all_links[n.links.loc[all_links,"carrier"] == tech] @@ -505,7 +505,7 @@ def calculate_market_values(n, label, market_values): def calculate_price_statistics(n, label, price_statistics): - price_statistics = price_statistics.reindex(price_statistics.index|pd.Index(["zero_hours","mean","standard_deviation"])) + price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours","mean","standard_deviation"]))) buses = n.buses.index[n.buses.carrier == "AC"] @@ -611,7 +611,7 @@ if __name__ == "__main__": print(networks_dict) Nyears = 1 - + costs_db = prepare_costs(snakemake.input.costs, snakemake.config['costs']['USD2013_to_EUR2013'], snakemake.config['costs']['discountrate'], @@ -623,10 +623,9 @@ if __name__ == "__main__": df["metrics"].loc["total costs"] = df["costs"].sum() to_csv(df) - + if snakemake.config["foresight"]=='myopic': cumulative_cost=calculate_cumulative_cost() 
         cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv')
-
\ No newline at end of file
diff --git a/scripts/plot_network.py b/scripts/plot_network.py
index cd419808..2aaef6bc 100644
--- a/scripts/plot_network.py
+++ b/scripts/plot_network.py
@@ -130,7 +130,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
 
     costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True)
 
-    new_columns = ((preferred_order & costs.columns)
+    new_columns = (preferred_order.intersection(costs.columns)
                    .append(costs.columns.difference(preferred_order)))
     costs = costs[new_columns]
 
@@ -147,7 +147,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
                          n.links.carrier != "B2B")], inplace=True)
 
     # drop non-bus
-    to_drop = costs.index.levels[0] ^ n.buses.index
+    to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
     if len(to_drop) != 0:
         print("dropping non-buses", to_drop)
         costs.drop(to_drop, level=0, inplace=True, axis=0)
@@ -463,7 +463,7 @@ def plot_series(network, carrier="AC", name="test"):
                                  "battery storage",
                                  "hot water storage"])
 
-    new_columns = ((preferred_order & supply.columns)
+    new_columns = (preferred_order.intersection(supply.columns)
                    .append(supply.columns.difference(preferred_order)))
 
     supply = supply.groupby(supply.columns, axis=1).sum()
diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py
index fe28cfed..bc852e70 100644
--- a/scripts/plot_summary.py
+++ b/scripts/plot_summary.py
@@ -82,7 +82,7 @@ def plot_costs():
 
     print(df.sum())
 
-    new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
+    new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
 
     new_columns = df.sum().sort_values().index
 
@@ -136,7 +136,7 @@ def plot_energy():
 
     print(df)
 
-    new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
+    new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
 
     new_columns = df.columns.sort_values()
     #new_columns = df.sum().sort_values().index
 
@@ -177,7 +177,7 @@ def plot_balances():
 
     balances_df = pd.read_csv(snakemake.input.balances,index_col=list(range(3)),header=list(range(n_header)))
 
     balances = {i.replace(" ","_") : [i] for i in balances_df.index.levels[0]}
-    balances["energy"] = balances_df.index.levels[0]^co2_carriers
+    balances["energy"] = balances_df.index.levels[0].symmetric_difference(co2_carriers)
 
     for k,v in balances.items():
 
@@ -205,7 +205,7 @@ def plot_balances():
         if df.empty:
             continue
 
-        new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
+        new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
 
         new_columns = df.columns.sort_values()
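The recurring change across make_summary.py, plot_network.py and plot_summary.py swaps the |, & and ^ operators on pandas Index objects for the explicit .union, .intersection and .symmetric_difference methods, which newer pandas versions require for set-like semantics (the operators are deprecated for this use). The methods behave as follows:

import pandas as pd

a = pd.Index(["wind", "solar", "gas"])
b = pd.Index(["gas", "coal"])

print(a.union(b).to_list())                 # ['coal', 'gas', 'solar', 'wind']
print(a.intersection(b).to_list())          # ['gas']
print(a.symmetric_difference(b).to_list())  # ['coal', 'solar', 'wind']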
""" eea_co2 = build_eea_co2(year) - #TODO: read Eurostat data from year>2014, this only affects the estimation of - + # TODO: read Eurostat data from year>2014, this only affects the estimation of # CO2 emissions for "BA","RS","AL","ME","MK" if year > 2014: eurostat_co2 = build_eurostat_co2(year=2014) else: eurostat_co2 = build_eurostat_co2(year) - co2_totals=build_co2_totals(eea_co2, eurostat_co2, year) - + co2_totals = build_co2_totals(eea_co2, eurostat_co2) co2_emissions = co2_totals.loc[cts, "electricity"].sum() @@ -77,11 +73,11 @@ def co2_emissions_year(cts, opts, year): co2_emissions += co2_totals.loc[cts, ["industrial non-elec","industrial processes", "domestic aviation","international aviation", "domestic navigation","international navigation"]].sum().sum() - co2_emissions *=0.001 #MtCO2 to GtCO2 + + co2_emissions *= 0.001 # Convert MtCO2 to GtCO2 return co2_emissions - def build_carbon_budget(o): #distribute carbon budget following beta or exponential transition path if "be" in o: @@ -244,7 +240,7 @@ def remove_elec_base_techs(n): for c in n.iterate_components(snakemake.config["pypsa_eur"]): to_keep = snakemake.config["pypsa_eur"][c.name] - to_remove = pd.Index(c.df.carrier.unique())^to_keep + to_remove = pd.Index(c.df.carrier.unique()).symmetric_difference(to_keep) print("Removing",c.list_name,"with carrier",to_remove) names = c.df.index[c.df.carrier.isin(to_remove)] print(names) @@ -252,6 +248,14 @@ def remove_elec_base_techs(n): n.carriers.drop(to_remove, inplace=True, errors="ignore") +def remove_non_electric_buses(n): + """ + remove buses from pypsa-eur with carriers which are not AC buses + """ + print("drop buses from PyPSA-Eur with carrier: ", n.buses[~n.buses.carrier.isin(["AC", "DC"])].carrier.unique()) + n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])] + + def add_co2_tracking(n): @@ -925,7 +929,7 @@ def add_storage(network): # hydrogen stored overground h2_capital_cost = costs.at["hydrogen storage tank", "fixed"] - nodes_overground = nodes ^ cavern_nodes.index + nodes_overground = nodes.symmetric_difference(cavern_nodes.index) network.madd("Store", nodes_overground + " H2 Store", @@ -1178,11 +1182,10 @@ def add_heat(network): urban_fraction = options['central_fraction']*pop_layout["urban"]/(pop_layout[["urban","rural"]].sum(axis=1)) - # building retrofitting, exogenously reduce space heat demand - if options["retrofitting"]["retro_exogen"]: - dE = get_parameter(options["retrofitting"]["dE"]) - print("retrofitting exogenously, assumed space heat reduction of ", - dE) + # exogenously reduce space heat demand + if options["reduce_space_heat_exogenously"]: + dE = get_parameter(options["reduce_space_heat_exogenously_factor"]) + print("assumed space heat reduction of {} %".format(dE*100)) for sector in sectors: heat_demand[sector + " space"] = (1-dE)*heat_demand[sector + " space"] @@ -1488,7 +1491,7 @@ def create_nodes_for_heat_sector(): else: nodes[sector + " urban decentral"] = pop_layout.index # for central nodes, residential and services are aggregated - nodes["urban central"] = pop_layout.index ^ nodes["residential urban decentral"] + nodes["urban central"] = pop_layout.index.symmetric_difference(nodes["residential urban decentral"]) return nodes @@ -1874,7 +1877,7 @@ if __name__ == "__main__": opts='', planning_horizons='2020', sector_opts='120H-T-H-B-I-onwind+p3-dist1-cb48be3'), - input=dict( network='../pypsa-eur/networks/{network}_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc', + input=dict( 
network='../pypsa-eur/networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc', energy_totals_name='resources/energy_totals.csv', co2_totals_name='resources/co2_totals.csv', transport_name='resources/transport_data.csv', @@ -1886,34 +1889,33 @@ if __name__ == "__main__": h2_cavern = "data/hydrogen_salt_cavern_potentials.csv", profile_offwind_ac="../pypsa-eur/resources/profile_offwind-ac.nc", profile_offwind_dc="../pypsa-eur/resources/profile_offwind-dc.nc", - busmap_s="../pypsa-eur/resources/busmap_{network}_s{simpl}.csv", - busmap="../pypsa-eur/resources/busmap_{network}_s{simpl}_{clusters}.csv", - clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", - simplified_pop_layout="resources/pop_layout_{network}_s{simpl}.csv", - industrial_demand="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv", - heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc", - heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc", - heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc", - temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc", - temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc", - temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", - temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc", - temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc", - cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc", - cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc", - cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc", - cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc", - solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc", - solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc", - solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc", - retro_cost_energy = "resources/retro_cost_{network}_s{simpl}_{clusters}.csv", - floor_area = "resources/floor_area_{network}_s{simpl}_{clusters}.csv" + busmap_s="../pypsa-eur/resources/busmap_elec_s{simpl}.csv", + busmap="../pypsa-eur/resources/busmap_elec_s{simpl}_{clusters}.csv", + clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv", + simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv", + industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv", + heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc", + heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc", + heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc", + temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc", + temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc", + temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc", + temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc", + temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc", + temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc", + 
cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc", + cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc", + cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc", + cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc", + cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc", + cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc", + solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc", + solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc", + solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc", + retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv", + floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv" ), output=['results/version-cb48be3/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] - ) import yaml with open('config.yaml', encoding='utf8') as f: @@ -1953,6 +1955,8 @@ if __name__ == "__main__": n.loads["carrier"] = "electricity" + remove_non_electric_buses(n) + n.buses["location"] = n.buses.index update_wind_solar_costs(n, costs) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 85251caa..1e8f7a64 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -376,10 +376,10 @@ if __name__ == "__main__": wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0', sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', co2_budget_name='b30b3', planning_horizons='2050'), - input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"), + input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"), output=["results/networks/s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}-test.nc"], - log=dict(gurobi="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log", - python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log") + log=dict(gurobi="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log", + python="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log") ) import yaml with open('config.yaml', encoding='utf8') as f: