Merge branch 'PyPSA:master' into master

martavp 2021-06-15 09:18:11 +02:00
commit 228ccf611e
26 changed files with 1088 additions and 598 deletions

.gitignore

@@ -26,6 +26,7 @@ gurobi.log
/data/switzerland*
/data/.nfs*
/data/Industrial_Database.csv
+/data/retro/tabula-calculator-calcsetbuilding.csv
*.org


@@ -14,7 +14,7 @@ problems that distort the results. See the github repository
[issues](https://github.com/PyPSA/pypsa-eur-sec/issues) for some of
the problems (please feel free to help or make suggestions). There is
neither documentation nor a paper yet, but we hope to have a preprint
-out by summer 2020. We cannot support this model if you choose to use
+out by autumn 2021. We cannot support this model if you choose to use
it.
@@ -33,6 +33,14 @@ them:
![sector diagram](graphics/multisector_figure.png)
+
+Each of these sectors is built up on the transmission network nodes
+from [PyPSA-Eur](https://github.com/PyPSA/pypsa-eur):
+
+![network diagram](https://github.com/PyPSA/pypsa-eur/blob/master/doc/img/base.png?raw=true)
+
+For computational reasons the model is usually clustered down
+to 50-200 nodes.
PyPSA-Eur-Sec was initially based on the model PyPSA-Eur-Sec-30 described
in the paper [Synergies of sector coupling and transmission

Snakefile

@@ -51,9 +51,9 @@ rule build_clustered_population_layouts:
        pop_layout_total="resources/pop_layout_total.nc",
        pop_layout_urban="resources/pop_layout_urban.nc",
        pop_layout_rural="resources/pop_layout_rural.nc",
-        regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson')
+        regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}_{clusters}.geojson')
    output:
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv"
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv"
    resources: mem_mb=10000
    script: "scripts/build_clustered_population_layouts.py"
@@ -63,9 +63,9 @@ rule build_simplified_population_layouts:
        pop_layout_total="resources/pop_layout_total.nc",
        pop_layout_urban="resources/pop_layout_urban.nc",
        pop_layout_rural="resources/pop_layout_rural.nc",
-        regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}.geojson')
+        regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}.geojson')
    output:
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}.csv"
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}.csv"
    resources: mem_mb=10000
    script: "scripts/build_clustered_population_layouts.py"
@@ -75,11 +75,11 @@ rule build_heat_demands:
        pop_layout_total="resources/pop_layout_total.nc",
        pop_layout_urban="resources/pop_layout_urban.nc",
        pop_layout_rural="resources/pop_layout_rural.nc",
-        regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
+        regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
    output:
-        heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc",
-        heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc",
-        heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc"
+        heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc",
+        heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc",
+        heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc"
    resources: mem_mb=20000
    script: "scripts/build_heat_demand.py"
@@ -88,33 +88,33 @@ rule build_temperature_profiles:
        pop_layout_total="resources/pop_layout_total.nc",
        pop_layout_urban="resources/pop_layout_urban.nc",
        pop_layout_rural="resources/pop_layout_rural.nc",
-        regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
+        regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
    output:
-        temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
-        temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
-        temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc"
+        temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
+        temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
+        temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
+        temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
+        temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
+        temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc"
    resources: mem_mb=20000
    script: "scripts/build_temperature_profiles.py"

rule build_cop_profiles:
    input:
-        temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
-        temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
-        temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc"
+        temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
+        temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
+        temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
+        temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
+        temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
+        temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc"
    output:
-        cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
-        cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc",
-        cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc",
-        cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
-        cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc",
-        cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc"
+        cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
+        cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc",
+        cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc",
+        cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
+        cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc",
+        cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc"
    resources: mem_mb=20000
    script: "scripts/build_cop_profiles.py"
@@ -124,11 +124,11 @@ rule build_solar_thermal_profiles:
        pop_layout_total="resources/pop_layout_total.nc",
        pop_layout_urban="resources/pop_layout_urban.nc",
        pop_layout_rural="resources/pop_layout_rural.nc",
-        regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
+        regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
    output:
-        solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc",
-        solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc",
-        solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc"
+        solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc",
+        solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
+        solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
    resources: mem_mb=20000
    script: "scripts/build_solar_thermal_profiles.py"
@@ -199,12 +199,12 @@ rule build_industrial_production_per_country_tomorrow:

rule build_industrial_distribution_key:
    input:
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
        europe_shape=pypsaeur('resources/europe_shape.geojson'),
        hotmaps_industrial_database="data/Industrial_Database.csv",
-        network=pypsaeur('networks/{network}_s{simpl}_{clusters}.nc')
+        network=pypsaeur('networks/elec_s{simpl}_{clusters}.nc')
    output:
-        industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv"
+        industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=1000
    script: 'scripts/build_industrial_distribution_key.py'
@@ -213,10 +213,10 @@ rule build_industrial_distribution_key:

rule build_industrial_production_per_node:
    input:
-        industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv",
+        industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
        industrial_production_per_country_tomorrow="resources/industrial_production_per_country_tomorrow.csv"
    output:
-        industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv"
+        industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=1000
    script: 'scripts/build_industrial_production_per_node.py'
@@ -225,10 +225,10 @@ rule build_industrial_production_per_node:

rule build_industrial_energy_demand_per_node:
    input:
        industry_sector_ratios="resources/industry_sector_ratios.csv",
-        industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv",
-        industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv"
+        industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv",
+        industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
    output:
-        industrial_energy_demand_per_node="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv"
+        industrial_energy_demand_per_node="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=1000
    script: 'scripts/build_industrial_energy_demand_per_node.py'
@@ -247,10 +247,10 @@ rule build_industrial_energy_demand_per_country_today:

rule build_industrial_energy_demand_per_node_today:
    input:
-        industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv",
+        industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
        industrial_energy_demand_per_country_today="resources/industrial_energy_demand_per_country_today.csv"
    output:
-        industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv"
+        industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=1000
    script: 'scripts/build_industrial_energy_demand_per_node_today.py'
@@ -270,10 +270,10 @@ rule build_industrial_energy_demand_per_country:

rule build_industrial_demand:
    input:
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
        industrial_demand_per_country="resources/industrial_energy_demand_per_country.csv"
    output:
-        industrial_demand="resources/industrial_demand_{network}_s{simpl}_{clusters}.csv"
+        industrial_demand="resources/industrial_demand_elec_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=1000
    script: 'scripts/build_industrial_demand.py'
@@ -281,24 +281,25 @@ rule build_industrial_demand:

rule build_retro_cost:
    input:
        building_stock="data/retro/data_building_stock.csv",
+        data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv",
+        air_temperature = "resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
        u_values_PL="data/retro/u_values_poland.csv",
        tax_w="data/retro/electricity_taxes_eu.csv",
        construction_index="data/retro/comparative_level_investment.csv",
-        average_surface="data/retro/average_surface_components.csv",
        floor_area_missing="data/retro/floor_area_missing.csv",
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
        cost_germany="data/retro/retro_cost_germany.csv",
-        window_assumptions="data/retro/window_assumptions.csv"
+        window_assumptions="data/retro/window_assumptions.csv",
    output:
-        retro_cost="resources/retro_cost_{network}_s{simpl}_{clusters}.csv",
-        floor_area="resources/floor_area_{network}_s{simpl}_{clusters}.csv"
+        retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv",
+        floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv"
    resources: mem_mb=1000
    script: "scripts/build_retro_cost.py"

rule prepare_sector_network:
    input:
-        network=pypsaeur('networks/{network}_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
+        network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
        energy_totals_name='resources/energy_totals.csv',
        co2_totals_name='resources/co2_totals.csv',
        transport_name='resources/transport_data.csv',
@@ -310,35 +311,35 @@ rule prepare_sector_network:
        h2_cavern = "data/hydrogen_salt_cavern_potentials.csv",
        profile_offwind_ac=pypsaeur("resources/profile_offwind-ac.nc"),
        profile_offwind_dc=pypsaeur("resources/profile_offwind-dc.nc"),
-        busmap_s=pypsaeur("resources/busmap_{network}_s{simpl}.csv"),
-        busmap=pypsaeur("resources/busmap_{network}_s{simpl}_{clusters}.csv"),
-        clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
-        simplified_pop_layout="resources/pop_layout_{network}_s{simpl}.csv",
-        industrial_demand="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv",
-        heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc",
-        heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc",
-        heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
-        temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
-        temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
-        temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc",
-        cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
-        cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc",
-        cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc",
-        cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
-        cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc",
-        cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc",
-        solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc",
-        solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc",
-        solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc",
-        retro_cost_energy = "resources/retro_cost_{network}_s{simpl}_{clusters}.csv",
-        floor_area = "resources/floor_area_{network}_s{simpl}_{clusters}.csv"
+        busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"),
+        busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"),
+        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
+        simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv",
+        industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv",
+        heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc",
+        heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc",
+        heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc",
+        temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
+        temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
+        temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
+        temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
+        temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
+        temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc",
+        cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
+        cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc",
+        cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc",
+        cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
+        cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc",
+        cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc",
+        solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc",
+        solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
+        solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc",
+        retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv",
+        floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv"
-    output: config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
+    output: config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
    threads: 1
    resources: mem_mb=2000
-    benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
+    benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
    script: "scripts/prepare_sector_network.py"
@@ -411,16 +412,16 @@ if config["foresight"] == "overnight":

    rule solve_network:
        input:
-            network=config['results_dir'] + config['run'] + "/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
+            network=config['results_dir'] + config['run'] + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
            costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
            config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml'
-        output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
+        output: config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
        shadow: "shallow"
        log:
-            solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
-            python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
-            memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
+            solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
+            python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
+            memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
-        benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
+        benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
        threads: 4
        resources: mem_mb=config['solving']['mem']
        # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain
@@ -431,15 +432,15 @@ if config["foresight"] == "myopic":

    rule add_existing_baseyear:
        input:
-            network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
+            network=config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
            powerplants=pypsaeur('resources/powerplants.csv'),
-            busmap_s=pypsaeur("resources/busmap_{network}_s{simpl}.csv"),
-            busmap=pypsaeur("resources/busmap_{network}_s{simpl}_{clusters}.csv"),
-            clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
+            busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"),
+            busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"),
+            clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
            costs=config['costs_dir'] + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
-            cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
-            cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc"
+            cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
+            cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc"
-        output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
+        output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
        wildcard_constraints:
            planning_horizons=config['scenario']['planning_horizons'][0] #only applies to baseyear
        threads: 1
@@ -448,18 +449,18 @@ if config["foresight"] == "myopic":

    def process_input(wildcards):
        i = config["scenario"]["planning_horizons"].index(int(wildcards.planning_horizons))
-        return config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + str(config["scenario"]["planning_horizons"][i-1]) + ".nc"
+        return config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + str(config["scenario"]["planning_horizons"][i-1]) + ".nc"

    rule add_brownfield:
        input:
-            network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
+            network=config['results_dir'] + config['run'] + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
            network_p=process_input, #solved network at previous time step
            costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
-            cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
-            cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc"
+            cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
+            cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc"
-        output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
+        output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
        threads: 4
        resources: mem_mb=10000
        script: "scripts/add_brownfield.py"
@@ -468,16 +469,16 @@ if config["foresight"] == "myopic":

    rule solve_network_myopic:
        input:
-            network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
+            network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
            costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
            config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml'
-        output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
+        output: config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
        shadow: "shallow"
        log:
-            solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
-            python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
-            memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
+            solver=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
+            python=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
+            memory=config['results_dir'] + config['run'] + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
-        benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
+        benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
        threads: 4
        resources: mem_mb=config['solving']['mem']
        script: "scripts/solve_network.py"


@@ -1,4 +1,4 @@
-version: 0.4.0
+version: 0.5.0

logging_level: INFO
@@ -119,20 +119,25 @@ sector:
  'time_dep_hp_cop' : True  #time dependent heat pump coefficient of performance
  'heat_pump_sink_T' : 55.  # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py
  # conservatively high to cover hot water and space heating in poorly-insulated buildings
-  'retrofitting' :
-    'retro_exogen': True  # space heat demand savings exogenously
-    'dE':  # reduction of space heat demand (applied before losses in DH)
-      2020 : 0.
-      2030 : 0.15
-      2040 : 0.3
-      2050 : 0.4
+  'reduce_space_heat_exogenously': True  # reduces space heat demand by a given factor (applied before losses in DH)
+  # this can represent e.g. building renovation, building demolition, or if
+  # the factor is negative: increasing floor area, increased thermal comfort, population growth
+  'reduce_space_heat_exogenously_factor':  # per unit reduction in space heat demand
+    # the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221
+    2020: 0.10  # this results in a space heat demand reduction of 10%
+    2025: 0.09  # first heat demand increases compared to 2020 because of larger floor area per capita
+    2030: 0.09
+    2035: 0.11
+    2040: 0.16
+    2045: 0.21
+    2050: 0.29
+  'retrofitting' :  # co-optimises building renovation to reduce space heat demand
    'retro_endogen': False  # co-optimise space heat savings
-    'cost_factor' : 1.0
+    'cost_factor' : 1.0  # weight costs for building renovation
    'interest_rate': 0.04  # for investment in building components
    'annualise_cost': True  # annualise the investment costs
    'tax_weighting': False  # weight costs depending on taxes in countries
-    'construction_index': True  # weight costs depending on labour/material costs per ct
+    'construction_index': True  # weight costs depending on labour/material costs per country
+    'l_strength': ["0.076", "0.197"]  # additional insulation thickness[m], determines number of retro steps(=generators per bus) and maximum possible savings
  'tes' : True
  'tes_tau' : 3.
  'boilers' : True
@@ -370,6 +375,7 @@ plotting:
  "process emissions to atmosphere" : "#888888"
  "process emissions" : "#222222"
  "oil emissions" : "#666666"
+  "land transport oil emissions" : "#666666"
  "land transport fuel cell" : "#AAAAAA"
  "biogas" : "#800000"
  "solid biomass" : "#DAA520"


@@ -1,7 +0,0 @@
-,Dwelling,Ceilling,Standard component surfaces (m2),component,surfaces,(m2),,
-Building type,Space(m²),Height(m),Roof,Facade,Floor,Windows,,
-Single/two family house,120,2.5,90,166,63,29,,
-Large apartment house,1457,2.5,354,1189,354,380,,
-Apartment house,5276,,598.337,2992.1,598.337,756,tabula ,http://webtool.building-typology.eu/#pdfes
-,,,,,,,,
-"Source: https://link.springer.com/article/10.1007/s12053-010-9090-6 ,p.4",,,,,,,,


@@ -70,9 +70,9 @@ author = u'2019-2020 Tom Brown (KIT), Marta Victoria (Aarhus University), Lisa Z
# built documents.
#
# The short X.Y version.
-version = u'0.4'
+version = u'0.5'
# The full version, including alpha/beta/rc tags.
-release = u'0.4.0'
+release = u'0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.


@@ -22,5 +22,5 @@ U-values Poland,u_values_poland.csv,unknown,https://data.europa.eu/euodp/de/data
Floor area missing in hotmaps building stock data,floor_area_missing.csv,unknown,https://data.europa.eu/euodp/de/data/dataset/building-stock-observatory
Comparative level investment,comparative_level_investment.csv,Eurostat,https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Comparative_price_levels_for_investment
Electricity taxes,electricity_taxes_eu.csv,Eurostat,https://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=nrg_pc_204&lang=en
-Average surface components,average_surface_components.csv,unknown,http://webtool.building-typology.eu/#bm
+Building topologies and corresponding standard values,tabula-calculator-calcsetbuilding.csv,unknown,https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx
Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unkown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/



@@ -4,8 +4,8 @@ PyPSA-Eur-Sec: A Sector-Coupled Open Optimisation Model of the European Energy S
.. image:: https://img.shields.io/github/v/release/pypsa/pypsa-eur-sec?include_prereleases
    :alt: GitHub release (latest by date including pre-releases)
-.. image:: https://readthedocs.org/projects/pypsa-eur/badge/?version=latest
-    :target: https://pypsa-eur.readthedocs.io/en/latest/?badge=latest
+.. image:: https://readthedocs.org/projects/pypsa-eur-sec/badge/?version=latest
+    :target: https://pypsa-eur-sec.readthedocs.io/en/latest/?badge=latest
    :alt: Documentation Status
.. image:: https://img.shields.io/github/license/pypsa/pypsa-eur-sec


@@ -73,8 +73,8 @@ To download and extract the data bundle on the command line:

.. code:: bash

-    projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210125.tar.gz"
-    projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210125.tar.gz
+    projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210418.tar.gz"
+    projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210418.tar.gz

The data licences and sources are given in the following table.


@@ -3,14 +3,31 @@ Release Notes
##########################################

Future release
-===================
+==============

+* Include new features here.
+
+PyPSA-Eur-Sec 0.5.0 (21st May 2021)
+===================================
+
+This release includes improvements to the cost database for building retrofits, carbon budget management and wildcard settings, as well as an important bugfix for the emissions from land transport.
+
+This release is known to work with `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`_ Version 0.3.0 and `Technology Data <https://github.com/PyPSA/technology-data>`_ Version 0.2.0.
+
+Please note that the data bundle has also been updated.
+
+New features and bugfixes:
+
+* The cost database for retrofitting the thermal envelope of buildings has been updated. When calculating the space heat savings of a building, losses through thermal bridges and ventilation are now included, as well as heat gains (internal and from solar radiation). See the section :ref:`retro` for more details on the retrofitting module.
* For the myopic investment option, a carbon budget and a type of decay (exponential or beta) can be selected in the ``config.yaml`` file to distribute the budget across the ``planning_horizons``. For example, ``cb40ex0`` in the ``{sector_opts}`` wildcard will distribute a carbon budget of 40 GtCO2 following an exponential decay with initial growth rate 0.
* Added an option to alter the capital cost or maximum capacity of carriers by a factor via ``carrier+factor`` in the ``{sector_opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the ``capital_cost`` of solar to 50\% of original values. Similarly ``solar+p3`` multiplies the ``p_nom_max`` by 3.
* Rename the bus for European liquid hydrocarbons from ``Fischer-Tropsch`` to ``EU oil``, since it can be supplied not just with the Fischer-Tropsch process, but also with fossil oil.
+* Bugfix: The new separation of land transport by carrier in Version 0.4.0 failed to account for the carbon dioxide emissions from internal combustion engines in land transport. This is now treated as a negative load on the atmospheric carbon dioxide bus, just like aviation emissions.
* Bugfix: Fix reading in of ``pypsa-eur/resources/powerplants.csv`` to PyPSA-Eur Version 0.3.0 (use column attribute name ``DateIn`` instead of old ``YearDecommissioned``).
* Bugfix: Make sure that ``Store`` components (battery and H2) are also removed from PyPSA-Eur, so they can be added later by PyPSA-Eur-Sec.
+
+Thanks to Lisa Zeyen (KIT) for the retrofitting improvements and Marta Victoria (Aarhus University) for the carbon budget and wildcard management.

PyPSA-Eur-Sec 0.4.0 (11th December 2020)
=========================================
@@ -136,4 +153,4 @@ To make a new release of the data bundle, make an archive of the files in ``data

.. code:: bash

-    data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv
+    data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv retro/tabula-calculator-calcsetbuilding.csv
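
The ``carrier+factor`` option added in this release could in principle be handled by a small parser over the ``{sector_opts}`` wildcard. The sketch below only illustrates the idea for generators and is not the actual implementation in ``prepare_sector_network.py``:

    def apply_carrier_factor_options(n, sector_opts):
        """Rescale capital cost ('c') or maximum capacity ('p') of a carrier,
        e.g. 'solar+c0.5' halves capital_cost, 'solar+p3' triples p_nom_max."""
        attrs = {"c": "capital_cost", "p": "p_nom_max"}
        for opt in sector_opts.split("-"):
            if "+" not in opt:
                continue
            carrier, modifier = opt.split("+", 1)
            attr, factor = attrs[modifier[0]], float(modifier[1:])
            sel = n.generators.carrier == carrier
            n.generators.loc[sel, attr] *= factor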


@@ -108,6 +108,43 @@ Small for decentral applications.

Big water pit storage for district heating.

+.. _retro:
+
+Retrofitting of the thermal envelope of buildings
+===================================================
+
+Co-optimising building renovation is only enabled if the option
+``retro_endogen: True`` is set in the ``config.yaml``. To reduce the
+computational burden, the default setting is
+
+.. literalinclude:: ../config.default.yaml
+    :language: yaml
+    :lines: 134-135
+
+Renovation of the thermal envelope reduces the space heating demand and is
+optimised at each node for every heat bus. Renovation measures through additional
+insulation material and the replacement of energy-inefficient windows are considered.
+
+In a first step, the costs per energy saving are estimated in :mod:`build_retro_cost.py`.
+They depend on the insulation condition of the building stock and the costs of
+renovating the building elements.
+In a second step, two possible renovation strengths are determined for these
+costs per energy saving: a moderate renovation with lower costs and lower
+maximum possible space heat savings, and an ambitious renovation with
+higher costs and higher efficiency gains. They are added by step-wise
+linearisation in the form of two additional generators per heat bus in
+:mod:`prepare_sector_network.py`.
+
+The settings in the ``config.yaml`` concerning the endogenous optimisation of
+building renovation are
+
+.. literalinclude:: ../config.default.yaml
+    :language: yaml
+    :lines: 136-140
+
+Further information is given in the publication
+`Mitigating heat demand peaks in buildings in a highly renewable European energy system (2021) <https://arxiv.org/abs/2012.01831>`_.

Hydrogen demand
==================
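
To make the step-wise linearisation described above concrete, the following sketch shows how two renovation strengths might be represented as extendable PyPSA generators attached to each heat bus. The variable names (`heat_buses`, `max_savings`, `retro_cost`) and the attribute choices are simplified assumptions and do not reproduce the exact code in ``prepare_sector_network.py``:

    for l_strength in ["0.076", "0.197"]:  # moderate and ambitious insulation steps
        n.madd("Generator",
               heat_buses,
               suffix=" retrofitting " + l_strength,
               bus=heat_buses,
               carrier="retrofitting",
               p_nom_extendable=True,
               p_nom_max=max_savings[l_strength],    # maximum avoidable space-heat demand
               capital_cost=retro_cost[l_strength])  # annualised cost per unit of saved demand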


@@ -90,12 +90,12 @@ if __name__ == "__main__":
                          sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
                          co2_budget_name='go',
                          planning_horizons='2030'),
-            input=dict(network='pypsa-eur-sec/results/test/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc',
-                       network_p='pypsa-eur-sec/results/test/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc',
+            input=dict(network='pypsa-eur-sec/results/test/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc',
+                       network_p='pypsa-eur-sec/results/test/postnetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc',
                       costs='pypsa-eur-sec/data/costs/costs_{planning_horizons}.csv',
-                       cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
-                       cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc"),
-            output=['pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc']
+                       cop_air_total="pypsa-eur-sec/resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
+                       cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_elec_s{simpl}_{clusters}.nc"),
+            output=['pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc']
        )
    import yaml
    with open('config.yaml', encoding='utf8') as f:


@@ -411,15 +411,15 @@ if __name__ == "__main__":
        wildcards=dict(network='elec', simpl='', clusters='45', lv='1.0',
                       sector_opts='Co2L0-3H-T-H-B-I-solar3-dist1',
                       planning_horizons='2020'),
-        input=dict(network='pypsa-eur-sec/results/version-2/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc',
+        input=dict(network='pypsa-eur-sec/results/version-2/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc',
                   powerplants='pypsa-eur/resources/powerplants.csv',
-                   busmap_s='pypsa-eur/resources/busmap_{network}_s{simpl}.csv',
-                   busmap='pypsa-eur/resources/busmap_{network}_s{simpl}_{clusters}.csv',
+                   busmap_s='pypsa-eur/resources/busmap_elec_s{simpl}.csv',
+                   busmap='pypsa-eur/resources/busmap_elec_s{simpl}_{clusters}.csv',
                   costs='technology_data/outputs/costs_{planning_horizons}.csv',
-                   cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
-                   cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
-                   clustered_pop_layout="pypsa-eur-sec/resources/pop_layout_{network}_s{simpl}_{clusters}.csv",),
-        output=['pypsa-eur-sec/results/version-2/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'],
+                   cop_air_total="pypsa-eur-sec/resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
+                   cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
+                   clustered_pop_layout="pypsa-eur-sec/resources/pop_layout_elec_s{simpl}_{clusters}.csv",),
+        output=['pypsa-eur-sec/results/version-2/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'],
    )
    import yaml
    with open('config.yaml', encoding='utf8') as f:


@@ -1,4 +1,3 @@
import pandas as pd
import geopandas as gpd
@@ -51,7 +50,6 @@ country_to_code = {
'Switzerland' : 'CH',
}

non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL']

rename = {"GR" : "EL",
@@ -73,7 +71,6 @@ def build_eurostat(year):
    fns = {2016: "data/eurostat-energy_balances-june_2016_edition/{year}-Energy-Balances-June2016edition.xlsx",
           2017: "data/eurostat-energy_balances-june_2017_edition/{year}-ENERGY-BALANCES-June2017edition.xlsx"}
    #2016 includes BA, 2017 doesn't

    #with sheet as None, an ordered dictionary of all sheets is returned
@@ -82,7 +79,6 @@ def build_eurostat(year):
                        skiprows=1,
                        index_col=list(range(4)))
    #sorted_index necessary for slicing
    df = pd.concat({country_to_code[df.columns[0]] : df for ct,df in dfs.items()},sort=True).sort_index()
@@ -91,15 +87,12 @@ def build_eurostat(year):
def build_swiss(year):
    fn = "data/switzerland-sfoe/switzerland-new_format.csv"
    #convert PJ/a to TWh/a
    return (pd.read_csv(fn,index_col=list(range(2)))/3.6).loc["CH",str(year)]

def build_idees(year):
    base_dir = "data/jrc-idees-2015"
@@ -275,7 +268,7 @@ def build_idees(year):
    return totals

-def build_energy_totals():
+def build_energy_totals(eurostat, swiss, idees):

    clean_df = idees.reindex(population.index).drop(["passenger cars","passenger car efficiency"],axis=1)
@@ -316,7 +309,6 @@ def build_energy_totals():
                + avg*(clean_df.loc[missing_in_eurostat,"{} {}".format("total",sector)] - clean_df.loc[missing_in_eurostat,"{} {}".format("electricity",sector)])
    #Fix Norway space and water heating fractions
    #http://www.ssb.no/en/energi-og-industri/statistikker/husenergi/hvert-3-aar/2014-07-14
    #The main heating source for about 73 per cent of the households is based on electricity
@@ -458,14 +450,12 @@ def build_eurostat_co2(year=1990):
    #Residual oil (No. 6) 0.298
    #https://www.eia.gov/electricity/annual/html/epa_a_03.html
    eurostat_co2 = eurostat_for_co2.multiply(se).sum(axis=1)

    return eurostat_co2

-def build_co2_totals(eea_co2, eurostat_co2, year=1990):
+def build_co2_totals(eea_co2, eurostat_co2):

    co2 = eea_co2.reindex(["EU28","NO","CH","BA","RS","AL","ME","MK"] + eu28)
@@ -530,7 +520,6 @@ def build_transport_data():
if __name__ == "__main__":

    # Detect running outside of snakemake and mock snakemake for testing
    if 'snakemake' not in globals():
        from vresutils import Dict
@@ -546,21 +535,19 @@ if __name__ == "__main__":
    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
    population = nuts3['pop'].groupby(nuts3.country).sum()

-    year = 2011
-    eurostat = build_eurostat(year)
-    swiss = build_swiss(year)
-    idees = build_idees(year)
-    build_energy_totals()
-    eea_co2 = build_eea_co2()
-    eurostat_co2 = build_eurostat_co2()
-    co2=build_co2_totals(eea_co2, eurostat_co2, year)
+    data_year = 2011
+    eurostat = build_eurostat(data_year)
+    swiss = build_swiss(data_year)
+    idees = build_idees(data_year)
+    build_energy_totals(eurostat, swiss, idees)
+    base_year_emissions = 1990
+    eea_co2 = build_eea_co2(base_year_emissions)
+    eurostat_co2 = build_eurostat_co2(base_year_emissions)
+    co2 = build_co2_totals(eea_co2, eurostat_co2)

    co2.to_csv(snakemake.output.co2_name)

    build_transport_data()


@@ -11,7 +11,7 @@ if 'snakemake' not in globals():
    import yaml
    snakemake = Dict()
    with open('config.yaml') as f:
-        snakemake.config = yaml.load(f)
+        snakemake.config = yaml.safe_load(f)
    snakemake.input = Dict()
    snakemake.output = Dict()
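
The switch from ``yaml.load`` to ``yaml.safe_load`` in the mock-snakemake blocks here and in the scripts below avoids constructing arbitrary Python objects from the config file; a minimal illustration:

    import yaml

    with open("config.yaml", encoding="utf8") as f:
        config = yaml.safe_load(f)  # parses only standard YAML types; plain yaml.load(f) without an explicit Loader is deprecated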


@@ -98,7 +98,7 @@ for ct in eu28:
        for fuel in fuels:
            summary.at[fuel,sub] = s[fuels[fuel]].sum()
-        summary.at['other',sub] = summary.at['all',sub] - summary.loc[summary.index^['all','other'],sub].sum()
+        summary.at['other',sub] = summary.at['all',sub] - summary.loc[summary.index.symmetric_difference(['all','other']),sub].sum()

    summary['Other Industrial Sectors'] = summary[ois_subs].sum(axis=1)
    summary.drop(columns=ois_subs,inplace=True)
@@ -128,7 +128,7 @@ output = pd.read_csv(snakemake.input.industrial_production_per_country,
eu28_averages = final_summary.groupby(level=1,axis=1).sum().divide(output.loc[eu28].sum(),axis=1)

-non_eu28 = output.index^eu28
+non_eu28 = output.index.symmetric_difference(eu28)

for ct in non_eu28:
    print(ct)


@@ -196,7 +196,7 @@ ammonia = pd.read_csv(snakemake.input.ammonia_production,
                      index_col=0)

there = ammonia.index.intersection(countries_demand.index)
-missing = countries_demand.index^there
+missing = countries_demand.index.symmetric_difference(there)

print("Following countries have no ammonia demand:", missing)


@ -15,7 +15,7 @@ if 'snakemake' not in globals():
import yaml import yaml
snakemake = Dict() snakemake = Dict()
with open('config.yaml') as f: with open('config.yaml') as f:
snakemake.config = yaml.load(f) snakemake.config = yaml.safe_load(f)
snakemake.input = Dict() snakemake.input = Dict()
snakemake.output = Dict() snakemake.output = Dict()
@ -46,7 +46,7 @@ urban_fraction = pd.read_csv(snakemake.input.urban_percent,
#fill missing Balkans values #fill missing Balkans values
missing = ["AL","ME","MK"] missing = ["AL","ME","MK"]
reference = ["RS","BA"] reference = ["RS","BA"]
urban_fraction = urban_fraction.reindex(urban_fraction.index|missing) urban_fraction = urban_fraction.reindex(urban_fraction.index.union(missing))
urban_fraction.loc[missing] = urban_fraction[reference].mean() urban_fraction.loc[missing] = urban_fraction[reference].mean()
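Reindexing with `Index.union` first adds empty rows for the missing Balkan countries, which are then filled with the mean of the reference countries. A toy sketch with made-up urban shares (illustrative numbers only):

```python
import pandas as pd

urban_fraction = pd.Series({"RS": 56.4, "BA": 48.2, "GR": 79.1})
missing = ["AL", "ME", "MK"]
reference = ["RS", "BA"]

# union keeps existing entries and appends NaN rows for the missing codes
urban_fraction = urban_fraction.reindex(urban_fraction.index.union(missing))
urban_fraction.loc[missing] = urban_fraction[reference].mean()
print(urban_fraction)
```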

File diff suppressed because it is too large


@ -11,7 +11,7 @@ if 'snakemake' not in globals():
import yaml import yaml
snakemake = Dict() snakemake = Dict()
with open('config.yaml') as f: with open('config.yaml') as f:
snakemake.config = yaml.load(f) snakemake.config = yaml.safe_load(f)
snakemake.input = Dict() snakemake.input = Dict()
snakemake.output = Dict() snakemake.output = Dict()


@ -11,7 +11,7 @@ if 'snakemake' not in globals():
import yaml import yaml
snakemake = Dict() snakemake = Dict()
with open('config.yaml') as f: with open('config.yaml') as f:
snakemake.config = yaml.load(f) snakemake.config = yaml.safe_load(f)
snakemake.input = Dict() snakemake.input = Dict()
snakemake.output = Dict() snakemake.output = Dict()


@ -79,7 +79,7 @@ def calculate_nodal_cfs(n,label,nodal_cfs):
cf_c = p_c/capacities_c cf_c = p_c/capacities_c
index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()]) index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()])
nodal_cfs = nodal_cfs.reindex(index|nodal_cfs.index) nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index))
nodal_cfs.loc[index,label] = cf_c.values nodal_cfs.loc[index,label] = cf_c.values
return nodal_cfs return nodal_cfs
@ -106,7 +106,7 @@ def calculate_cfs(n,label,cfs):
cf_c = pd.concat([cf_c], keys=[c.list_name]) cf_c = pd.concat([cf_c], keys=[c.list_name])
cfs = cfs.reindex(cf_c.index|cfs.index) cfs = cfs.reindex(cf_c.index.union(cfs.index))
cfs.loc[cf_c.index,label] = cf_c cfs.loc[cf_c.index,label] = cf_c
@ -121,7 +121,7 @@ def calculate_nodal_costs(n,label,nodal_costs):
c.df["capital_costs"] = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] c.df["capital_costs"] = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"]
capital_costs = c.df.groupby(["location","carrier"])["capital_costs"].sum() capital_costs = c.df.groupby(["location","carrier"])["capital_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,"capital") + t for t in capital_costs.index.to_list()]) index = pd.MultiIndex.from_tuples([(c.list_name,"capital") + t for t in capital_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index|nodal_costs.index) nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
nodal_costs.loc[index,label] = capital_costs.values nodal_costs.loc[index,label] = capital_costs.values
if c.name == "Link": if c.name == "Link":
@ -143,7 +143,7 @@ def calculate_nodal_costs(n,label,nodal_costs):
c.df["marginal_costs"] = p*c.df.marginal_cost c.df["marginal_costs"] = p*c.df.marginal_cost
marginal_costs = c.df.groupby(["location","carrier"])["marginal_costs"].sum() marginal_costs = c.df.groupby(["location","carrier"])["marginal_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,"marginal") + t for t in marginal_costs.index.to_list()]) index = pd.MultiIndex.from_tuples([(c.list_name,"marginal") + t for t in marginal_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index|nodal_costs.index) nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
nodal_costs.loc[index,label] = marginal_costs.values nodal_costs.loc[index,label] = marginal_costs.values
return nodal_costs return nodal_costs
@ -158,7 +158,7 @@ def calculate_costs(n,label,costs):
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"]) capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name]) capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index|costs.index) costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index,label] = capital_costs_grouped costs.loc[capital_costs_grouped.index,label] = capital_costs_grouped
@ -185,7 +185,7 @@ def calculate_costs(n,label,costs):
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"]) marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name]) marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index|costs.index) costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index,label] = marginal_costs_grouped costs.loc[marginal_costs_grouped.index,label] = marginal_costs_grouped
@ -220,7 +220,7 @@ def calculate_nodal_capacities(n,label,nodal_capacities):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum() nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()]) index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()])
nodal_capacities = nodal_capacities.reindex(index|nodal_capacities.index) nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index,label] = nodal_capacities_c.values nodal_capacities.loc[index,label] = nodal_capacities_c.values
return nodal_capacities return nodal_capacities
@ -234,7 +234,7 @@ def calculate_capacities(n,label,capacities):
capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum() capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum()
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name]) capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(capacities_grouped.index|capacities.index) capacities = capacities.reindex(capacities_grouped.index.union(capacities.index))
capacities.loc[capacities_grouped.index,label] = capacities_grouped capacities.loc[capacities_grouped.index,label] = capacities_grouped
@ -267,7 +267,7 @@ def calculate_energy(n,label,energy):
c_energies = pd.concat([c_energies], keys=[c.list_name]) c_energies = pd.concat([c_energies], keys=[c.list_name])
energy = energy.reindex(c_energies.index|energy.index) energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index,label] = c_energies energy.loc[c_energies.index,label] = c_energies
@ -285,7 +285,7 @@ def calculate_supply(n,label,supply):
for c in n.iterate_components(n.one_port_components): for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)] items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0: if len(items) == 0:
continue continue
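`c.df.bus.map(bus_map)` yields `NaN` for components attached to buses absent from `bus_map`, and boolean indexing with an NA-containing mask raises a `ValueError` in recent pandas; appending `.fillna(False)` treats those components as simply not selected. The same pattern is applied in `calculate_supply_energy` below. A toy sketch (component and bus names are invented):

```python
import pandas as pd

bus_of = pd.Series({"gen0": "bus0", "gen1": "bus1", "gen2": "bus2"})
bus_map = pd.Series({"bus0": True, "bus1": False})  # "bus2" not present

mask = bus_of.map(bus_map)                # True / False / NaN
items = bus_of.index[mask.fillna(False)]  # NaN counts as "not selected"
print(items)                              # Index(['gen0'], dtype='object')
```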
@ -294,7 +294,7 @@ def calculate_supply(n,label,supply):
s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i]) s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index|supply.index) supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index,label] = s supply.loc[s.index,label] = s
@ -313,7 +313,7 @@ def calculate_supply(n,label,supply):
s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i]) s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index|supply.index) supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index,label] = s supply.loc[s.index,label] = s
return supply return supply
@ -330,7 +330,7 @@ def calculate_supply_energy(n,label,supply_energy):
for c in n.iterate_components(n.one_port_components): for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)] items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0: if len(items) == 0:
continue continue
@ -339,7 +339,7 @@ def calculate_supply_energy(n,label,supply_energy):
s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i]) s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(s.index|supply_energy.index) supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
supply_energy.loc[s.index,label] = s supply_energy.loc[s.index,label] = s
@ -357,7 +357,7 @@ def calculate_supply_energy(n,label,supply_energy):
s = pd.concat([s], keys=[c.list_name]) s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i]) s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(s.index|supply_energy.index) supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
supply_energy.loc[s.index,label] = s supply_energy.loc[s.index,label] = s
@ -366,7 +366,7 @@ def calculate_supply_energy(n,label,supply_energy):
def calculate_metrics(n,label,metrics): def calculate_metrics(n,label,metrics):
metrics = metrics.reindex(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])|metrics.index) metrics = metrics.reindex(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]).union(metrics.index))
metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum() metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum() metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum()
@ -384,7 +384,7 @@ def calculate_metrics(n,label,metrics):
def calculate_prices(n,label,prices): def calculate_prices(n,label,prices):
prices = prices.reindex(prices.index|n.buses.carrier.unique()) prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
#WARNING: this is time-averaged, see weighted_prices for load-weighted average #WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean() prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
@ -467,7 +467,7 @@ def calculate_market_values(n, label, market_values):
techs = n.generators.loc[generators,"carrier"].value_counts().index techs = n.generators.loc[generators,"carrier"].value_counts().index
market_values = market_values.reindex(market_values.index | techs) market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs: for tech in techs:
@ -488,7 +488,7 @@ def calculate_market_values(n, label, market_values):
techs = n.links.loc[all_links,"carrier"].value_counts().index techs = n.links.loc[all_links,"carrier"].value_counts().index
market_values = market_values.reindex(market_values.index | techs) market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs: for tech in techs:
links = all_links[n.links.loc[all_links,"carrier"] == tech] links = all_links[n.links.loc[all_links,"carrier"] == tech]
@ -505,7 +505,7 @@ def calculate_market_values(n, label, market_values):
def calculate_price_statistics(n, label, price_statistics): def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(price_statistics.index|pd.Index(["zero_hours","mean","standard_deviation"])) price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours","mean","standard_deviation"])))
buses = n.buses.index[n.buses.carrier == "AC"] buses = n.buses.index[n.buses.carrier == "AC"]
@ -629,4 +629,3 @@ if __name__ == "__main__":
cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv') cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv')


@ -130,7 +130,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True) costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True)
new_columns = ((preferred_order & costs.columns) new_columns = (preferred_order.intersection(costs.columns)
.append(costs.columns.difference(preferred_order))) .append(costs.columns.difference(preferred_order)))
costs = costs[new_columns] costs = costs[new_columns]
@ -147,7 +147,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
n.links.carrier != "B2B")], inplace=True) n.links.carrier != "B2B")], inplace=True)
# drop non-bus # drop non-bus
to_drop = costs.index.levels[0] ^ n.buses.index to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0: if len(to_drop) != 0:
print("dropping non-buses", to_drop) print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0) costs.drop(to_drop, level=0, inplace=True, axis=0)
@ -463,7 +463,7 @@ def plot_series(network, carrier="AC", name="test"):
"battery storage", "battery storage",
"hot water storage"]) "hot water storage"])
new_columns = ((preferred_order & supply.columns) new_columns = (preferred_order.intersection(supply.columns)
.append(supply.columns.difference(preferred_order))) .append(supply.columns.difference(preferred_order)))
supply = supply.groupby(supply.columns, axis=1).sum() supply = supply.groupby(supply.columns, axis=1).sum()
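`preferred_order & costs.columns` becomes `preferred_order.intersection(costs.columns)`; with the unsorted default, `intersection` keeps the calling index's ordering, so the preferred carriers come first and everything else is appended after them. A toy sketch of the column-ordering idiom (carrier names are illustrative):

```python
import pandas as pd

preferred_order = pd.Index(["solar", "offshore wind", "onshore wind"])
columns = pd.Index(["onshore wind", "battery storage", "solar"])

# preferred carriers first (in their preferred order), remaining columns after
new_columns = (preferred_order.intersection(columns)
               .append(columns.difference(preferred_order)))
print(new_columns)  # ['solar', 'onshore wind', 'battery storage']
```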


@ -82,7 +82,7 @@ def plot_costs():
print(df.sum()) print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.sum().sort_values().index new_columns = df.sum().sort_values().index
@ -136,7 +136,7 @@ def plot_energy():
print(df) print(df)
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.columns.sort_values() new_columns = df.columns.sort_values()
#new_columns = df.sum().sort_values().index #new_columns = df.sum().sort_values().index
@ -177,7 +177,7 @@ def plot_balances():
balances_df = pd.read_csv(snakemake.input.balances,index_col=list(range(3)),header=list(range(n_header))) balances_df = pd.read_csv(snakemake.input.balances,index_col=list(range(3)),header=list(range(n_header)))
balances = {i.replace(" ","_") : [i] for i in balances_df.index.levels[0]} balances = {i.replace(" ","_") : [i] for i in balances_df.index.levels[0]}
balances["energy"] = balances_df.index.levels[0]^co2_carriers balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers]
for k,v in balances.items(): for k,v in balances.items():
@ -205,7 +205,7 @@ def plot_balances():
if df.empty: if df.empty:
continue continue
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order)) new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.columns.sort_values() new_columns = df.columns.sort_values()


@ -6,9 +6,8 @@ import pandas as pd
idx = pd.IndexSlice idx = pd.IndexSlice
import numpy as np import numpy as np
import scipy as sp
import xarray as xr import xarray as xr
import re, os import re, os, sys
from six import iteritems, string_types from six import iteritems, string_types
@ -50,22 +49,19 @@ override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"lif
def co2_emissions_year(cts, opts, year): def co2_emissions_year(cts, opts, year):
""" """
calculate co2 emissions in one specific year (e.g. 1990 or 2018). Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
""" """
eea_co2 = build_eea_co2(year) eea_co2 = build_eea_co2(year)
# TODO: read Eurostat data from year>2014, this only affects the estimation of # TODO: read Eurostat data from year>2014, this only affects the estimation of
# CO2 emissions for "BA","RS","AL","ME","MK" # CO2 emissions for "BA","RS","AL","ME","MK"
if year > 2014: if year > 2014:
eurostat_co2 = build_eurostat_co2(year=2014) eurostat_co2 = build_eurostat_co2(year=2014)
else: else:
eurostat_co2 = build_eurostat_co2(year) eurostat_co2 = build_eurostat_co2(year)
co2_totals=build_co2_totals(eea_co2, eurostat_co2, year) co2_totals = build_co2_totals(eea_co2, eurostat_co2)
co2_emissions = co2_totals.loc[cts, "electricity"].sum() co2_emissions = co2_totals.loc[cts, "electricity"].sum()
@ -77,9 +73,9 @@ def co2_emissions_year(cts, opts, year):
co2_emissions += co2_totals.loc[cts, ["industrial non-elec","industrial processes", co2_emissions += co2_totals.loc[cts, ["industrial non-elec","industrial processes",
"domestic aviation","international aviation", "domestic aviation","international aviation",
"domestic navigation","international navigation"]].sum().sum() "domestic navigation","international navigation"]].sum().sum()
co2_emissions *=0.001 #MtCO2 to GtCO2
return co2_emissions
co2_emissions *= 0.001 # Convert MtCO2 to GtCO2
return co2_emissions
def build_carbon_budget(o): def build_carbon_budget(o):
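In the hunks above, `co2_emissions_year` now calls `build_co2_totals(eea_co2, eurostat_co2)` without a year argument, caps the Eurostat-based estimate at 2014, and converts the summed emissions from MtCO2 to GtCO2 before returning. A toy sketch of the summation and unit conversion (the country and sector numbers are invented, and only the electricity column is shown):

```python
import pandas as pd

# Invented CO2 totals in MtCO2 (rows: countries, columns: sectors)
co2_totals = pd.DataFrame({"electricity": [250.0, 300.0],
                           "residential non-elec": [80.0, 90.0]},
                          index=["DE", "FR"])

cts = ["DE", "FR"]
co2_emissions = co2_totals.loc[cts, "electricity"].sum()  # 550 MtCO2
co2_emissions *= 0.001                                    # MtCO2 -> GtCO2
print(co2_emissions)                                      # 0.55
```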
@ -244,7 +240,7 @@ def remove_elec_base_techs(n):
for c in n.iterate_components(snakemake.config["pypsa_eur"]): for c in n.iterate_components(snakemake.config["pypsa_eur"]):
to_keep = snakemake.config["pypsa_eur"][c.name] to_keep = snakemake.config["pypsa_eur"][c.name]
to_remove = pd.Index(c.df.carrier.unique())^to_keep to_remove = pd.Index(c.df.carrier.unique()).symmetric_difference(to_keep)
print("Removing",c.list_name,"with carrier",to_remove) print("Removing",c.list_name,"with carrier",to_remove)
names = c.df.index[c.df.carrier.isin(to_remove)] names = c.df.index[c.df.carrier.isin(to_remove)]
print(names) print(names)
@ -252,6 +248,14 @@ def remove_elec_base_techs(n):
n.carriers.drop(to_remove, inplace=True, errors="ignore") n.carriers.drop(to_remove, inplace=True, errors="ignore")
def remove_non_electric_buses(n):
"""
Remove buses from PyPSA-Eur whose carrier is neither AC nor DC.
"""
print("drop buses from PyPSA-Eur with carrier: ", n.buses[~n.buses.carrier.isin(["AC", "DC"])].carrier.unique())
n.buses = n.buses[n.buses.carrier.isin(["AC", "DC"])]
def add_co2_tracking(n): def add_co2_tracking(n):
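The new `remove_non_electric_buses` strips every bus whose carrier is not AC or DC before the sector buses are rebuilt, which keeps the script robust if the upstream PyPSA-Eur network already carries extra bus carriers. A toy sketch of the filter (bus names and the extra carrier are assumptions for illustration):

```python
import pandas as pd

buses = pd.DataFrame({"carrier": ["AC", "DC", "battery"]},
                     index=["DE0 0", "DE0 0 converter", "DE0 0 battery"])

print("drop buses with carrier:",
      buses[~buses.carrier.isin(["AC", "DC"])].carrier.unique())
buses = buses[buses.carrier.isin(["AC", "DC"])]
print(buses)
```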
@ -925,7 +929,7 @@ def add_storage(network):
# hydrogen stored overground # hydrogen stored overground
h2_capital_cost = costs.at["hydrogen storage tank", "fixed"] h2_capital_cost = costs.at["hydrogen storage tank", "fixed"]
nodes_overground = nodes ^ cavern_nodes.index nodes_overground = nodes.symmetric_difference(cavern_nodes.index)
network.madd("Store", network.madd("Store",
nodes_overground + " H2 Store", nodes_overground + " H2 Store",
@ -1150,6 +1154,12 @@ def add_land_transport(network):
if ice_share > 0: if ice_share > 0:
if "EU oil" not in network.buses.index:
network.madd("Bus",
["EU oil"],
location="EU",
carrier="oil")
network.madd("Load", network.madd("Load",
nodes, nodes,
suffix=" land transport oil", suffix=" land transport oil",
@ -1157,6 +1167,13 @@ def add_land_transport(network):
carrier="land transport oil", carrier="land transport oil",
p_set=ice_share/options['transport_internal_combustion_efficiency']*transport[nodes]) p_set=ice_share/options['transport_internal_combustion_efficiency']*transport[nodes])
co2 = ice_share/options['transport_internal_combustion_efficiency']*transport[nodes].sum().sum()/8760.*costs.at["oil",'CO2 intensity']
network.madd("Load",
["land transport oil emissions"],
bus="co2 atmosphere",
carrier="land transport oil emissions",
p_set=-co2)
def add_heat(network): def add_heat(network):
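The block added above makes the CO2 from internal-combustion land transport explicit: the annual oil use implied by `ice_share` and the combustion efficiency is converted into a constant hourly emission rate and attached as a load with negative `p_set` on the `co2 atmosphere` bus (a negative load injects into the bus). A back-of-the-envelope sketch with invented numbers and illustrative units:

```python
ice_share = 0.8            # share of land transport still on internal combustion
efficiency = 0.3           # options['transport_internal_combustion_efficiency']
annual_transport = 1.0e8   # transport service demand over the year (MWh, toy value)
co2_intensity_oil = 0.27   # tCO2 per MWh of oil (assumed value)

annual_oil = ice_share / efficiency * annual_transport   # MWh of oil
co2_rate = annual_oil / 8760. * co2_intensity_oil        # tCO2 per hour

# In the network this constant rate becomes a Load on "co2 atmosphere"
# with p_set = -co2_rate, i.e. a steady injection of CO2 into the atmosphere.
print(f"{co2_rate:,.0f} tCO2/h")
```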
@ -1171,11 +1188,10 @@ def add_heat(network):
urban_fraction = options['central_fraction']*pop_layout["urban"]/(pop_layout[["urban","rural"]].sum(axis=1)) urban_fraction = options['central_fraction']*pop_layout["urban"]/(pop_layout[["urban","rural"]].sum(axis=1))
# building retrofitting, exogenously reduce space heat demand # exogenously reduce space heat demand
if options["retrofitting"]["retro_exogen"]: if options["reduce_space_heat_exogenously"]:
dE = get_parameter(options["retrofitting"]["dE"]) dE = get_parameter(options["reduce_space_heat_exogenously_factor"])
print("retrofitting exogenously, assumed space heat reduction of ", print("assumed space heat reduction of {} %".format(dE*100))
dE)
for sector in sectors: for sector in sectors:
heat_demand[sector + " space"] = (1-dE)*heat_demand[sector + " space"] heat_demand[sector + " space"] = (1-dE)*heat_demand[sector + " space"]
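The renamed options decouple the exogenous space-heat reduction from the endogenous retrofitting option: `reduce_space_heat_exogenously_factor` gives the fraction `dE` by which space heat demand is scaled down before anything else is built. A toy sketch (the sector names follow the script; the 29 % reduction and the demand values are arbitrary illustrative numbers):

```python
import pandas as pd

heat_demand = pd.DataFrame({"residential space": [100.0, 120.0],
                            "services space": [40.0, 50.0]})  # toy MWh values
dE = 0.29  # reduce_space_heat_exogenously_factor for this planning horizon

for sector in ["residential", "services"]:
    heat_demand[sector + " space"] = (1 - dE) * heat_demand[sector + " space"]

print(heat_demand)  # each space-heat column scaled by 0.71
```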
@ -1481,7 +1497,7 @@ def create_nodes_for_heat_sector():
else: else:
nodes[sector + " urban decentral"] = pop_layout.index nodes[sector + " urban decentral"] = pop_layout.index
# for central nodes, residential and services are aggregated # for central nodes, residential and services are aggregated
nodes["urban central"] = pop_layout.index ^ nodes["residential urban decentral"] nodes["urban central"] = pop_layout.index.symmetric_difference(nodes["residential urban decentral"])
return nodes return nodes
@ -1532,6 +1548,8 @@ def add_biomass(network):
bus1="EU gas", bus1="EU gas",
bus2="co2 atmosphere", bus2="co2 atmosphere",
carrier="biogas to gas", carrier="biogas to gas",
capital_cost=costs.loc["biogas upgrading", "fixed"],
marginal_cost=costs.loc["biogas upgrading", "VOM"],
efficiency2=-costs.at['gas','CO2 intensity'], efficiency2=-costs.at['gas','CO2 intensity'],
p_nom_extendable=True) p_nom_extendable=True)
@ -1759,6 +1777,7 @@ def add_industry(network):
#remove today's industrial electricity demand by scaling down total electricity demand #remove today's industrial electricity demand by scaling down total electricity demand
for ct in n.buses.country.unique(): for ct in n.buses.country.unique():
loads = n.loads.index[(n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity")] loads = n.loads.index[(n.loads.index.str[:2] == ct) & (n.loads.carrier == "electricity")]
if n.loads_t.p_set[loads].empty: continue
factor = 1 - industrial_demand.loc[loads,"current electricity"].sum()/n.loads_t.p_set[loads].sum().sum() factor = 1 - industrial_demand.loc[loads,"current electricity"].sum()/n.loads_t.p_set[loads].sum().sum()
n.loads_t.p_set[loads] *= factor n.loads_t.p_set[loads] *= factor
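The added guard skips country codes whose selected electricity loads have no time series (for example, a country with no matching loads in the clustered network), so the rescaling factor is only computed where it is well defined. A toy sketch of the scaling step with invented load and industry-demand numbers:

```python
import pandas as pd

p_set = pd.DataFrame({"DE0 0": [100.0, 110.0], "DE0 1": [90.0, 95.0]})  # MW, toy
current_industry_el = pd.Series({"DE0 0": 40.0, "DE0 1": 35.0})         # MWh, toy

loads = p_set.columns
if not p_set[loads].empty:  # mirrors the `if ... .empty: continue` guard
    factor = 1 - current_industry_el[loads].sum() / p_set[loads].sum().sum()
    p_set[loads] *= factor  # remove today's industrial electricity from the profile
    print(round(factor, 3), round(p_set.values.sum(), 1))
```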
@ -1867,7 +1886,7 @@ if __name__ == "__main__":
opts='', planning_horizons='2020', opts='', planning_horizons='2020',
sector_opts='120H-T-H-B-I-onwind+p3-dist1-cb48be3'), sector_opts='120H-T-H-B-I-onwind+p3-dist1-cb48be3'),
input=dict( network='../pypsa-eur/networks/{network}_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc', input=dict( network='../pypsa-eur/networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc',
energy_totals_name='resources/energy_totals.csv', energy_totals_name='resources/energy_totals.csv',
co2_totals_name='resources/co2_totals.csv', co2_totals_name='resources/co2_totals.csv',
transport_name='resources/transport_data.csv', transport_name='resources/transport_data.csv',
@ -1879,34 +1898,33 @@ if __name__ == "__main__":
h2_cavern = "data/hydrogen_salt_cavern_potentials.csv", h2_cavern = "data/hydrogen_salt_cavern_potentials.csv",
profile_offwind_ac="../pypsa-eur/resources/profile_offwind-ac.nc", profile_offwind_ac="../pypsa-eur/resources/profile_offwind-ac.nc",
profile_offwind_dc="../pypsa-eur/resources/profile_offwind-dc.nc", profile_offwind_dc="../pypsa-eur/resources/profile_offwind-dc.nc",
busmap_s="../pypsa-eur/resources/busmap_{network}_s{simpl}.csv", busmap_s="../pypsa-eur/resources/busmap_elec_s{simpl}.csv",
busmap="../pypsa-eur/resources/busmap_{network}_s{simpl}_{clusters}.csv", busmap="../pypsa-eur/resources/busmap_elec_s{simpl}_{clusters}.csv",
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv", clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
simplified_pop_layout="resources/pop_layout_{network}_s{simpl}.csv", simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv",
industrial_demand="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv", industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv",
heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc", heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc",
heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc", heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc",
heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc", heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc",
temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc", temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc", temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc", temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc", temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc", temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc", temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc",
cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc", cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc",
cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc", cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc", cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc",
cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc", cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc", solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc",
solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc", solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc", solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc",
retro_cost_energy = "resources/retro_cost_{network}_s{simpl}_{clusters}.csv", retro_cost_energy = "resources/retro_cost_elec_s{simpl}_{clusters}.csv",
floor_area = "resources/floor_area_{network}_s{simpl}_{clusters}.csv" floor_area = "resources/floor_area_elec_s{simpl}_{clusters}.csv"
), ),
output=['results/version-cb48be3/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'] output=['results/version-cb48be3/prenetworks/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc']
) )
import yaml import yaml
with open('config.yaml', encoding='utf8') as f: with open('config.yaml', encoding='utf8') as f:
@ -1946,6 +1964,8 @@ if __name__ == "__main__":
n.loads["carrier"] = "electricity" n.loads["carrier"] = "electricity"
remove_non_electric_buses(n)
n.buses["location"] = n.buses.index n.buses["location"] = n.buses.index
update_wind_solar_costs(n, costs) update_wind_solar_costs(n, costs)


@ -376,10 +376,10 @@ if __name__ == "__main__":
wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0', wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0',
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
co2_budget_name='b30b3', planning_horizons='2050'), co2_budget_name='b30b3', planning_horizons='2050'),
input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"), input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/elec_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"),
output=["results/networks/s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}-test.nc"], output=["results/networks/s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}-test.nc"],
log=dict(gurobi="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log", log=dict(gurobi="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log",
python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log") python="logs/elec_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log")
) )
import yaml import yaml
with open('config.yaml', encoding='utf8') as f: with open('config.yaml', encoding='utf8') as f: