merge master

commit e97c4dd3c0 by Fabian Neumann, 2021-07-02 10:12:43 +02:00
57 changed files with 70561 additions and 5776 deletions

.gitignore

@@ -2,9 +2,10 @@
.ipynb_checkpoints
__pycache__
gurobi.log
.vscode
/bak
/resources
/resources*
/results
/networks
/benchmarks
@@ -26,6 +27,7 @@ gurobi.log
/data/switzerland*
/data/.nfs*
/data/Industrial_Database.csv
/data/retro/tabula-calculator-calcsetbuilding.csv
*.org
@@ -45,4 +47,4 @@ config.yaml
doc/_build
*.xls
*.xls

README.md

@@ -14,7 +14,7 @@ problems that distort the results. See the github repository
[issues](https://github.com/PyPSA/pypsa-eur-sec/issues) for some of
the problems (please feel free to help or make suggestions). There is
neither documentation nor a paper yet, but we hope to have a preprint
out by summer 2020. We cannot support this model if you choose to use
out by autumn 2021. We cannot support this model if you choose to use
it.
@@ -26,13 +26,21 @@ the energy system and includes all greenhouse gas emitters except
waste management, agriculture, forestry and land use.
Please see the [documentation](https://pypsa-eur-sec.readthedocs.io/)
for installation instructions and other useful information.
for installation instructions and other useful information about the snakemake workflow.
This diagram gives an overview of the sectors and the links between
them:
![sector diagram](graphics/multisector_figure.png)
Each of these sectors is built up on the transmission network nodes
from [PyPSA-Eur](https://github.com/PyPSA/pypsa-eur):
![network diagram](https://github.com/PyPSA/pypsa-eur/blob/master/doc/img/base.png?raw=true)
For computational reasons the model is usually clustered down
to 50-200 nodes.
PyPSA-Eur-Sec was initially based on the model PyPSA-Eur-Sec-30 described
in the paper [Synergies of sector coupling and transmission

Snakefile

@@ -1,42 +1,41 @@
configfile: "config.yaml"
wildcard_constraints:
lv="[a-z0-9\.]+",
network="[a-zA-Z0-9]*",
simpl="[a-zA-Z0-9]*",
clusters="[0-9]+m?",
sectors="[+a-zA-Z0-9]+",
opts="[-+a-zA-Z0-9]*",
sector_opts="[-+a-zA-Z0-9]*"
sector_opts="[-+a-zA-Z0-9\.\s]*"
SDIR = config['summary_dir'] + '/' + config['run']
RDIR = config['results_dir'] + config['run']
CDIR = config['costs_dir']
subworkflow pypsaeur:
workdir: "../pypsa-eur"
snakefile: "../pypsa-eur/Snakefile"
configfile: "../pypsa-eur/config.yaml"
rule all:
input:
config['summary_dir'] + '/' + config['run'] + '/graphs/costs.pdf'
rule all:
input: SDIR + '/graphs/costs.pdf'
rule solve_all_networks:
input:
expand(config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc",
expand(RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config['scenario'])
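The expand() call fans the path template out over every combination of the scenario lists in config.yaml. Roughly equivalent logic with plain itertools (a sketch; the scenario values are the hypothetical ones from the config excerpt further down):

```python
from itertools import product

scenario = {"simpl": [""], "clusters": [45, 50], "lv": [1.0, 1.5],
            "opts": [""], "sector_opts": ["Co2L0-3H-T-H-B-I-solar+p3-dist1"],
            "planning_horizons": [2030]}

template = "elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
targets = [template.format(**dict(zip(scenario, combo)))
           for combo in product(*scenario.values())]
print(len(targets))  # 4 postnetworks: 2 cluster choices x 2 lv choices
```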
rule test_script:
input:
expand("resources/heat_demand_urban_elec_s_{clusters}.nc",
**config['scenario'])
rule prepare_sector_networks:
input:
expand(config['results_dir'] + config['run'] + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc",
**config['scenario'])
expand(RDIR + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config['scenario'])
rule build_population_layouts:
@@ -48,6 +47,8 @@ rule build_population_layouts:
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc"
resources: mem_mb=20000
benchmark: "benchmarks/build_population_layouts"
threads: 8
script: "scripts/build_population_layouts.py"
@@ -56,10 +57,11 @@ rule build_clustered_population_layouts:
pop_layout_total="resources/pop_layout_total.nc",
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc",
regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson')
regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}_{clusters}.geojson')
output:
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv"
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv"
resources: mem_mb=10000
benchmark: "benchmarks/build_clustered_population_layouts/s{simpl}_{clusters}"
script: "scripts/build_clustered_population_layouts.py"
@@ -68,10 +70,11 @@ rule build_simplified_population_layouts:
pop_layout_total="resources/pop_layout_total.nc",
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc",
regions_onshore=pypsaeur('resources/regions_onshore_{network}_s{simpl}.geojson')
regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}.geojson')
output:
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}.csv"
clustered_pop_layout="resources/pop_layout_elec_s{simpl}.csv"
resources: mem_mb=10000
benchmark: "benchmarks/build_clustered_population_layouts/s{simpl}"
script: "scripts/build_clustered_population_layouts.py"
@@ -80,47 +83,51 @@ rule build_heat_demands:
pop_layout_total="resources/pop_layout_total.nc",
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc",
regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
output:
heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc",
heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc",
heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc"
heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc",
heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc",
heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc"
resources: mem_mb=20000
benchmark: "benchmarks/build_heat_demands/s{simpl}_{clusters}"
script: "scripts/build_heat_demand.py"
rule build_temperature_profiles:
input:
pop_layout_total="resources/pop_layout_total.nc",
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc",
regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
output:
temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc"
temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc"
resources: mem_mb=20000
benchmark: "benchmarks/build_temperature_profiles/s{simpl}_{clusters}"
script: "scripts/build_temperature_profiles.py"
rule build_cop_profiles:
input:
temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc"
temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc"
output:
cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc",
cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc",
cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc"
cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc",
cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc",
cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc"
resources: mem_mb=20000
benchmark: "benchmarks/build_cop_profiles/s{simpl}_{clusters}"
script: "scripts/build_cop_profiles.py"
@@ -129,27 +136,38 @@ rule build_solar_thermal_profiles:
pop_layout_total="resources/pop_layout_total.nc",
pop_layout_urban="resources/pop_layout_urban.nc",
pop_layout_rural="resources/pop_layout_rural.nc",
regions_onshore=pypsaeur("resources/regions_onshore_{network}_s{simpl}_{clusters}.geojson")
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson")
output:
solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc",
solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc"
solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc",
solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
resources: mem_mb=20000
benchmark: "benchmarks/build_solar_thermal_profiles/s{simpl}_{clusters}"
script: "scripts/build_solar_thermal_profiles.py"
def input_eurostat(w):
# 2016 includes BA, 2017 does not
report_year = config["energy"]["eurostat_report_year"]
return f"data/eurostat-energy_balances-june_{report_year}_edition"
rule build_energy_totals:
input:
nuts3_shapes=pypsaeur('resources/nuts3_shapes.geojson')
nuts3_shapes=pypsaeur('resources/nuts3_shapes.geojson'),
co2="data/eea/UNFCCC_v23.csv",
swiss="data/switzerland-sfoe/switzerland-new_format.csv",
idees="data/jrc-idees-2015",
eurostat=input_eurostat
output:
energy_name='resources/energy_totals.csv',
co2_name='resources/co2_totals.csv',
transport_name='resources/transport_data.csv'
threads: 1
co2_name='resources/co2_totals.csv',
transport_name='resources/transport_data.csv'
threads: 16
resources: mem_mb=10000
benchmark: "benchmarks/build_energy_totals"
script: 'scripts/build_energy_totals.py'
rule build_biomass_potentials:
input:
jrc_potentials="data/biomass/JRC Biomass Potentials.xlsx"
@@ -158,8 +176,10 @@ rule build_biomass_potentials:
biomass_potentials='resources/biomass_potentials.csv'
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_biomass_potentials"
script: 'scripts/build_biomass_potentials.py'
rule build_ammonia_production:
input:
usgs="data/myb1-2017-nitro.xls"
@@ -167,26 +187,32 @@ rule build_ammonia_production:
ammonia_production="resources/ammonia_production.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_ammonia_production"
script: 'scripts/build_ammonia_production.py'
rule build_industry_sector_ratios:
input:
ammonia_production="resources/ammonia_production.csv"
ammonia_production="resources/ammonia_production.csv",
idees="data/jrc-idees-2015"
output:
industry_sector_ratios="resources/industry_sector_ratios.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industry_sector_ratios"
script: 'scripts/build_industry_sector_ratios.py'
rule build_industrial_production_per_country:
input:
ammonia_production="resources/ammonia_production.csv"
ammonia_production="resources/ammonia_production.csv",
jrc="data/jrc-idees-2015",
eurostat="data/eurostat-energy_balances-may_2018_edition",
output:
industrial_production_per_country="resources/industrial_production_per_country.csv"
threads: 1
threads: 8
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_production_per_country"
script: 'scripts/build_industrial_production_per_country.py'
@@ -197,220 +223,231 @@ rule build_industrial_production_per_country_tomorrow:
industrial_production_per_country_tomorrow="resources/industrial_production_per_country_tomorrow.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_production_per_country_tomorrow"
script: 'scripts/build_industrial_production_per_country_tomorrow.py'
rule build_industrial_distribution_key:
input:
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
europe_shape=pypsaeur('resources/europe_shape.geojson'),
regions_onshore=pypsaeur('resources/regions_onshore_elec_s{simpl}_{clusters}.geojson'),
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
hotmaps_industrial_database="data/Industrial_Database.csv",
network=pypsaeur('networks/{network}_s{simpl}_{clusters}.nc')
output:
industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv"
industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_distribution_key/s{simpl}_{clusters}"
script: 'scripts/build_industrial_distribution_key.py'
rule build_industrial_production_per_node:
input:
industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv",
industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
industrial_production_per_country_tomorrow="resources/industrial_production_per_country_tomorrow.csv"
output:
industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv"
industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_production_per_node/s{simpl}_{clusters}"
script: 'scripts/build_industrial_production_per_node.py'
rule build_industrial_energy_demand_per_node:
input:
industry_sector_ratios="resources/industry_sector_ratios.csv",
industrial_production_per_node="resources/industrial_production_{network}_s{simpl}_{clusters}.csv",
industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv"
industrial_production_per_node="resources/industrial_production_elec_s{simpl}_{clusters}.csv",
industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
output:
industrial_energy_demand_per_node="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv"
industrial_energy_demand_per_node="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_energy_demand_per_node/s{simpl}_{clusters}"
script: 'scripts/build_industrial_energy_demand_per_node.py'
rule build_industrial_energy_demand_per_country_today:
input:
jrc="data/jrc-idees-2015",
ammonia_production="resources/ammonia_production.csv",
industrial_production_per_country="resources/industrial_production_per_country.csv"
output:
industrial_energy_demand_per_country_today="resources/industrial_energy_demand_per_country_today.csv"
threads: 1
threads: 8
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_energy_demand_per_country_today"
script: 'scripts/build_industrial_energy_demand_per_country_today.py'
rule build_industrial_energy_demand_per_node_today:
input:
industrial_distribution_key="resources/industrial_distribution_key_{network}_s{simpl}_{clusters}.csv",
industrial_distribution_key="resources/industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
industrial_energy_demand_per_country_today="resources/industrial_energy_demand_per_country_today.csv"
output:
industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_{network}_s{simpl}_{clusters}.csv"
industrial_energy_demand_per_node_today="resources/industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv"
threads: 1
resources: mem_mb=1000
benchmark: "benchmarks/build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}"
script: 'scripts/build_industrial_energy_demand_per_node_today.py'
rule build_industrial_energy_demand_per_country:
input:
industry_sector_ratios="resources/industry_sector_ratios.csv",
industrial_production_per_country="resources/industrial_production_per_country_tomorrow.csv"
output:
industrial_energy_demand_per_country="resources/industrial_energy_demand_per_country.csv"
threads: 1
resources: mem_mb=1000
script: 'scripts/build_industrial_energy_demand_per_country.py'
rule build_industrial_demand:
input:
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
industrial_demand_per_country="resources/industrial_energy_demand_per_country.csv"
output:
industrial_demand="resources/industrial_demand_{network}_s{simpl}_{clusters}.csv"
threads: 1
resources: mem_mb=1000
script: 'scripts/build_industrial_demand.py'
if config["sector"]["retrofitting"]["retro_endogen"]:
rule build_retro_cost:
input:
building_stock="data/retro/data_building_stock.csv",
data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv",
air_temperature = "resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
u_values_PL="data/retro/u_values_poland.csv",
tax_w="data/retro/electricity_taxes_eu.csv",
construction_index="data/retro/comparative_level_investment.csv",
floor_area_missing="data/retro/floor_area_missing.csv",
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
cost_germany="data/retro/retro_cost_germany.csv",
window_assumptions="data/retro/window_assumptions.csv",
output:
retro_cost="resources/retro_cost_elec_s{simpl}_{clusters}.csv",
floor_area="resources/floor_area_elec_s{simpl}_{clusters}.csv"
resources: mem_mb=1000
benchmark: "benchmarks/build_retro_cost/s{simpl}_{clusters}"
script: "scripts/build_retro_cost.py"
build_retro_cost_output = rules.build_retro_cost.output
else:
build_retro_cost_output = {}
rule prepare_sector_network:
input:
network=pypsaeur('networks/{network}_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
overrides="data/override_component_attrs",
network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
energy_totals_name='resources/energy_totals.csv',
co2_totals_name='resources/co2_totals.csv',
transport_name='resources/transport_data.csv',
traffic_data_KFZ = "data/emobility/KFZ__count",
traffic_data_Pkw = "data/emobility/Pkw__count",
biomass_potentials='resources/biomass_potentials.csv',
biomass_transport='data/biomass/biomass_transport_costs.csv',
timezone_mappings='data/timezone_mappings.csv',
biomass_transport='data/biomass/biomass_transport_costs.csv',
heat_profile="data/heat_load_profile_BDEW.csv",
costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
h2_cavern = "data/hydrogen_salt_cavern_potentials.csv",
co2_budget="data/co2_budget.csv",
costs=CDIR + "costs_{planning_horizons}.csv",
profile_offwind_ac=pypsaeur("resources/profile_offwind-ac.nc"),
profile_offwind_dc=pypsaeur("resources/profile_offwind-dc.nc"),
clustermaps=pypsaeur('resources/clustermaps_{network}_s{simpl}_{clusters}.h5'),
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
simplified_pop_layout="resources/pop_layout_{network}_s{simpl}.csv",
industrial_demand="resources/industrial_energy_demand_{network}_s{simpl}_{clusters}.csv",
heat_demand_urban="resources/heat_demand_urban_{network}_s{simpl}_{clusters}.nc",
heat_demand_rural="resources/heat_demand_rural_{network}_s{simpl}_{clusters}.nc",
heat_demand_total="resources/heat_demand_total_{network}_s{simpl}_{clusters}.nc",
traffic_data = "data/emobility/",
temp_soil_total="resources/temp_soil_total_{network}_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_{network}_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_{network}_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_{network}_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_{network}_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_{network}_s{simpl}_{clusters}.nc",
cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
cop_soil_rural="resources/cop_soil_rural_{network}_s{simpl}_{clusters}.nc",
cop_soil_urban="resources/cop_soil_urban_{network}_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
cop_air_rural="resources/cop_air_rural_{network}_s{simpl}_{clusters}.nc",
cop_air_urban="resources/cop_air_urban_{network}_s{simpl}_{clusters}.nc",
solar_thermal_total="resources/solar_thermal_total_{network}_s{simpl}_{clusters}.nc",
solar_thermal_urban="resources/solar_thermal_urban_{network}_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_{network}_s{simpl}_{clusters}.nc"
output: config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc'
h2_cavern="data/hydrogen_salt_cavern_potentials.csv",
busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"),
busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
simplified_pop_layout="resources/pop_layout_elec_s{simpl}.csv",
industrial_demand="resources/industrial_energy_demand_elec_s{simpl}_{clusters}.csv",
heat_demand_urban="resources/heat_demand_urban_elec_s{simpl}_{clusters}.nc",
heat_demand_rural="resources/heat_demand_rural_elec_s{simpl}_{clusters}.nc",
heat_demand_total="resources/heat_demand_total_elec_s{simpl}_{clusters}.nc",
temp_soil_total="resources/temp_soil_total_elec_s{simpl}_{clusters}.nc",
temp_soil_rural="resources/temp_soil_rural_elec_s{simpl}_{clusters}.nc",
temp_soil_urban="resources/temp_soil_urban_elec_s{simpl}_{clusters}.nc",
temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
temp_air_rural="resources/temp_air_rural_elec_s{simpl}_{clusters}.nc",
temp_air_urban="resources/temp_air_urban_elec_s{simpl}_{clusters}.nc",
cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_soil_rural="resources/cop_soil_rural_elec_s{simpl}_{clusters}.nc",
cop_soil_urban="resources/cop_soil_urban_elec_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
cop_air_rural="resources/cop_air_rural_elec_s{simpl}_{clusters}.nc",
cop_air_urban="resources/cop_air_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_total="resources/solar_thermal_total_elec_s{simpl}_{clusters}.nc",
solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc",
**build_retro_cost_output
output: RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
threads: 1
resources: mem_mb=2000
benchmark: config['results_dir'] + config['run'] + "/benchmarks/prepare_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}"
benchmark: RDIR + "/benchmarks/prepare_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
script: "scripts/prepare_sector_network.py"
rule plot_network:
input:
network=config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"
overrides="data/override_component_attrs",
network=RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
output:
map=config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{co2_budget_name}_{planning_horizons}.pdf",
today=config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}-today.pdf"
map=RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
today=RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}-today.pdf"
threads: 2
resources: mem_mb=10000
benchmark: RDIR + "/benchmarks/plot_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
script: "scripts/plot_network.py"
rule copy_config:
output:
config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml'
output: SDIR + '/configs/config.yaml'
threads: 1
resources: mem_mb=1000
script:
'scripts/copy_config.py'
benchmark: SDIR + "/benchmarks/copy_config"
script: "scripts/copy_config.py"
rule make_summary:
input:
networks=expand(config['results_dir'] + config['run'] + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc",
**config['scenario']),
costs=config['costs_dir'] + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
plots=expand(config['results_dir'] + config['run'] + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{co2_budget_name}_{planning_horizons}.pdf",
**config['scenario'])
#heat_demand_name='data/heating/daily_heat_demand.h5'
overrides="data/override_component_attrs",
networks=expand(
RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config['scenario']
),
costs=CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
plots=expand(
RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config['scenario']
)
output:
nodal_costs=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_costs.csv',
nodal_capacities=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_capacities.csv',
nodal_cfs=config['summary_dir'] + '/' + config['run'] + '/csvs/nodal_cfs.csv',
cfs=config['summary_dir'] + '/' + config['run'] + '/csvs/cfs.csv',
costs=config['summary_dir'] + '/' + config['run'] + '/csvs/costs.csv',
capacities=config['summary_dir'] + '/' + config['run'] + '/csvs/capacities.csv',
curtailment=config['summary_dir'] + '/' + config['run'] + '/csvs/curtailment.csv',
energy=config['summary_dir'] + '/' + config['run'] + '/csvs/energy.csv',
supply=config['summary_dir'] + '/' + config['run'] + '/csvs/supply.csv',
supply_energy=config['summary_dir'] + '/' + config['run'] + '/csvs/supply_energy.csv',
prices=config['summary_dir'] + '/' + config['run'] + '/csvs/prices.csv',
weighted_prices=config['summary_dir'] + '/' + config['run'] + '/csvs/weighted_prices.csv',
market_values=config['summary_dir'] + '/' + config['run'] + '/csvs/market_values.csv',
price_statistics=config['summary_dir'] + '/' + config['run'] + '/csvs/price_statistics.csv',
metrics=config['summary_dir'] + '/' + config['run'] + '/csvs/metrics.csv'
nodal_costs=SDIR + '/csvs/nodal_costs.csv',
nodal_capacities=SDIR + '/csvs/nodal_capacities.csv',
nodal_cfs=SDIR + '/csvs/nodal_cfs.csv',
cfs=SDIR + '/csvs/cfs.csv',
costs=SDIR + '/csvs/costs.csv',
capacities=SDIR + '/csvs/capacities.csv',
curtailment=SDIR + '/csvs/curtailment.csv',
energy=SDIR + '/csvs/energy.csv',
supply=SDIR + '/csvs/supply.csv',
supply_energy=SDIR + '/csvs/supply_energy.csv',
prices=SDIR + '/csvs/prices.csv',
weighted_prices=SDIR + '/csvs/weighted_prices.csv',
market_values=SDIR + '/csvs/market_values.csv',
price_statistics=SDIR + '/csvs/price_statistics.csv',
metrics=SDIR + '/csvs/metrics.csv'
threads: 2
resources: mem_mb=10000
script:
'scripts/make_summary.py'
benchmark: SDIR + "/benchmarks/make_summary"
script: "scripts/make_summary.py"
rule plot_summary:
input:
costs=config['summary_dir'] + '/' + config['run'] + '/csvs/costs.csv',
energy=config['summary_dir'] + '/' + config['run'] + '/csvs/energy.csv',
balances=config['summary_dir'] + '/' + config['run'] + '/csvs/supply_energy.csv'
costs=SDIR + '/csvs/costs.csv',
energy=SDIR + '/csvs/energy.csv',
balances=SDIR + '/csvs/supply_energy.csv'
output:
costs=config['summary_dir'] + '/' + config['run'] + '/graphs/costs.pdf',
energy=config['summary_dir'] + '/' + config['run'] + '/graphs/energy.pdf',
balances=config['summary_dir'] + '/' + config['run'] + '/graphs/balances-energy.pdf'
costs=SDIR + '/graphs/costs.pdf',
energy=SDIR + '/graphs/energy.pdf',
balances=SDIR + '/graphs/balances-energy.pdf'
threads: 2
resources: mem_mb=10000
script:
'scripts/plot_summary.py'
benchmark: SDIR + "/benchmarks/plot_summary"
script: "scripts/plot_summary.py"
if config["foresight"] == "overnight":
rule solve_network:
input:
network=config['results_dir'] + config['run'] + "/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc",
costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml'
output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"
overrides="data/override_component_attrs",
network=RDIR + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs=CDIR + "costs_{planning_horizons}.csv",
config=SDIR + '/configs/config.yaml'
output: RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
shadow: "shallow"
log:
solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_solver.log",
python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python.log",
memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_memory.log"
benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}"
solver=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
memory=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
threads: 4
resources: mem_mb=config['solving']['mem']
# group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain
benchmark: RDIR + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
script: "scripts/solve_network.py"
@@ -418,52 +455,67 @@ if config["foresight"] == "myopic":
rule add_existing_baseyear:
input:
network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc',
overrides="data/override_component_attrs",
network=RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
powerplants=pypsaeur('resources/powerplants.csv'),
clustermaps=pypsaeur('resources/clustermaps_{network}_s{simpl}_{clusters}.h5'),
clustered_pop_layout="resources/pop_layout_{network}_s{simpl}_{clusters}.csv",
costs=config['costs_dir'] + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc"
output: config['results_dir'] + config['run'] + '/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc'
busmap_s=pypsaeur("resources/busmap_elec_s{simpl}.csv"),
busmap=pypsaeur("resources/busmap_elec_s{simpl}_{clusters}.csv"),
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
costs=CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc",
existing_heating='data/existing_infrastructure/existing_heating_raw.csv',
country_codes='data/Country_codes.csv',
existing_solar='data/existing_infrastructure/solar_capacity_IRENA.csv',
existing_onwind='data/existing_infrastructure/onwind_capacity_IRENA.csv',
existing_offwind='data/existing_infrastructure/offwind_capacity_IRENA.csv',
output: RDIR + '/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
wildcard_constraints:
planning_horizons=config['scenario']['planning_horizons'][0] #only applies to baseyear
threads: 1
resources: mem_mb=2000
benchmark: RDIR + '/benchmarks/add_existing_baseyear/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}'
script: "scripts/add_existing_baseyear.py"
def process_input(wildcards):
i = config["scenario"]["planning_horizons"].index(int(wildcards.planning_horizons))
return config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_" + str(config["scenario"]["planning_horizons"][i-1]) + ".nc"
def solved_previous_horizon(wildcards):
planning_horizons = config["scenario"]["planning_horizons"]
i = planning_horizons.index(int(wildcards.planning_horizons))
planning_horizon_p = str(planning_horizons[i-1])
return RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_" + planning_horizon_p + ".nc"
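solved_previous_horizon chains the myopic horizons: each add_brownfield run consumes the solved postnetwork of the preceding investment year. The lookup in isolation (hypothetical horizon list, plain Python, no Snakemake required):

```python
planning_horizons = [2020, 2030, 2040, 2050]  # hypothetical config values

def previous_horizon(current: int) -> int:
    # the horizon before the current one feeds the brownfield constraints
    i = planning_horizons.index(current)
    return planning_horizons[i - 1]

print(previous_horizon(2030))  # -> 2020
```

For the first horizon, i - 1 would wrap around to the last entry, but the `ruleorder: add_existing_baseyear > add_brownfield` below, together with the wildcard constraint pinning add_existing_baseyear to the base year, keeps add_brownfield from ever being invoked there.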
rule add_brownfield:
input:
network=config['results_dir'] + config['run'] + '/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc',
network_p=process_input, #solved network at previous time step
costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
cop_soil_total="resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_{network}_s{simpl}_{clusters}.nc"
output: config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"
overrides="data/override_component_attrs",
network=RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc',
network_p=solved_previous_horizon, #solved network at previous time step
costs=CDIR + "costs_{planning_horizons}.csv",
cop_soil_total="resources/cop_soil_total_elec_s{simpl}_{clusters}.nc",
cop_air_total="resources/cop_air_total_elec_s{simpl}_{clusters}.nc"
output: RDIR + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
threads: 4
resources: mem_mb=2000
resources: mem_mb=10000
benchmark: RDIR + '/benchmarks/add_brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}'
script: "scripts/add_brownfield.py"
ruleorder: add_existing_baseyear > add_brownfield
rule solve_network_myopic:
input:
network=config['results_dir'] + config['run'] + "/prenetworks-brownfield/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc",
costs=config['costs_dir'] + "costs_{planning_horizons}.csv",
config=config['summary_dir'] + '/' + config['run'] + '/configs/config.yaml'
output: config['results_dir'] + config['run'] + "/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"
overrides="data/override_component_attrs",
network=RDIR + "/prenetworks-brownfield/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs=CDIR + "costs_{planning_horizons}.csv",
config=SDIR + '/configs/config.yaml'
output: RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
shadow: "shallow"
log:
solver=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_solver.log",
python=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python.log",
memory=config['results_dir'] + config['run'] + "/logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}_memory.log"
benchmark: config['results_dir'] + config['run'] + "/benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{co2_budget_name}_{planning_horizons}"
solver=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
python=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_python.log",
memory=RDIR + "/logs/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_memory.log"
threads: 4
resources: mem_mb=config['solving']['mem']
benchmark: RDIR + "/benchmarks/solve_network/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}"
script: "scripts/solve_network.py"

config.yaml

@@ -1,42 +1,66 @@
version: 0.3.0
version: 0.5.0
logging_level: INFO
results_dir: 'results/'
results_dir: results/
summary_dir: results
costs_dir: '../technology-data/outputs/'
run: 'your-run-name' # use this to keep track of runs with different settings
foresight: 'overnight' #options are overnight, myopic, perfect (perfect is not yet implemented)
costs_dir: ../technology-data/outputs/
run: your-run-name # use this to keep track of runs with different settings
foresight: overnight # options are overnight, myopic, perfect (perfect is not yet implemented)
# if you use myopic or perfect foresight, set the investment years in "planning_horizons" below
scenario:
sectors: [E] # ignore this legacy setting
simpl: [''] # only relevant for PyPSA-Eur
lv: [1.0,1.5] # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt"
clusters: [45,50] # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred
opts: [''] # only relevant for PyPSA-Eur
sector_opts: [Co2L0-3H-T-H-B-I-solar3-dist1] # this is where the main scenario settings are
simpl: # only relevant for PyPSA-Eur
- ''
lv: # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt"
- 1.0
- 1.5
clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred
- 45
- 50
opts: # only relevant for PyPSA-Eur
- ''
sector_opts: # this is where the main scenario settings are
- Co2L0-3H-T-H-B-I-solar+p3-dist1
# to really understand the options here, look in scripts/prepare_sector_network.py
# Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%);
# Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions
# xH is the temporal resolution; 3H is 3-hourly, i.e. one snapshot every 3 hours
# single letters are sectors: T for land transport, H for building heating,
# B for biomass supply, I for industry, shipping and aviation
# solarx or onwindx changes the available installable potential by factor x
# solar+c0.5 reduces the capital cost of solar to 50% of reference value
# solar+p3 multiplies the available installable potential by factor 3
# dist{n} includes distribution grids with investment cost of n times cost in data/costs.csv
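Tying the grammar above together, a hedged illustration of how one sector_opts string decomposes into tokens (illustrative only; the authoritative parsing lives in scripts/prepare_sector_network.py):

```python
opts = "Co2L0-3H-T-H-B-I-solar+p3-dist1".split("-")
# -> ['Co2L0', '3H', 'T', 'H', 'B', 'I', 'solar+p3', 'dist1']
#    Co2L0     : CO2 cap at 0% of 1990 emissions
#    3H        : 3-hourly temporal resolution
#    T, H, B, I: land transport, heating, biomass, industry sectors
#    solar+p3  : triple the installable solar potential
#    dist1     : distribution grids at 1x the cost in data/costs.csv
print(opts)
```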
planning_horizons : [2030] #investment years for myopic and perfect; or costs year for overnight
co2_budget_name: ['go'] #gives shape of CO2 budgets over planning horizon
# for myopic/perfect foresight cb states the carbon budget in GtCO2 (cumulative
# emissions throughout the transition path in the timeframe determined by the
# planning_horizons), be:beta decay; ex:exponential decay
# cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential
# decay with initial growth rate 0
planning_horizons: # investment years for myopic and perfect; or costs year for overnight
- 2030
# for example, set to [2020, 2030, 2040, 2050] for myopic foresight
# CO2 budget as a fraction of 1990 emissions
# this is over-ridden if CO2Lx is set in sector_opts
# this is also over-ridden if cb is set in sector_opts
co2_budget:
2020: 0.7011648746
2025: 0.5241935484
2030: 0.2970430108
2035: 0.1500896057
2040: 0.0712365591
2045: 0.0322580645
2050: 0
# snapshots are originally set in PyPSA-Eur/config.yaml but used again by PyPSA-Eur-Sec
snapshots:
# arguments to pd.date_range
start: "2013-01-01"
end: "2014-01-01"
closed: 'left' # end is not inclusive
closed: left # end is not inclusive
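Since the entries above are documented as arguments to pd.date_range, the snapshot index presumably comes out as below (a sketch; the hourly frequency is an assumption, and pandas >= 2.0 renames `closed` to `inclusive`):

```python
import pandas as pd

# closed="left" keeps 2013-01-01 00:00 but drops the 2014-01-01 endpoint
snapshots = pd.date_range(start="2013-01-01", end="2014-01-01",
                          freq="H", closed="left")
print(len(snapshots))  # 8760 hourly snapshots for the non-leap year 2013
```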
atlite:
cutout_dir: '../pypsa-eur/cutouts'
cutout_name: "europe-2013-era5"
cutout: ../pypsa-eur/cutouts/europe-2013-era5.nc
# this information is NOT used but needed as an argument for
# pypsa-eur/scripts/add_electricity.py/load_costs in make_summary.py
@@ -45,69 +69,180 @@ electricity:
battery: 6
H2: 168
# regulate what components with which carriers are kept from PyPSA-Eur;
# some technologies are removed because they are implemented differently
# or have different year-dependent costs in PyPSA-Eur-Sec
pypsa_eur:
Bus:
- AC
Link:
- DC
Generator:
- onwind
- offwind-ac
- offwind-dc
- solar
- ror
StorageUnit:
- PHS
- hydro
Store: []
energy:
energy_totals_year: 2011
base_emissions_year: 1990
eurostat_report_year: 2016
emissions: CO2 # "CO2" or "All greenhouse gases - (CO2 equivalent)"
biomass:
year: 2030
scenario: "Med"
scenario: Med
classes:
solid biomass: ['Primary agricultural residues', 'Forestry energy residue', 'Secondary forestry residues', 'Secondary Forestry residues sawdust', 'Forestry residues from landscape care biomass', 'Municipal waste']
not included: ['Bioethanol sugar beet biomass', 'Rapeseeds for biodiesel', 'sunflower and soya for Biodiesel', 'Starchy crops biomass', 'Grassy crops biomass', 'Willow biomass', 'Poplar biomass potential', 'Roundwood fuelwood', 'Roundwood Chips & Pellets']
biogas: ['Manure biomass potential', 'Sludge biomass']
solid biomass:
- Primary agricultural residues
- Forestry energy residue
- Secondary forestry residues
- Secondary Forestry residues sawdust
- Forestry residues from landscape care biomass
- Municipal waste
not included:
- Bioethanol sugar beet biomass
- Rapeseeds for biodiesel
- sunflower and soya for Biodiesel
- Starchy crops biomass
- Grassy crops biomass
- Willow biomass
- Poplar biomass potential
- Roundwood fuelwood
- Roundwood Chips & Pellets
biogas:
- Manure biomass potential
- Sludge biomass
solar_thermal:
clearsky_model: simple # should be "simple" or "enhanced"?
orientation:
slope: 45.
azimuth: 180.
# only relevant for foresight = myopic or perfect
existing_capacities:
grouping_years: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019]
threshold_capacity: 10
conventional_carriers: ['lignite', 'coal', 'oil', 'uranium']
conventional_carriers:
- lignite
- coal
- oil
- uranium
sector:
'central' : True
'central_fraction' : 0.6
'dsm_restriction_value' : 0.75 #Set to 0 for no restriction on BEV DSM
'dsm_restriction_time' : 7 #Time at which SOC of BEV has to be dsm_restriction_value
'transport_heating_deadband_upper' : 20.
'transport_heating_deadband_lower' : 15.
'ICE_lower_degree_factor' : 0.375 #in per cent increase in fuel consumption per degree above deadband
'ICE_upper_degree_factor' : 1.6
'EV_lower_degree_factor' : 0.98
'EV_upper_degree_factor' : 0.63
'district_heating_loss' : 0.15
'bev' : True #turns on EV battery
'bev_availability' : 0.5 #How many cars do smart charging
'v2g' : True #allows feed-in to grid from EV battery
'transport_fuel_cell_share' : 0. #0 means all EVs, 1 means all FCs
'shipping_average_efficiency' : 0.4 #For conversion of fuel oil to propulsion in 2011
'time_dep_hp_cop' : True
'space_heating_fraction' : 1.0 #fraction of space heating active
'retrofitting' : False
'retroI-fraction' : 0.25
'retroII-fraction' : 0.55
'retrofitting-cost_factor' : 1.0
'tes' : True
'tes_tau' : 3.
'boilers' : True
'oil_boilers': False
'chp' : True
'solar_thermal' : True
'solar_cf_correction': 0.788457 # = >>> 1/1.2683
'marginal_cost_storage' : 0. #1e-4
'methanation' : True
'helmeth' : True
'dac' : True
'co2_vent' : True
'SMR' : True
'ccs_fraction' : 0.9
'hydrogen_underground_storage' : True
'use_fischer_tropsch_waste_heat' : True
'use_fuel_cell_waste_heat' : True
'electricity_distribution_grid' : False
'electricity_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv
'electricity_grid_connection' : True # only applies to onshore wind and utility PV
'gas_distribution_grid' : True
'gas_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv
'biomass_transport': False # biomass potential per country + transport between countries
central: true
central_fraction: 0.6
bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM
bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value
transport_heating_deadband_upper: 20.
transport_heating_deadband_lower: 15.
ICE_lower_degree_factor: 0.375 #in per cent increase in fuel consumption per degree above deadband
ICE_upper_degree_factor: 1.6
EV_lower_degree_factor: 0.98
EV_upper_degree_factor: 0.63
district_heating_loss: 0.15
bev_dsm: true #turns on EV battery
bev_availability: 0.5 #How many cars do smart charging
bev_energy: 0.05 #average battery size in MWh
bev_charge_efficiency: 0.9 #BEV (dis-)charging efficiency
bev_plug_to_wheel_efficiency: 0.2 #kWh/km from EPA https://www.fueleconomy.gov/feg/ for Tesla Model S
bev_charge_rate: 0.011 #3-phase charger with 11 kW
bev_avail_max: 0.95
bev_avail_mean: 0.8
v2g: true #allows feed-in to grid from EV battery
#what is not EV or FCEV is oil-fuelled ICE
land_transport_fuel_cell_share: # 1 means all FCEVs
2020: 0
2030: 0.05
2040: 0.1
2050: 0.15
land_transport_electric_share: # 1 means all EVs
2020: 0
2030: 0.25
2040: 0.6
2050: 0.85
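Given the comment above that whatever is neither an EV nor an FCEV remains an oil-fuelled ICE, the residual ICE share per year follows directly; a worked check for these values:

```python
fcev = {2020: 0, 2030: 0.05, 2040: 0.1, 2050: 0.15}
ev   = {2020: 0, 2030: 0.25, 2040: 0.6, 2050: 0.85}

# the remainder runs on oil-fuelled internal combustion engines
ice = {year: round(1 - fcev[year] - ev[year], 2) for year in fcev}
print(ice)  # {2020: 1.0, 2030: 0.7, 2040: 0.3, 2050: 0.0}
```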
transport_fuel_cell_efficiency: 0.5
transport_internal_combustion_efficiency: 0.3
shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011
time_dep_hp_cop: true #time dependent heat pump coefficient of performance
heat_pump_sink_T: 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py
# conservatively high to cover hot water and space heating in poorly-insulated buildings
reduce_space_heat_exogenously: true # reduces space heat demand by a given factor (applied before losses in DH)
# this can represent e.g. building renovation, building demolition, or if
# the factor is negative: increasing floor area, increased thermal comfort, population growth
reduce_space_heat_exogenously_factor: # per unit reduction in space heat demand
# the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221
2020: 0.10 # this results in a space heat demand reduction of 10%
2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita
2030: 0.09
2035: 0.11
2040: 0.16
2045: 0.21
2050: 0.29
retrofitting : # co-optimises building renovation to reduce space heat demand
retro_endogen: false # co-optimise space heat savings
cost_factor: 1.0 # weight costs for building renovation
interest_rate: 0.04 # for investment in building components
annualise_cost: true # annualise the investment costs
tax_weighting: false # weight costs depending on taxes in countries
construction_index: true # weight costs depending on labour/material costs per country
tes: true
tes_tau: # 180 day time constant for centralised, 3 day for decentralised
decentral: 3
central: 180
boilers: true
oil_boilers: false
chp: true
micro_chp: false
solar_thermal: true
solar_cf_correction: 0.788457 # = >>> 1/1.2683
marginal_cost_storage: 0. #1e-4
methanation: true
helmeth: true
dac: true
co2_vent: true
SMR: true
co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe
co2_sequestration_cost: 20 #EUR/tCO2 for transport and sequestration of CO2
cc_fraction: 0.9 # default fraction of CO2 captured with post-combustion capture
hydrogen_underground_storage: true
use_fischer_tropsch_waste_heat: true
use_fuel_cell_waste_heat: true
electricity_distribution_grid: false
electricity_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv
electricity_grid_connection: true # only applies to onshore wind and utility PV
gas_distribution_grid: true
gas_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv
biomass_transport: false # biomass potential per country + transport between countries
conventional_generation: # generator : carrier
OCGT: gas
industry:
St_primary_fraction: 0.3 # fraction of steel produced via primary route (DRI + EAF) versus secondary route (EAF); today fraction is 0.6
H2_DRI: 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from 51kgH2/tSt in Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279
elec_DRI: 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf
Al_primary_fraction: 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4
MWh_CH4_per_tNH3_SMR: 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf
MWh_elec_per_tNH3_SMR: 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3
MWh_H2_per_tNH3_electrolysis: 6.5 # from https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tNH3 (>3/17 since some H2 lost and used for energy)
MWh_elec_per_tNH3_electrolysis: 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB)
NH3_process_emissions: 24.5 # in MtCO2/a from SMR for H2 production for NH3 from UNFCCC for 2015 for EU28
petrochemical_process_emissions: 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28
HVC_primary_fraction: 1.0 #fraction of current non-ammonia basic chemicals produced via primary route
hotmaps_locate_missing: false
reference_year: 2015
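As a quick sanity check of the H2_DRI figure above: 51 kg H2 per tonne of steel (Vogl et al. 2018) times the lower heating value of hydrogen, about 33.33 kWh/kg, indeed gives the stated 1.7 MWh_H2,LHV per tonne:

```python
h2_kg_per_t_steel = 51   # Vogl et al. (2018), doi:10.1016/j.jclepro.2018.08.279
lhv_h2 = 33.33           # kWh per kg H2, lower heating value

mwh_h2_per_t_steel = h2_kg_per_t_steel * lhv_h2 / 1000
print(round(mwh_h2_per_t_steel, 2))  # -> 1.7, matching H2_DRI
```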
costs:
year: 2030
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07
@@ -116,8 +251,8 @@ costs:
# Marginal and capital costs can be overwritten
# capital_cost:
# Wind: Bla
marginal_cost: #
# onwind: 500
marginal_cost:
solar: 0.01
onwind: 0.015
offwind: 0.015
@@ -139,17 +274,17 @@ solving:
clip_p_max_pu: 1.e-2
load_shedding: false
noisy_costs: true
min_iterations: 1
max_iterations: 1
# nhours: 1
skip_iterations: true
track_iterations: false
min_iterations: 4
max_iterations: 6
solver:
name: gurobi
threads: 4
method: 2 # barrier
crossover: 0
BarConvTol: 1.e-5
BarConvTol: 1.e-6
Seed: 123
AggFill: 0
PreDual: 0
@@ -164,180 +299,176 @@ solving:
#feasopt_tolerance: 1.e-6
mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2
industry:
'St_primary_fraction' : 0.3 # fraction of steel produced via primary route (DRI + EAF) versus secondary route (EAF); today fraction is 0.6
'H2_DRI' : 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279
'elec_DRI' : 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf
'Al_primary_fraction' : 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4
'MWh_CH4_per_tNH3_SMR' : 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf
'MWh_elec_per_tNH3_SMR' : 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3
'MWh_H2_per_tNH3_electrolysis' : 6.5 # from https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tHN3 (>3/17 since some H2 lost and used for energy)
'MWh_elec_per_tNH3_electrolysis' : 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB)
'NH3_process_emissions' : 24.5 # in MtCO2/a from SMR for H2 production for NH3 from UNFCCC for 2015 for EU28
'petrochemical_process_emissions' : 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28
plotting:
map:
figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72]
p_nom:
bus_size_factor: 5.e+4
linewidth_factor: 3.e+3 # 1.e+3 #3.e+3
costs_max: 1200
boundaries: [-11, 30, 34, 71]
color_geomap:
ocean: white
land: whitesmoke
costs_max: 1000
costs_threshold: 1
energy_max: 20000.
energy_min: -15000.
energy_threshold: 50.
vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"]
renewable_storage_techs: ["PHS","hydro"]
conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"]
storage_techs: ["hydro+PHS", "battery", "H2"]
# store_techs: ["Li ion", "water tanks"]
load_carriers: ["AC load"] #, "heat load", "Li ion load"]
AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"]
heat_links: ["heat pump", "resistive heater", "CHP heat", "CHP electric",
"gas boiler", "central heat pump", "central resistive heater", "central CHP heat",
"central CHP electric", "central gas boiler"]
heat_generators: ["gas boiler", "central gas boiler", "solar thermal collector", "central solar thermal collector"]
energy_max: 20000
energy_min: -20000
energy_threshold: 50
vre_techs:
- onwind
- offwind-ac
- offwind-dc
- solar
- ror
renewable_storage_techs:
- PHS
- hydro
conv_techs:
- OCGT
- CCGT
- Nuclear
- Coal
storage_techs:
- hydro+PHS
- battery
- H2
load_carriers:
- AC load
AC_carriers:
- AC line
- AC transformer
link_carriers:
- DC line
- Converter AC-DC
heat_links:
- heat pump
- resistive heater
- CHP heat
- CHP electric
- gas boiler
- central heat pump
- central resistive heater
- central CHP heat
- central CHP electric
- central gas boiler
heat_generators:
- gas boiler
- central gas boiler
- solar thermal collector
- central solar thermal collector
tech_colors:
"onwind" : "b"
"onshore wind" : "b"
'offwind' : "c"
'offshore wind' : "c"
'offwind-ac' : "c"
'offshore wind (AC)' : "c"
'offwind-dc' : "#009999"
'offshore wind (DC)' : "#009999"
'wave' : "#004444"
"hydro" : "#3B5323"
"hydro reservoir" : "#3B5323"
"ror" : "#78AB46"
"run of river" : "#78AB46"
'hydroelectricity' : '#006400'
'solar' : "y"
'solar PV' : "y"
'solar thermal' : 'coral'
'solar rooftop' : '#e6b800'
"OCGT" : "wheat"
"OCGT marginal" : "sandybrown"
"OCGT-heat" : "orange"
"gas boiler" : "orange"
"gas boilers" : "orange"
"gas boiler marginal" : "orange"
"gas-to-power/heat" : "orange"
"gas" : "brown"
"natural gas" : "brown"
"SMR" : "#4F4F2F"
"oil" : "#B5A642"
"oil boiler" : "#B5A677"
"lines" : "k"
"transmission lines" : "k"
"H2" : "m"
"hydrogen storage" : "m"
"battery" : "slategray"
"battery storage" : "slategray"
"home battery" : "#614700"
"home battery storage" : "#614700"
"Nuclear" : "r"
"Nuclear marginal" : "r"
"nuclear" : "r"
"uranium" : "r"
"Coal" : "k"
"coal" : "k"
"Coal marginal" : "k"
"Lignite" : "grey"
"lignite" : "grey"
"Lignite marginal" : "grey"
"CCGT" : "orange"
"CCGT marginal" : "orange"
"heat pumps" : "#76EE00"
"heat pump" : "#76EE00"
"air heat pump" : "#76EE00"
"ground heat pump" : "#40AA00"
"power-to-heat" : "#40AA00"
"resistive heater" : "pink"
"Sabatier" : "#FF1493"
"methanation" : "#FF1493"
"power-to-gas" : "#FF1493"
"power-to-liquid" : "#FFAAE9"
"helmeth" : "#7D0552"
"helmeth" : "#7D0552"
"DAC" : "#E74C3C"
"co2 stored" : "#123456"
"CO2 sequestration" : "#123456"
"CCS" : "k"
"co2" : "#123456"
"co2 vent" : "#654321"
"solid biomass for industry co2 from atmosphere" : "#654321"
"solid biomass for industry co2 to stored": "#654321"
"gas for industry co2 to atmosphere": "#654321"
"gas for industry co2 to stored": "#654321"
"Fischer-Tropsch" : "#44DD33"
"kerosene for aviation": "#44BB11"
"naphtha for industry" : "#44FF55"
"water tanks" : "#BBBBBB"
"hot water storage" : "#BBBBBB"
"hot water charging" : "#BBBBBB"
"hot water discharging" : "#999999"
"CHP" : "r"
"CHP heat" : "r"
"CHP electric" : "r"
"PHS" : "g"
"Ambient" : "k"
"Electric load" : "b"
"Heat load" : "r"
"Transport load" : "grey"
"heat" : "darkred"
"rural heat" : "#880000"
"central heat" : "#b22222"
"decentral heat" : "#800000"
"low-temperature heat for industry" : "#991111"
"process heat" : "#FF3333"
"heat demand" : "darkred"
"electric demand" : "k"
"Li ion" : "grey"
"district heating" : "#CC4E5C"
"retrofitting" : "purple"
"building retrofitting" : "purple"
"BEV charger" : "grey"
"V2G" : "grey"
"transport" : "grey"
"electricity" : "k"
"gas for industry" : "#333333"
"solid biomass for industry" : "#555555"
"industry new electricity" : "#222222"
"process emissions to stored" : "#444444"
"process emissions to atmosphere" : "#888888"
"process emissions" : "#222222"
"transport fuel cell" : "#AAAAAA"
"biogas" : "#800000"
"solid biomass" : "#DAA520"
"today" : "#D2691E"
"shipping" : "#6495ED"
"electricity distribution grid" : "#333333"
'industry electricity': "black"
"solid biomass transport": "green"
nice_names:
# OCGT: "Gas"
# OCGT marginal: "Gas (marginal)"
offwind: "offshore wind"
onwind: "onshore wind"
battery: "Battery storage"
lines: "Transmission lines"
AC line: "AC lines"
AC-AC: "DC lines"
ror: "Run of river"
nice_names_n:
offwind: "offshore\nwind"
onwind: "onshore\nwind"
# OCGT: "Gas"
H2: "Hydrogen\nstorage"
# OCGT marginal: "Gas (marginal)"
lines: "transmission\nlines"
ror: "run of river"
onwind: "#235ebc"
onshore wind: "#235ebc"
offwind: "#6895dd"
offshore wind: "#6895dd"
offwind-ac: "#6895dd"
offshore wind (AC): "#6895dd"
offwind-dc: "#74c6f2"
offshore wind (DC): "#74c6f2"
wave: '#004444'
hydro: '#3B5323'
hydro reservoir: '#3B5323'
ror: '#78AB46'
run of river: '#78AB46'
hydroelectricity: '#006400'
solar: "#f9d002"
solar PV: "#f9d002"
solar thermal: coral
solar rooftop: '#ffef60'
OCGT: wheat
OCGT marginal: sandybrown
OCGT-heat: '#ee8340'
gas boiler: '#ee8340'
gas boilers: '#ee8340'
gas boiler marginal: '#ee8340'
gas-to-power/heat: '#ee8340'
gas: brown
natural gas: brown
SMR: '#4F4F2F'
oil: '#B5A642'
oil boiler: '#B5A677'
lines: k
transmission lines: k
H2: m
hydrogen storage: m
battery: slategray
battery storage: slategray
home battery: '#614700'
home battery storage: '#614700'
Nuclear: r
Nuclear marginal: r
nuclear: r
uranium: r
Coal: k
coal: k
Coal marginal: k
Lignite: grey
lignite: grey
Lignite marginal: grey
CCGT: '#ee8340'
CCGT marginal: '#ee8340'
heat pumps: '#76EE00'
heat pump: '#76EE00'
air heat pump: '#76EE00'
ground heat pump: '#40AA00'
power-to-heat: '#40AA00'
resistive heater: pink
Sabatier: '#FF1493'
methanation: '#FF1493'
power-to-gas: '#FF1493'
power-to-liquid: '#FFAAE9'
helmeth: '#7D0552'
DAC: '#E74C3C'
co2 stored: '#123456'
CO2 sequestration: '#123456'
CC: k
co2: '#123456'
co2 vent: '#654321'
solid biomass for industry co2 from atmosphere: '#654321'
solid biomass for industry co2 to stored: '#654321'
gas for industry co2 to atmosphere: '#654321'
gas for industry co2 to stored: '#654321'
Fischer-Tropsch: '#44DD33'
kerosene for aviation: '#44BB11'
naphtha for industry: '#44FF55'
land transport oil: '#44DD33'
water tanks: '#BBBBBB'
hot water storage: '#BBBBBB'
hot water charging: '#BBBBBB'
hot water discharging: '#999999'
CHP: r
CHP heat: r
CHP electric: r
PHS: g
Ambient: k
Electric load: b
Heat load: r
heat: darkred
rural heat: '#880000'
central heat: '#b22222'
decentral heat: '#800000'
low-temperature heat for industry: '#991111'
process heat: '#FF3333'
heat demand: darkred
electric demand: k
Li ion: grey
district heating: '#CC4E5C'
retrofitting: purple
building retrofitting: purple
BEV charger: grey
V2G: grey
land transport EV: grey
electricity: k
gas for industry: '#333333'
solid biomass for industry: '#555555'
industry electricity: '#222222'
industry new electricity: '#222222'
process emissions to stored: '#444444'
process emissions to atmosphere: '#888888'
process emissions: '#222222'
oil emissions: '#666666'
land transport oil emissions: '#666666'
land transport fuel cell: '#AAAAAA'
biogas: '#800000'
solid biomass: '#DAA520'
today: '#D2691E'
shipping: '#6495ED'
electricity distribution grid: '#333333'
solid biomass transport: green
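
These color mappings are consumed by the plotting scripts. As a rough sketch of typical use (the file name, technology selection and capacity values here are assumptions for illustration, not taken from the workflow):

.. code:: python

    import yaml
    import matplotlib.pyplot as plt

    # load the configuration; path assumed relative to the repository root
    with open("config.default.yaml") as f:
        config = yaml.safe_load(f)

    colors = config["plotting"]["tech_colors"]
    techs = ["onwind", "solar", "CCGT"]
    capacities = [40, 35, 10]  # made-up GW values for illustration

    plt.bar(techs, capacities, color=[colors[t] for t in techs])
    plt.ylabel("capacity [GW]")
    plt.savefig("capacity_example.png")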


@ -1,343 +0,0 @@
version: 0.3.0
logging_level: INFO
results_dir: 'results/'
summary_dir: results
costs_dir: '../technology-data/outputs/'
run: 'your-run-name' # use this to keep track of runs with different settings
foresight: 'myopic' #options are overnight, myopic, perfect (perfect is not yet implemented)
scenario:
sectors: [E] # ignore this legacy setting
simpl: [''] # only relevant for PyPSA-Eur
lv: [1.0,1.5] # allowed transmission line volume expansion, can be any float >= 1.0 (today) or "opt"
clusters: [45,50] # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred
opts: [''] # only relevant for PyPSA-Eur
sector_opts: [Co2L0-3H-H-B-solar3-dist1] # this is where the main scenario settings are
# to really understand the options here, look in scripts/prepare_sector_network.py
# Co2Lx specifies the CO2 target in x% of the 1990 values; default will give default (5%);
# Co2L0p25 will give 25% CO2 emissions; Co2Lm0p05 will give 5% negative emissions
# xH is the temporal resolution; 3H is 3-hourly, i.e. one snapshot every 3 hours
# single letters are sectors: T for land transport, H for building heating,
# B for biomass supply, I for industry, shipping and aviation
# solarx or onwindx changes the available installable potential by factor x
# dist{n} includes distribution grids with investment cost of n times cost in data/costs.csv
planning_horizons : [2020, 2030, 2040, 2050] #investment years for myopic and perfect; or costs year for overnight
co2_budget_name: ['go'] #gives shape of CO2 budgets over planning horizon
# snapshots are originally set in PyPSA-Eur/config.yaml but used again by PyPSA-Eur-Sec
snapshots:
# arguments to pd.date_range
start: "2013-01-01"
end: "2014-01-01"
closed: 'left' # end is not inclusive
atlite:
cutout_dir: '../pypsa-eur/cutouts'
cutout_name: "europe-2013-era5"
# this information is NOT used but needed as an argument for
# pypsa-eur/scripts/add_electricity.py/load_costs in make_summary.py
electricity:
max_hours:
battery: 6
H2: 168
biomass:
year: 2030
scenario: "Med"
classes:
solid biomass: ['Primary agricultural residues', 'Forestry energy residue', 'Secondary forestry residues', 'Secondary Forestry residues sawdust', 'Forestry residues from landscape care biomass', 'Municipal waste']
not included: ['Bioethanol sugar beet biomass', 'Rapeseeds for biodiesel', 'sunflower and soya for Biodiesel', 'Starchy crops biomass', 'Grassy crops biomass', 'Willow biomass', 'Poplar biomass potential', 'Roundwood fuelwood', 'Roundwood Chips & Pellets']
biogas: ['Manure biomass potential', 'Sludge biomass']
# only relevant for foresight = myopic or perfect
existing_capacities:
grouping_years: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019]
threshold_capacity: 10
conventional_carriers: ['lignite', 'coal', 'oil', 'uranium']
sector:
'central' : True
'central_fraction' : 0.6
'dsm_restriction_value' : 0.75 #Set to 0 for no restriction on BEV DSM
'dsm_restriction_time' : 7 #Time at which SOC of BEV has to be dsm_restriction_value
'transport_heating_deadband_upper' : 20.
'transport_heating_deadband_lower' : 15.
'ICE_lower_degree_factor' : 0.375 #in per cent increase in fuel consumption per degree above deadband
'ICE_upper_degree_factor' : 1.6
'EV_lower_degree_factor' : 0.98
'EV_upper_degree_factor' : 0.63
'district_heating_loss' : 0.15
'bev' : True #turns on EV battery
'bev_availability' : 0.5 #How many cars do smart charging
'v2g' : True #allows feed-in to grid from EV battery
'transport_fuel_cell_share' : 0. #0 means all EVs, 1 means all FCs
'shipping_average_efficiency' : 0.4 #For conversion of fuel oil to propulsion in 2011
'time_dep_hp_cop' : True
'space_heating_fraction' : 1.0 #fraction of space heating active
'retrofitting' : False
'retroI-fraction' : 0.25
'retroII-fraction' : 0.55
'retrofitting-cost_factor' : 1.0
'tes' : True
'tes_tau' : 3.
'boilers' : True
'oil_boilers': False
'chp' : True
'solar_thermal' : True
  'solar_cf_correction': 0.788457 # = 1/1.2683
'marginal_cost_storage' : 0. #1e-4
'methanation' : True
'helmeth' : True
'dac' : True
'co2_vent' : True
'SMR' : True
'ccs_fraction' : 0.9
'hydrogen_underground_storage' : True
'use_fischer_tropsch_waste_heat' : True
'use_fuel_cell_waste_heat' : True
'electricity_distribution_grid' : False
'electricity_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv
'electricity_grid_connection' : True # only applies to onshore wind and utility PV
'gas_distribution_grid' : True
'gas_distribution_grid_cost_factor' : 1.0 #multiplies cost in data/costs.csv
'biomass_transport': False # biomass potential per country + transport between countries
costs:
year: 2030
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07
# [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html # noqa: E501
USD2013_to_EUR2013: 0.7532
# Marginal and capital costs can be overwritten
# capital_cost:
# Wind: Bla
marginal_cost: #
solar: 0.01
onwind: 0.015
offwind: 0.015
hydro: 0.
H2: 0.
battery: 0.
emission_prices: # only used with the option Ep (emission prices)
co2: 0.
lines:
length_factor: 1.25 #to estimate offwind connection costs
solving:
#tmpdir: "path/to/tmp"
options:
formulation: kirchhoff
clip_p_max_pu: 1.e-2
load_shedding: false
noisy_costs: true
min_iterations: 1
max_iterations: 1
# nhours: 1
solver:
name: gurobi
threads: 4
method: 2 # barrier
crossover: 0
BarConvTol: 1.e-5
Seed: 123
AggFill: 0
PreDual: 0
GURO_PAR_BARDENSETHRESH: 200
#FeasibilityTol: 1.e-6
#name: cplex
#threads: 4
#lpmethod: 4 # barrier
#solutiontype: 2 # non basic solution, ie no crossover
#barrier_convergetol: 1.e-5
#feasopt_tolerance: 1.e-6
mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2
industry:
'St_primary_fraction' : 0.3 # fraction of steel produced via primary route (DRI + EAF) versus secondary route (EAF); today fraction is 0.6
'H2_DRI' : 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279
'elec_DRI' : 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf
'Al_primary_fraction' : 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4
'MWh_CH4_per_tNH3_SMR' : 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf
'MWh_elec_per_tNH3_SMR' : 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3
'MWh_H2_per_tNH3_electrolysis' : 6.5 # from https://doi.org/10.1016/j.joule.2018.04.017, around 0.197 tH2/tHN3 (>3/17 since some H2 lost and used for energy)
'MWh_elec_per_tNH3_electrolysis' : 1.17 # from https://doi.org/10.1016/j.joule.2018.04.017 Table 13 (air separation and HB)
'NH3_process_emissions' : 24.5 # in MtCO2/a from SMR for H2 production for NH3 from UNFCCC for 2015 for EU28
'petrochemical_process_emissions' : 25.5 # in MtCO2/a for petrochemical and other from UNFCCC for 2015 for EU28
plotting:
map:
figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72]
p_nom:
bus_size_factor: 5.e+4
linewidth_factor: 3.e+3 # 1.e+3 #3.e+3
costs_max: 1200
costs_threshold: 1
energy_max: 20000.
energy_min: -15000.
energy_threshold: 50.
vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"]
renewable_storage_techs: ["PHS","hydro"]
conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"]
storage_techs: ["hydro+PHS", "battery", "H2"]
# store_techs: ["Li ion", "water tanks"]
load_carriers: ["AC load"] #, "heat load", "Li ion load"]
AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"]
heat_links: ["heat pump", "resistive heater", "CHP heat", "CHP electric",
"gas boiler", "central heat pump", "central resistive heater", "central CHP heat",
"central CHP electric", "central gas boiler"]
heat_generators: ["gas boiler", "central gas boiler", "solar thermal collector", "central solar thermal collector"]
tech_colors:
"onwind" : "b"
"onshore wind" : "b"
'offwind' : "c"
'offshore wind' : "c"
'offwind-ac' : "c"
'offshore wind (AC)' : "c"
'offwind-dc' : "#009999"
'offshore wind (DC)' : "#009999"
'wave' : "#004444"
"hydro" : "#3B5323"
"hydro reservoir" : "#3B5323"
"ror" : "#78AB46"
"run of river" : "#78AB46"
'hydroelectricity' : '#006400'
'solar' : "y"
'solar PV' : "y"
'solar thermal' : 'coral'
'solar rooftop' : '#e6b800'
"OCGT" : "wheat"
"OCGT marginal" : "sandybrown"
"OCGT-heat" : "orange"
"gas boiler" : "orange"
"gas boilers" : "orange"
"gas boiler marginal" : "orange"
"gas-to-power/heat" : "orange"
"gas" : "brown"
"natural gas" : "brown"
"SMR" : "#4F4F2F"
"oil" : "#B5A642"
"oil boiler" : "#B5A677"
"lines" : "k"
"transmission lines" : "k"
"H2" : "m"
"hydrogen storage" : "m"
"battery" : "slategray"
"battery storage" : "slategray"
"home battery" : "#614700"
"home battery storage" : "#614700"
"Nuclear" : "r"
"Nuclear marginal" : "r"
"nuclear" : "r"
"uranium" : "r"
"Coal" : "k"
"coal" : "k"
"Coal marginal" : "k"
"Lignite" : "grey"
"lignite" : "grey"
"Lignite marginal" : "grey"
"CCGT" : "orange"
"CCGT marginal" : "orange"
"heat pumps" : "#76EE00"
"heat pump" : "#76EE00"
"air heat pump" : "#76EE00"
"ground heat pump" : "#40AA00"
"power-to-heat" : "#40AA00"
"resistive heater" : "pink"
"Sabatier" : "#FF1493"
"methanation" : "#FF1493"
"power-to-gas" : "#FF1493"
"power-to-liquid" : "#FFAAE9"
"helmeth" : "#7D0552"
"helmeth" : "#7D0552"
"DAC" : "#E74C3C"
"co2 stored" : "#123456"
"CO2 sequestration" : "#123456"
"CCS" : "k"
"co2" : "#123456"
"co2 vent" : "#654321"
"solid biomass for industry co2 from atmosphere" : "#654321"
"solid biomass for industry co2 to stored": "#654321"
"gas for industry co2 to atmosphere": "#654321"
"gas for industry co2 to stored": "#654321"
"Fischer-Tropsch" : "#44DD33"
"kerosene for aviation": "#44BB11"
"naphtha for industry" : "#44FF55"
"water tanks" : "#BBBBBB"
"hot water storage" : "#BBBBBB"
"hot water charging" : "#BBBBBB"
"hot water discharging" : "#999999"
"CHP" : "r"
"CHP heat" : "r"
"CHP electric" : "r"
"PHS" : "g"
"Ambient" : "k"
"Electric load" : "b"
"Heat load" : "r"
"Transport load" : "grey"
"heat" : "darkred"
"rural heat" : "#880000"
"central heat" : "#b22222"
"decentral heat" : "#800000"
"low-temperature heat for industry" : "#991111"
"process heat" : "#FF3333"
"heat demand" : "darkred"
"electric demand" : "k"
"Li ion" : "grey"
"district heating" : "#CC4E5C"
"retrofitting" : "purple"
"building retrofitting" : "purple"
"BEV charger" : "grey"
"V2G" : "grey"
"transport" : "grey"
"electricity" : "k"
"gas for industry" : "#333333"
"solid biomass for industry" : "#555555"
"industry new electricity" : "#222222"
"process emissions to stored" : "#444444"
"process emissions to atmosphere" : "#888888"
"process emissions" : "#222222"
"transport fuel cell" : "#AAAAAA"
"biogas" : "#800000"
"solid biomass" : "#DAA520"
"today" : "#D2691E"
"shipping" : "#6495ED"
"electricity distribution grid" : "#333333"
'industry electricity': "black"
"solid biomass transport": "green"
nice_names:
# OCGT: "Gas"
# OCGT marginal: "Gas (marginal)"
offwind: "offshore wind"
onwind: "onshore wind"
battery: "Battery storage"
lines: "Transmission lines"
AC line: "AC lines"
AC-AC: "DC lines"
ror: "Run of river"
nice_names_n:
offwind: "offshore\nwind"
onwind: "onshore\nwind"
# OCGT: "Gas"
H2: "Hydrogen\nstorage"
# OCGT marginal: "Gas (marginal)"
lines: "transmission\nlines"
ror: "run of river"


@ -1,8 +0,0 @@
,go,wait
2020,0.7011648746,0.7011648746
2025,0.5241935484,0.6285842294
2030,0.2970430108,0.3503584229
2035,0.1500896057,0.0725806452
2040,0.0712365591,0
2045,0.0322580645,0
2050,0,0

@ -0,0 +1,3 @@
attribute,type,unit,default,description,status
location,string,n/a,n/a,Reference to original electricity bus,Input (optional)
unit,string,n/a,MWh,Unit of the bus (descriptive only), Input (optional)

@ -0,0 +1,3 @@
attribute,type,unit,default,description,status
build_year,integer,year,n/a,build year,Input (optional)
lifetime,float,years,n/a,lifetime,Input (optional)

@ -0,0 +1,13 @@
attribute,type,unit,default,description,status
bus2,string,n/a,n/a,2nd bus,Input (optional)
bus3,string,n/a,n/a,3rd bus,Input (optional)
bus4,string,n/a,n/a,4th bus,Input (optional)
efficiency2,static or series,per unit,1.,2nd bus efficiency,Input (optional)
efficiency3,static or series,per unit,1.,3rd bus efficiency,Input (optional)
efficiency4,static or series,per unit,1.,4th bus efficiency,Input (optional)
p2,series,MW,0.,2nd bus output,Output
p3,series,MW,0.,3rd bus output,Output
p4,series,MW,0.,4th bus output,Output
build_year,integer,year,n/a,build year,Input (optional)
lifetime,float,years,n/a,lifetime,Input (optional)
carrier,string,n/a,n/a,carrier,Input (optional)
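
These rows extend PyPSA's Link component so that a single link can connect up to four buses and carry build years and lifetimes. A minimal sketch of how such an override file is typically registered (the bus names, efficiencies and the CHP example are made-up illustrations, not the model's actual setup):

.. code:: python

    import pypsa

    # extend the default Link attributes with the extra columns defined above
    overrides = pypsa.descriptors.Dict(
        {k: v.copy() for k, v in pypsa.components.component_attrs.items()}
    )
    overrides["Link"].loc["bus2"] = [
        "string", "n/a", "n/a", "2nd bus", "Input (optional)"]
    overrides["Link"].loc["efficiency2"] = [
        "static or series", "per unit", 1., "2nd bus efficiency", "Input (optional)"]

    n = pypsa.Network(override_component_attrs=overrides)
    for name, carrier in [("gas", "gas"), ("elec", "AC"), ("heat", "heat")]:
        n.add("Bus", name, carrier=carrier)

    # a CHP as a single link with one input (bus0) and two outputs (bus1, bus2)
    n.add("Link", "CHP", bus0="gas", bus1="elec", bus2="heat",
          efficiency=0.4, efficiency2=0.45, p_nom=100)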

@ -0,0 +1,2 @@
attribute,type,unit,default,description,status
carrier,string,n/a,n/a,carrier,Input (optional)

@ -0,0 +1,4 @@
attribute,type,unit,default,description,status
build_year,integer,year,n/a,build year,Input (optional)
lifetime,float,years,n/a,lifetime,Input (optional)
carrier,string,n/a,n/a,carrier,Input (optional)

@ -0,0 +1,49 @@
NA_ITEM,Price level indices (EU28=100),,,,,,,,,
PPP_CAT,Actual individual consumption,,,,,,,,,
,,,,,,,,,,
GEO/TIME,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018
European Union - 28 countries,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0,100.0
Belgium,113.6,111.9,112.4,111.5,111.0,108.9,106.3,110.3,112.3,112.5
Bulgaria,47.1,45.7,45.5,45.0,44.2,42.6,42.2,43.2,45.1,46.3
Czech Republic,64.5,66.6,68.9,66.9,63.3,58.3,58.4,60.5,62.4,65.0
Denmark,141.7,140.0,139.9,140.0,139.3,138.5,135.0,140.0,138.9,138.1
Germany,104.6,103.1,102.2,101.1,102.5,101.5,100.4,102.6,103.7,104.1
Estonia,67.5,66.0,67.2,67.6,69.9,69.9,68.9,71.0,73.9,76.3
Ireland,129.9,122.7,122.5,120.5,123.2,124.9,122.2,126.5,129.1,129.2
Greece,93.6,95.4,94.9,91.9,87.8,83.8,81.0,82.3,83.0,81.8
Spain,97.5,98.7,98.5,95.8,95.1,92.7,90.0,92.7,93.7,93.7
France,111.2,109.9,109.6,108.7,107.0,106.0,104.0,105.8,107.1,107.4
Croatia,70.2,70.1,68.1,65.5,64.5,62.5,60.7,61.3,63.0,64.0
Italy,103.6,100.4,101.5,101.1,102.3,102.6,100.3,101.1,101.6,101.4
Cyprus,92.0,94.6,95.8,96.0,95.2,92.0,88.5,89.8,91.2,90.6
Latvia,68.1,62.3,65.5,65.9,66.0,66.0,64.2,66.9,68.3,69.5
Lithuania,60.3,57.8,58.3,58.0,57.8,56.9,55.9,58.3,60.0,61.4
Luxembourg,130.0,136.5,136.0,135.8,135.1,135.7,132.1,137.0,139.9,141.6
Hungary,58.2,57.4,56.4,54.9,54.4,53.4,53.3,56.2,59.4,59.0
Malta,75.8,76.6,78.0,78.0,80.8,80.5,79.8,81.4,81.9,83.4
Netherlands,108.5,112.3,112.7,111.3,111.9,111.9,109.6,113.8,114.6,114.8
Austria,109.9,109.2,110.1,108.9,109.1,109.1,107.2,110.2,112.8,113.7
Poland,53.1,55.2,53.7,52.1,52.4,52.5,51.1,50.9,53.5,54.3
Portugal,85.2,85.0,85.3,82.7,81.1,80.4,78.7,81.6,83.5,84.6
Romania,49.1,46.9,47.7,45.6,47.8,47.6,47.2,46.8,48.0,48.6
Slovenia,85.3,84.3,83.7,81.8,82.1,81.5,79.8,82.3,82.7,83.8
Slovakia,66.6,62.5,63.4,63.4,63.4,63.3,62.3,63.6,65.4,66.1
Finland,121.0,120.3,121.6,121.8,124.0,122.9,119.6,122.8,123.3,123.4
Sweden,109.5,124.6,131.7,134.3,140.5,133.6,128.8,135.3,134.5,126.9
United Kingdom,107.5,111.4,111.3,118.6,117.0,123.6,134.7,123.5,117.6,117.7
Iceland,94.9,107.6,109.6,111.6,116.0,123.4,132.5,154.5,172.3,163.7
Norway,142.4,158.8,165.3,172.5,166.9,157.2,152.2,155.0,157.3,155.4
Switzerland,131.6,146.4,161.7,160.6,155.1,153.0,167.0,169.8,167.1,159.1
Candidate and potential candidate countries except Turkey and Kosovo (under United Nations Security Council Resolution 1244/99),48.0,45.6,47.1,44.8,46.4,45.2,43.4,44.4,46.0,47.5
Montenegro,52.3,49.5,49.3,50.1,50.5,49.3,48.0,48.7,50.5,51.1
North Macedonia,41.4,41.3,42.7,42.1,42.5,41.9,40.9,41.7,43.2,43.3
Albania,46.2,42.8,42.1,40.6,41.9,41.5,39.8,43.0,43.5,46.6
Serbia,48.3,45.0,48.0,44.5,47.3,45.5,43.1,43.8,46.1,47.9
Turkey,55.4,61.2,54.7,58.5,57.7,51.6,50.5,50.2,45.4,37.0
Bosnia and Herzegovina,51.6,50.7,50.6,49.2,49.1,48.4,47.0,47.5,48.2,48.9
Kosovo (under United Nations Security Council Resolution 1244/99),:,:,:,:,:,:,:,:,:,:
United States,92.4,98,93.3,101.2,100.3,99,115.9,121.1,120.8,115.2
Japan,115.1,126.1,127.8,133.8,101.7,94.8,96.5,113,109.4,103.9
,,,,,,,,,,
"Source: Eurostat Purchasing power parities (PPPs), price level indices and real expenditures for ESA 2010 aggregates (2019)",,,,,,,,,,
https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Comparative_price_levels_for_investment,,,,,,,,,,

File diff suppressed because it is too large


@ -0,0 +1,164 @@
Electricity prices for household consumers - bi-annual data (from 2007 onwards) [nrg_pc_204],,,,
,,,,
Last update,30.10.19,,,
Extracted on,14.11.19,,,
Source of data,Eurostat,,,
,,,,
PRODUCT,Electrical energy,,,
CONSOM,Band DC : 2 500 kWh < Consumption < 5 000 kWh,,,
UNIT,Kilowatt-hour,,,
TIME,2018S1,,,
,,,,
CURRENCY,Euro,Euro,Euro,
GEO/TAX,Excluding taxes and levies,Excluding VAT and other recoverable taxes and levies,All taxes and levies included,% cost without taxes
European Union - 28 countries,0.1285,0.1756,0.2052,0.626218323586745
"Euro area (EA11-2000, EA12-2006, EA13-2007, EA15-2008, EA16-2010, EA17-2013, EA18-2014, EA19)",0.1331,0.1855,0.2188,0.608318098720293
Belgium,0.1903,0.2279,0.2733,0.696304427369191
Bulgaria,0.0816,0.0816,0.0979,0.833503575076609
Czech Republic,0.1286,0.1298,0.1573,0.817546090273363
Denmark,0.1011,0.2501,0.3126,0.32341650671785
Germany,0.1379,0.2510,0.2987,0.461667224640107
Estonia,0.0989,0.1123,0.1348,0.733679525222552
Ireland,0.1846,0.2087,0.2369,0.779231743351625
Greece,0.1132,0.1482,0.1672,0.677033492822967
Spain,0.1873,0.1969,0.2383,0.785984053713806
France,0.1134,0.1492,0.1748,0.648741418764302
Croatia,0.1020,0.1160,0.1311,0.778032036613272
Italy,0.1285,0.1873,0.2067,0.621673923560716
Cyprus,0.1445,0.1606,0.1893,0.763338615953513
Latvia,0.1035,0.1266,0.1531,0.676028739386022
Lithuania,0.0771,0.0906,0.1097,0.702825888787603
Luxembourg,0.1283,0.1547,0.1671,0.767803710353082
Hungary,0.0885,0.0885,0.1123,0.78806767586821
Malta,0.1209,0.1224,0.1285,0.940856031128405
Netherlands,0.1187,0.1410,0.1706,0.6957796014068
Austria,0.1232,0.1638,0.1966,0.626653102746694
Poland,0.0906,0.1146,0.1410,0.642553191489362
Portugal,0.1007,0.1826,0.2246,0.448352626892253
Romania,0.0990,0.1120,0.1333,0.742685671417854
Slovenia,0.1108,0.1322,0.1613,0.686918784872908
Slovakia,0.0942,0.1305,0.1566,0.601532567049808
Finland,0.1074,0.1300,0.1612,0.666253101736973
Sweden,0.1202,0.1513,0.1891,0.635642517186674
United Kingdom,0.1347,0.1797,0.1887,0.713831478537361
Iceland,0.1222,0.1246,0.1545,0.790938511326861
Liechtenstein,:,:,:,#VALUE!
Norway,0.1254,0.1434,0.1751,0.716162193032553
Montenegro,0.0828,0.0844,0.1024,0.80859375
North Macedonia,0.0662,0.0662,0.0781,0.847631241997439
Albania,:,:,:,#VALUE!
Serbia,0.0539,0.0587,0.0705,0.764539007092199
Turkey,0.0727,0.0766,0.0904,0.804203539823009
Bosnia and Herzegovina,0.0722,0.0738,0.0864,0.835648148148148
Kosovo (under United Nations Security Council Resolution 1244/99),0.0569,0.0586,0.0633,0.898894154818325
Moldova,0.1020,0.1020,0.1020,1
Ukraine,0.0342,0.0342,0.0410,0.834146341463415
,,,0.157271052631579,
Special value:,,,,
:,not available,,,
,,,,
PRODUCT,Electrical energy,,,
CONSOM,Band DC : 2 500 kWh < Consumption < 5 000 kWh,,,
UNIT,Kilowatt-hour,,,
TIME,2018S2,,,
,,,,
CURRENCY,Euro,Euro,Euro,
GEO/TAX,Excluding taxes and levies,Excluding VAT and other recoverable taxes and levies,All taxes and levies included,
European Union - 28 countries,0.1329,0.1810,0.2113,
"Euro area (EA11-2000, EA12-2006, EA13-2007, EA15-2008, EA16-2010, EA17-2013, EA18-2014, EA19)",0.1376,0.1902,0.2242,
Belgium,0.1998,0.2429,0.2937,
Bulgaria,0.0838,0.0838,0.1005,
Czechia,0.1299,0.1311,0.1586,
Denmark,0.1116,0.2499,0.3123,
Germany (until 1990 former territory of the FRG),0.1378,0.2521,0.3000,
Estonia,0.1048,0.1182,0.1418,
Ireland,0.2006,0.2237,0.2539,
Greece,0.1125,0.1458,0.1646,
Spain,0.1947,0.2047,0.2477,
France,0.1168,0.1537,0.1799,
Croatia,0.1028,0.1169,0.1321,
Italy,0.1416,0.1964,0.2161,
Cyprus,0.1745,0.1850,0.2183,
Latvia,0.1041,0.1249,0.1511,
Lithuania,0.0771,0.0906,0.1097,
Luxembourg,0.1302,0.1566,0.1691,
Hungary,0.0880,0.0880,0.1118,
Malta,0.1229,0.1244,0.1306,
Netherlands,0.1212,0.1420,0.1707,
Austria,0.1265,0.1676,0.2012,
Poland,0.0889,0.1135,0.1396,
Portugal,0.1028,0.1864,0.2293,
Romania,0.0964,0.1107,0.1317,
Slovenia,0.1125,0.1342,0.1638,
Slovakia,0.0849,0.1218,0.1462,
Finland,0.1144,0.1369,0.1698,
Sweden,0.1287,0.1592,0.1990,
United Kingdom,0.1401,0.1927,0.2024,
Iceland,0.1152,0.1175,0.1457,
Liechtenstein,:,:,:,
Norway,0.1382,0.1562,0.1907,
Montenegro,0.0829,0.0848,0.1030,
North Macedonia,0.0667,0.0667,0.0787,
Albania,0.0759,0.0759,0.0910,
Serbia,0.0542,0.0591,0.0709,
Turkey,0.0688,0.0726,0.0857,
Bosnia and Herzegovina,0.0729,0.0744,0.0871,
Kosovo (under United Nations Security Council Resolution 1244/99),0.0579,0.0591,0.0638,
Moldova,0.0960,0.0960,0.1029,
Ukraine,0.0342,0.0342,0.0410,
,,,,
Special value:,,,,
:,not available,,,
,,,,
PRODUCT,Electrical energy,,,
CONSOM,Band DC : 2 500 kWh < Consumption < 5 000 kWh,,,
UNIT,Kilowatt-hour,,,
TIME,2019S1,,,
,,,,
CURRENCY,Euro,Euro,Euro,
GEO/TAX,Excluding taxes and levies,Excluding VAT and other recoverable taxes and levies,All taxes and levies included,
European Union - 28 countries,0.1351,0.1841,0.2147,
"Euro area (EA11-2000, EA12-2006, EA13-2007, EA15-2008, EA16-2010, EA17-2013, EA18-2014, EA19)",0.1396,0.1928,0.2270,
Belgium,0.1965,0.2355,0.2839,
Bulgaria,0.0831,0.0831,0.0997,
Czechia,0.1433,0.1444,0.1748,
Denmark,0.1084,0.2387,0.2984,
Germany (until 1990 former territory of the FRG),0.1473,0.2595,0.3088,
Estonia,0.0982,0.1131,0.1357,
Ireland,0.2027,0.2134,0.2423,
Greece,0.1139,0.1482,0.1650,
Spain,0.1889,0.1986,0.2403,
France,0.1138,0.1508,0.1765,
Croatia,0.1028,0.1169,0.1321,
Italy,0.1432,0.2090,0.2301,
Cyprus,0.1762,0.1867,0.2203,
Latvia,0.1136,0.1347,0.1629,
Lithuania,0.0947,0.1037,0.1255,
Luxembourg,0.1326,0.1666,0.1798,
Hungary,0.0882,0.0882,0.1120,
Malta,0.1228,0.1243,0.1305,
Netherlands,0.1357,0.1708,0.2052,
Austria,0.1316,0.1695,0.2034,
Poland,0.0884,0.1092,0.1343,
Portugal,0.1103,0.1751,0.2154,
Romania,0.0983,0.1141,0.1358,
Slovenia,0.1125,0.1339,0.1634,
Slovakia,0.0962,0.1314,0.1577,
Finland,0.1173,0.1398,0.1734,
Sweden,0.1297,0.1612,0.2015,
United Kingdom,0.1450,0.2021,0.2122,
Iceland,0.1112,0.1134,0.1406,
Liechtenstein,:,:,:,
Norway,0.1360,0.1529,0.1867,
Montenegro,0.0834,0.0850,0.1032,
North Macedonia,:,:,:,
Albania,:,:,:,
Serbia,0.0541,0.0589,0.0706,
Turkey,0.0684,0.0718,0.0847,
Bosnia and Herzegovina,0.0729,0.0746,0.0873,
Kosovo (under United Nations Security Council Resolution 1244/99),0.0537,0.0556,0.0600,
Moldova,0.0936,0.0936,0.0936,
Ukraine,0.0369,0.0369,0.0442,
,,,,
Special value:,,,,
:,not available,,,

@ -0,0 +1,17 @@
country,sector,estimated,value,source,,comments,population [in Million],
AL,residential,0,64,p.13 1.6 million m² = 2.5% of total floor area,https://www.buildup.eu/sites/default/files/content/sled_albania_residential_building_eng.pdf,,,
AL,services,0,,,,,,
BA,residential,0,125.89,Tabula,https://episcope.eu/building-typology/country/ba/,strong differences ? other source claims more than 300 Million m²,,https://www.buildup.eu/sites/default/files/content/sled_serbia_building_eng.pdf
BA,services,0,,,,,,
RS,residential,0,72.3,Odyssee(2011),https://odyssee.enerdata.net/database/,,,
RS,services,0,,,,,,
MK,residential,0,,"Worldbank p.7 Skopje 75% residential, 25% commercial",http://documents.albankaldawli.org/curated/ar/838951574180734318/pdf/Project-Information-Document-North-Macedonia-Public-Sector-Energy-Efficiency-Project-P149990.pdf,15 % live in illegal constructed buildings ? not part of the statistics,2.1,
MK,services,0,,,,,,
ME,residential,0,19.625,p.13 0.314 million m² = 1.6% of total floor area,buildup.eu/sites/default/files/content/sled_montenegro_building_eng.pdf,Only 50 % of the floor area is heated p.12,,buildup.eu/sites/default/files/content/sled_montenegro_building_eng.pdf
ME,services,0,,,,,,
CH,residential,0,99.45,Odyssee(2015),,,,
CH,services,1,78.1392857142857,p.8 44%floor area is services,https://bta.climate-kic.org/wp-content/uploads/2018/04/171123-CK-BTA-DEF-BMB_SWITZERLAND_.pdf,,,
NO,residential,0,121.55,Odyssee(2015),,,,
NO,services,0,115.21,Odyssee(2015),,,,
PL,residential,0,1028.41,EU Building Database,,,,
PL,services,0,498.84,EU Building Database,,,,

@ -0,0 +1,7 @@
component,cost_fix,cost_var,life_time,comment,additional source
wall,70.34,2.36,40,Agora Energiewende p.110,
floor,39.39,1.3,40,Agora Energiewende p.110,
roof,75.61,1.3,40,Agora Energiewende p.110,https://www.baulinks.de/webplugin/2018/1524.php4
window,nan,nan,35,,
source: p.37 https://www.umweltbundesamt.de/sites/default/files/medien/1410/publikationen/2019-10-29_texte_132-2019_energieaufwand-gebaeudekonzepte.pdf,,,https://www.agora-energiewende.de/en/publications/building-sector-efficiency-a-crucial-component-of-the-energy-transition/,,
,,,p.115,,

@ -0,0 +1,9 @@
component,Before 1945,1945 - 1969,1970 - 1979,1980 - 1989,1990 - 1999,2000 - 2010,Post 2010,sector
Walls,1.7,1.4,0.9,0.9,0.6,0.4,1.7,residential
Windows,4.6,3.6,2.6,2.6,2.1,2.1,2.1,residential
Roof,0.8,0.7,0.6,0.6,0.6,0.4,0.33,residential
Floor,1.9,1.4,1.2,1.1,0.9,0.6,0.45,residential
Walls,1.3,1.3,1.3,0.8,0.6,0.6,0.6,services
Windows,4.7,3.7,2.6,2.6,2.3,2.1,2.1,services
Roof,1,0.9,0.7,0.5,0.3,0.3,0.3,services
Floor,1.6,1.2,1.2,1.1,1,0.7,0.7,services

@ -0,0 +1,8 @@
strength,u_value,cost,u_limit,comment
[m],[W/m^2K],EUR/m^2,[W/m^2K],
0.076,1.34,180.08,3.5,Double-glazing
0.197,0.8,225,1.3,Triple-glazing
,,,,
"source: https://www.agora-energiewende.de/en/publications/building-sector-efficiency-a-crucial-component-of-the-energy-transition/
p.115
",,,,

@ -70,9 +70,9 @@ author = u'2019-2020 Tom Brown (KIT), Marta Victoria (Aarhus University), Lisa Z
# built documents.
#
# The short X.Y version.
version = u'0.3'
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.3.0'
release = u'0.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.


@ -2,7 +2,7 @@ description,file/folder,licence,source
JRC IDEES database,jrc-idees-2015/,CC BY 4.0,https://ec.europa.eu/jrc/en/potencia/jrc-idees
urban/rural fraction,urban_percent.csv,unknown,unknown
JRC biomass potentials,biomass/,unknown,https://doi.org/10.2790/39014
EEA emission statistics,eea/,unknown,https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-14
EEA emission statistics,eea/UNFCCC_v23.csv,EEA standard re-use policy,https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
Eurostat Energy Balances,eurostat-energy_balances-*/,Eurostat,https://ec.europa.eu/eurostat/web/energy/data/energy-balances
Swiss energy statistics from Swiss Federal Office of Energy,switzerland-sfoe/,unknown,http://www.bfe.admin.ch/themen/00526/00541/00542/02167/index.html?dossier_id=02169
BASt emobility statistics,emobility/,unknown,http://www.bast.de/DE/Verkehrstechnik/Fachthemen/v2-verkehrszaehlung/Stundenwerte.html?nn=626916
@ -17,3 +17,10 @@ IRENA existing VRE capacities,existing_infrastructure/{solar|onwind|offwind}_cap
USGS ammonia production,myb1-2017-nitro.xls,unknown,https://www.usgs.gov/centers/nmic/nitrogen-statistics-and-information
hydrogen salt cavern potentials,hydrogen_salt_cavern_potentials.csv,CC BY 4.0,https://doi.org/10.1016/j.ijhydene.2019.12.161
hotmaps industrial site database,Industrial_Database.csv,CC BY 4.0,https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database
Hotmaps building stock data,data_building_stock.csv,CC BY 4.0,https://gitlab.com/hotmaps/building-stock
U-values Poland,u_values_poland.csv,unknown,https://data.europa.eu/euodp/de/data/dataset/building-stock-observatory
Floor area missing in hotmaps building stock data,floor_area_missing.csv,unknown,https://data.europa.eu/euodp/de/data/dataset/building-stock-observatory
Comparative level investment,comparative_level_investment.csv,Eurostat,https://ec.europa.eu/eurostat/statistics-explained/index.php?title=Comparative_price_levels_for_investment
Electricity taxes,electricity_taxes_eu.csv,Eurostat,https://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=nrg_pc_204&lang=en
Building topologies and corresponding standard values,tabula-calculator-calcsetbuilding.csv,unknown,https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx
Retrofitting thermal envelope costs for Germany,retro_cost_germany.csv,unknown,https://www.iwu.de/forschung/handlungslogiken/kosten-energierelevanter-bau-und-anlagenteile-bei-modernisierung/


@ -4,8 +4,8 @@ PyPSA-Eur-Sec: A Sector-Coupled Open Optimisation Model of the European Energy S
.. image:: https://img.shields.io/github/v/release/pypsa/pypsa-eur-sec?include_prereleases
:alt: GitHub release (latest by date including pre-releases)
.. image:: https://readthedocs.org/projects/pypsa-eur/badge/?version=latest
:target: https://pypsa-eur.readthedocs.io/en/latest/?badge=latest
.. image:: https://readthedocs.org/projects/pypsa-eur-sec/badge/?version=latest
:target: https://pypsa-eur-sec.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://img.shields.io/github/license/pypsa/pypsa-eur-sec
@ -66,42 +66,6 @@ PyPSA-Eur-Sec is designed to be imported into the open toolbox `PyPSA <https://w
This project is maintained by the `Energy System Modelling group <https://www.iai.kit.edu/english/2338.php>`_ at the `Institute for Automation and Applied Informatics <https://www.iai.kit.edu/english/index.php>`_ at the `Karlsruhe Institute of Technology <http://www.kit.edu/english/index.php>`_. The group is funded by the `Helmholtz Association <https://www.helmholtz.de/en/>`_ until 2024. Previous versions were developed by the `Renewable Energy Group <https://fias.uni-frankfurt.de/physics/schramm/renewable-energy-system-and-network-analysis/>`_ at `FIAS <https://fias.uni-frankfurt.de/>`_ to carry out simulations for the `CoNDyNet project <http://condynet.de/>`_, financed by the `German Federal Ministry for Education and Research (BMBF) <https://www.bmbf.de/en/index.html>`_ as part of the `Stromnetze Research Initiative <http://forschung-stromnetze.info/projekte/grundlagen-und-konzepte-fuer-effiziente-dezentrale-stromnetze/>`_.
Spatial resolution of sectors
=============================
Not all of the sectors are at the full nodal resolution, and some are
distributed to nodes using heuristics that need to be corrected. Some
networks are copper-plated to reduce computational times.
For example:
Electricity network: nodal.
Electricity demand: nodal, distributed in each country based on
population and GDP.
Building heating demand: nodal, distributed in each country based on
population.
Industry demand: nodal, distributed in each country based on
population (will be corrected to real locations of industry, see
github issue).
Hydrogen network: nodal.
Methane network: copper-plated for Europe, since future demand is so
low and no bottlenecks are expected.
Solid biomass: copper-plated until transport costs can be
incorporated.
CO2: copper-plated (but a transport and storage cost is added for
sequestered CO2).
Liquid hydrocarbons: copper-plated since transport costs are low.
Documentation
=============
@ -116,6 +80,20 @@ Documentation
installation
**Implementation details**
* :doc:`spatial_resolution`
* :doc:`supply_demand`
.. toctree::
:hidden:
:maxdepth: 1
:caption: Implementation details
spatial_resolution
supply_demand
**Foresight options**
* :doc:`overnight`


@ -16,7 +16,7 @@ its dependencies. Clone the repository:
.. code:: bash
projects % git clone git@github.com:PyPSA/pypsa-eur.git
projects % git clone https://github.com/PyPSA/pypsa-eur.git
then download and unpack all the PyPSA-Eur data files by running the following snakemake rule:
@ -32,7 +32,7 @@ Next install the technology assumptions database `technology-data <https://githu
.. code:: bash
projects % git clone git@github.com:PyPSA/technology-data.git
projects % git clone https://github.com/PyPSA/technology-data.git
Clone PyPSA-Eur-Sec repository
@ -42,7 +42,7 @@ Create a parallel directory for `PyPSA-Eur-Sec <https://github.com/PyPSA/pypsa-e
.. code:: bash
projects % git clone git@github.com:PyPSA/pypsa-eur-sec.git
projects % git clone https://github.com/PyPSA/pypsa-eur-sec.git
Environment/package requirements
================================
@ -54,6 +54,13 @@ The requirements are the same as `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>
xarray version >= 0.15.1, you will need the latest master branch of
atlite version 0.0.2.
You can create an environment using the environment.yaml file in pypsa-eur/envs:
.../pypsa-eur % conda env create -f envs/environment.yaml
.../pypsa-eur % conda activate pypsa-eur
See details in `PyPSA-Eur Installation <https://pypsa-eur.readthedocs.io/en/latest/installation.html>`_
Data requirements
=================
@ -66,8 +73,8 @@ To download and extract the data bundle on the command line:
.. code:: bash
projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-201012.tar.gz"
projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-201012.tar.gz
projects/pypsa-eur-sec/data % wget "https://nworbmot.org/pypsa-eur-sec-data-bundle-210418.tar.gz"
projects/pypsa-eur-sec/data % tar xvzf pypsa-eur-sec-data-bundle-210418.tar.gz
The data licences and sources are given in the following table.


@ -6,7 +6,7 @@ Myopic transition path
The myopic code can be used to investigate progressive changes in a network, for instance, those taking place throughout a transition path. The capacities installed in a certain time step are maintained in the network until their operational lifetime expires.
The myopic approach was initially developed and used in the paper `Early decarbonisation of the European Energy system pays off (2020) <https://arxiv.org/abs/2004.11009>`__ but the current implementation complies with the pypsa-eur-sec standard workflow and is compatible with using the higher resolution electricity transmission model `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`__ rather than a one-node-per-country model.
The myopic approach was initially developed and used in the paper `Early decarbonisation of the European Energy system pays off (2020) <https://www.nature.com/articles/s41467-020-20015-4>`__ but the current implementation complies with the pypsa-eur-sec standard workflow and is compatible with using the higher resolution electricity transmission model `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`__ rather than a one-node-per-country model.
The current code applies the myopic approach to generators, storage technologies and links in the power sector and the space and water heating sector.
@ -17,12 +17,14 @@ See also other `outstanding issues <https://github.com/PyPSA/pypsa-eur-sec/issue
Configuration
=================
PyPSA-Eur-Sec has several configuration options which are collected in a config.yaml file located in the root directory. For myopic optimization, users should copy the provided myopic configuration ``config.myopic.yaml`` and make their own modifications and assumptions in the user-specific configuration file (``config.yaml``).
PyPSA-Eur-Sec has several configuration options which are collected in a config.yaml file located in the root directory. For myopic optimization, users should copy the provided default configuration ``config.default.yaml`` and make their own modifications and assumptions in the user-specific configuration file (``config.yaml``).
The following options included in the config.yaml file are relevant for the myopic code.
To activate the myopic option select ``foresight: 'myopic'`` in ``config.yaml``.
To set the investment years which are sequentially simulated for the myopic investment planning, select for example ``planning_horizons : [2020, 2030, 2040, 2050]`` in ``config.yaml``.
**existing capacities**
@ -59,12 +61,15 @@ Wildcards
The {planning_horizons} wildcard indicates the timesteps in which the network is optimized, e.g. planning_horizons: [2020, 2030, 2040, 2050]
Options
=============
The total carbon budget for the entire transition path can be indicated in the ``scenario.sector_opts`` in ``config.yaml``.
The carbon budget can be split among the ``planning_horizons`` following an exponential or beta decay.
E.g. ``'cb40ex0'`` distributes a carbon budget of 40 GtCO2 following an exponential decay whose initial linear growth rate $r$ is zero
**{co2_budget_name} wildcard**
$e(t) = e_0 (1 + (r+m)t) e^{-mt}$
The {co2_budget_name} wildcard indicates the name of the co2 budget.
A csv file is used as input including the planning_horizons as index, the name of co2_budget as column name, and the maximum co2 emissions (relative to 1990) as values.
See details in Supplementary Note 1 of the paper `Early decarbonisation of the European Energy system pays off (2020) <https://www.nature.com/articles/s41467-020-20015-4>`__
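The decay can also be evaluated numerically. The following is a minimal sketch (not code from the repository), assuming the carbon budget equals the integral of $e(t)$ from the base year onwards; the initial emissions value ``e_0`` below is purely illustrative:

.. code:: python

    import numpy as np

    def exponential_decay(e_0, budget, r, years, base_year=2020):
        # choose the decay rate m such that the integral of
        # e(t) = e_0 * (1 + (r + m) * t) * exp(-m * t) from 0 to infinity
        # equals the budget, i.e. e_0 * (2 * m + r) / m**2 == budget
        m = (e_0 + np.sqrt(e_0**2 + budget * e_0 * r)) / budget
        t = np.asarray(years) - base_year
        return e_0 * (1 + (r + m) * t) * np.exp(-m * t)

    # e.g. 'cb40ex0': budget of 40 GtCO2, initial linear growth rate r = 0
    print(exponential_decay(e_0=3.5, budget=40., r=0., years=[2020, 2030, 2040, 2050]))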
Rules overview
=================
@ -72,17 +77,17 @@ Rules overview
General myopic code structure
===============================
The myopic code solves the network for the time steps included in planning_horizons in a recursive loop, so that:
The myopic code solves the network for the time steps included in ``planning_horizons`` in a recursive loop, so that:
1. The existing capacities (those installed before the base year) are added as fixed capacities with ``p_nom=value, p_nom_extendable=False``. E.g. for ``baseyear=2020``, capacities installed before 2020 are added. In addition, the network comprises additional generator, storage, and link capacities with ``p_nom_extendable=True``. The non-solved network is saved in ``results/run_name/networks/prenetworks-brownfield``.
The base year is the first element in planning_horizons. Step 1 is implemented with the rule add_baseyear for the base year and with the rule add_brownfield for the remaining planning_horizons.
The base year is the first element in ``planning_horizons``. Step 1 is implemented with the rule add_baseyear for the base year and with the rule add_brownfield for the remaining planning_horizons.
2.The 2020 network is optimized. The solved network is saved in results/run_name/networks/postnetworks
2. The 2020 network is optimized. The solved network is saved in ``results/run_name/networks/postnetworks``
3. For the next planning horizon, e.g. 2030, the capacities from a previous time step are added if they are still in operation (i.e., if they fulfil planning horizon <= commissioned year + lifetime). In addition, the network comprises additional generator, storage, and link capacities with p_nom_extendable=True. The non-solved network is saved in ``results/run_name/networks/prenetworks-brownfield``.
Steps 2 and 3 are solved recursively for all the planning_horizons included in the configuration file.
Steps 2 and 3 are solved recursively for all the planning_horizons included in ``config.yaml``.
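Schematically, the loop can be sketched as follows; the three functions are stand-ins for the rules named below, not actual code from the repository:

.. code:: python

    # stand-ins for the rules add_existing_baseyear, add_brownfield
    # and solve_network -- schematic only
    def add_existing_baseyear(year):
        return f"prenetwork {year}"

    def add_brownfield(solved, year):
        return f"prenetwork {year} with surviving capacities from {solved}"

    def solve(prenetwork):
        return prenetwork.replace("prenetwork", "postnetwork")

    planning_horizons = [2020, 2030, 2040, 2050]

    solved = None
    for i, year in enumerate(planning_horizons):
        n = add_existing_baseyear(year) if i == 0 else add_brownfield(solved, year)
        solved = solve(n)  # written to results/run_name/networks/postnetworks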
rule add_existing baseyear
@ -108,8 +113,8 @@ Then, the resulting network is saved in ``results/run_name/networks/prenetworks-
rule add_brownfield
===================
The rule add_brownfield loads the network in results/run_name/networks/prenetworks and performs the following operation:
The rule add_brownfield loads the network in ``results/run_name/networks/prenetworks`` and performs the following operation:
1.Read the capacities optimized in the previous time step and add them to the network if they are still in operation (i.e., if they fulfil planning horizon < commissioned year + lifetime)
1.Read the capacities optimized in the previous time step and add them to the network if they are still in operation (i.e., if they fulfill planning horizon < commissioned year + lifetime)
Then, the resulting network is saved in ``results/run_name/networks/prenetworks_brownfield``.


@ -7,3 +7,5 @@ Overnight (greenfield) scenarios
The default is to calculate a rebuilding of the energy system to meet demand, a so-called overnight or greenfield approach.
For this, use ``foresight : 'overnight'`` in ``config.yaml``, like the example in ``config.default.yaml``.
In this case, the ``planning_horizons : [2030]`` scenario parameter sets the year from which cost and other technology assumptions are taken (here, forecasts for 2030).


@ -2,6 +2,110 @@
Release Notes
##########################################
Future release
==============
.. note::
This unreleased version currently requires the master branches of PyPSA, PyPSA-Eur, and the technology-data repository.
* Extended use of ``multiprocessing`` for much better performance
(from up to 20 minutes to less than one minute).
* Compatibility with ``atlite>=0.2``. Older versions of ``atlite`` will no longer work.
* Handle most input files (or base directories) via ``snakemake.input``.
* Use of ``mock_snakemake`` from PyPSA-Eur.
* Update ``solve_network`` rule to match implementation in PyPSA-Eur by using ``n.ilopf()`` and remove outdated code using ``pyomo``.
Allows the new setting to skip iterated impedance updates with ``solving: options: skip_iterations: true``.
* The component attributes that are to be overridden are now stored in the folder
``data/override_component_attrs`` analogous to ``pypsa/component_attrs``.
This reduces verbosity and also allows circumventing the ``n.madd()`` hack
for individual components with non-default attributes.
This data is also tracked in the Snakefile.
A function ``helper.override_component_attrs`` was added that loads this data
and can pass the overridden component attributes into ``pypsa.Network()``:
>>> from helper import override_component_attrs
>>> overrides = override_component_attrs(snakemake.input.overrides)
>>> n = pypsa.Network("mynetwork.nc", override_component_attrs=overrides)
* Add various parameters to ``config.default.yaml`` which were previously hardcoded inside the scripts
(e.g. energy reference years, BEV settings, solar thermal collector models, geomap colours).
* Removed stale industry demand rules ``build_industrial_energy_demand_per_country``
and ``build_industrial_demand``. These are superseded with more regionally resolved rules.
* Use simpler and shorter ``gdf.sjoin()`` function to allocate industrial sites
from the Hotmaps database to onshore regions.
This change also fixes a bug:
The previous version allocated sites to the closest bus,
but at country borders (where Voronoi cells are distorted by the borders),
this had resulted in e.g. a Spanish site close to the French border
being wrongly allocated to the French bus if the bus center was closer.
* Bugfix: Corrected calculation of "gas for industry" carbon capture efficiency.
* Retrofitting rule is now only triggered if endogenously optimised.
* Show progress in build rules with ``tqdm`` progress bars.
* Reduced verbosity of ``Snakefile`` through directory prefixes.
* Improve legibility of ``config.default.yaml`` and remove unused options.
* Add optional function to use ``geopy`` to locate entries of the Hotmaps database of industrial sites
with missing location based on city and country, which reduces missing entries by half. It can be
activated by setting ``industry: hotmaps_locate_missing: true``, takes a few minutes longer,
and should only be used if spatial resolution is coarser than city level.
* Use the country-specific time zone mappings from ``pytz`` rather than a manual mapping.
* A function ``add_carrier_buses()`` was added to the ``prepare_network`` rule to reduce code duplication.
* In the ``prepare_network`` rule the cost and potential adjustment was moved into its
own function ``maybe_adjust_costs_and_potentials()``.
* Use ``matplotlibrc`` to set the default plotting style and backend.
* Added benchmark files for each rule.
* Implements changes to ``n.snapshot_weightings`` in upcoming PyPSA version (cf. `PyPSA/#227 <https://github.com/PyPSA/PyPSA/pull/227>`_).
* New dependencies: ``tqdm``, ``atlite>=0.2.4``, ``pytz`` and ``geopy`` (optional).
These are included in the environment specifications of PyPSA-Eur.
* Consistent use of ``__main__`` block and further unspecific code cleaning.
* Distinguish costs for home battery storage and inverter from utility-scale battery costs.
PyPSA-Eur-Sec 0.5.0 (21st May 2021)
===================================
This release includes improvements to the cost database for building retrofits, carbon budget management and wildcard settings, as well as an important bugfix for the emissions from land transport.
This release is known to work with `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`_ Version 0.3.0 and `Technology Data <https://github.com/PyPSA/technology-data>`_ Version 0.2.0.
Please note that the data bundle has also been updated.
New features and bugfixes:
* The cost database for retrofitting of the thermal envelope of buildings has been updated. Now, for calculating the space heat savings of a building, losses by thermal bridges and ventilation are included as well as heat gains (internal and by solar radiation). See the section :ref:`retro` for more details on the retrofitting module.
* For the myopic investment option, a carbon budget and a type of decay (exponential or beta) can be selected in the ``config.yaml`` file to distribute the budget across the ``planning_horizons``. For example, ``cb40ex0`` in the ``{sector_opts}`` wildcard will distribute a carbon budget of 40 GtCO2 following an exponential decay with initial growth rate 0.
* Added an option to alter the capital cost or maximum capacity of carriers by a factor via ``carrier+factor`` in the ``{sector_opts}`` wildcard. This can be useful for exploring uncertain cost parameters. Example: ``solar+c0.5`` reduces the ``capital_cost`` of solar to 50\% of original values. Similarly ``solar+p3`` multiplies the ``p_nom_max`` by 3.
* Rename the bus for European liquid hydrocarbons from ``Fischer-Tropsch`` to ``EU oil``, since it can be supplied not just with the Fischer-Tropsch process, but also with fossil oil.
* Bugfix: The new separation of land transport by carrier in Version 0.4.0 failed to account for the carbon dioxide emissions from internal combustion engines in land transport. This is now treated as a negative load on the atmospheric carbon dioxide bus, just like aviation emissions.
* Bugfix: Fix reading in of ``pypsa-eur/resources/powerplants.csv`` to PyPSA-Eur Version 0.3.0 (use column attribute name ``DateIn`` instead of old ``YearDecommissioned``).
* Bugfix: Make sure that ``Store`` components (battery and H2) are also removed from PyPSA-Eur, so they can be added later by PyPSA-Eur-Sec.
Thanks to Lisa Zeyen (KIT) for the retrofitting improvements and Marta Victoria (Aarhus University) for the carbon budget and wildcard management.
PyPSA-Eur-Sec 0.4.0 (11th December 2020)
=========================================
This release includes a more accurate nodal disaggregation of industry demand within each country, fixes to CHP and CCS representations, as well as changes to some configuration settings.
It has been released to coincide with `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`_ Version 0.3.0 and `Technology Data <https://github.com/PyPSA/technology-data>`_ Version 0.2.0, and is known to work with these releases.
New features:
* The `Hotmaps Industrial Database <https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_ is used to disaggregate the industrial demand spatially to the nodes inside each country (previously it was distributed by population density).
* Electricity demand from industry is now separated from the regular electricity demand and distributed according to the industry demand. Only the remaining regular electricity demand for households and services is distributed according to GDP and population.
* A cost database for the retrofitting of the thermal envelope of residential and services buildings has been integrated, as well as endogenous optimisation of the level of retrofitting. This is described in the paper `Mitigating heat demand peaks in buildings in a highly renewable European energy system <https://arxiv.org/abs/2012.01831>`_. Retrofitting can be activated both exogenously and endogenously from the ``config.yaml``.
* The biomass and gas combined heat and power (CHP) parameters ``c_v`` and ``c_b`` were read in assuming they were extraction plants rather than back pressure plants. The data is now corrected in `Technology Data <https://github.com/PyPSA/technology-data>`_ Version 0.2.0 to the correct DEA back pressure assumptions and they are now implemented as single links with a fixed ratio of electricity to heat output (even as extraction plants, they were always sitting on the backpressure line in simulations, so there was no point in modelling the full heat-electricity feasibility polygon). The old assumptions underestimated the heat output.
* The Danish Energy Agency released `new assumptions for carbon capture <https://ens.dk/en/our-services/projections-and-models/technology-data/technology-data-industrial-process-heat-and>`_ in October 2020, which have now been incorporated in PyPSA-Eur-Sec, including direct air capture (DAC) and post-combustion capture on CHPs, cement kilns and other industrial facilities. The electricity and heat demand for DAC is modelled for each node (with heat coming from district heating), but currently the electricity and heat demand for industrial capture is not modelled very cleanly (for process heat, 10% of the energy is assumed to go to carbon capture) - a new issue will be opened on this.
* Land transport is separated by energy carrier (fossil, hydrogen fuel cell electric vehicle, and electric vehicle), but still needs to be separated into heavy and light vehicles (the data is there, just not the code yet).
* For assumptions that change with the investment year, there is a new time-dependent format in the ``config.yaml`` using a dictionary with keys for each year. Implemented examples include the CO2 budget, exogenous retrofitting share and land transport energy carrier; more parameters will be dynamised like this in future.
* Some assumptions have been moved out of the code and into the ``config.yaml``, including the carbon sequestration potential and cost, the heat pump sink temperature, reductions in demand for high value chemicals, and some BEV DSM parameters and transport efficiencies.
* Documentation on :doc:`supply_demand` options has been added.
Many thanks to Fraunhofer ISI for opening the hotmaps database and to Lisa Zeyen (KIT) for implementing the building retrofitting.
PyPSA-Eur-Sec 0.3.0 (27th September 2020)
=========================================
@ -52,7 +156,7 @@ Many thanks to Marta Victoria for implementing the myopic foresight, and Marta V
PyPSA-Eur-Sec 0.1.0 (8th July 2020)
===================================
This is the first release of PyPSA-Eur-Sec, a model of the European energy system at the transmission network level that covers the full ENTSO-E area.
This is the first proper release of PyPSA-Eur-Sec, a model of the European energy system at the transmission network level that covers the full ENTSO-E area.
It is known to work with PyPSA-Eur v0.1.0 (commit bb3477cd69) and PyPSA v0.17.0.
@ -65,7 +169,7 @@ heating, biomass, industry and industrial feedstocks. This completes
the energy system and includes all greenhouse gas emitters except
waste management, agriculture, forestry and land use.
PyPSA-Eur-Sec was initially based on the model PyPSA-Eur-Sec-30 described
PyPSA-Eur-Sec was initially based on the model PyPSA-Eur-Sec-30 (Version 0.0.1 below) described
in the paper `Synergies of sector coupling and transmission
reinforcement in a cost-optimised, highly renewable European energy
system <https://arxiv.org/abs/1801.05290>`_ (2018) but it differs by
@ -85,6 +189,40 @@ PyPSA-Eur-Sec adds other conventional generators, storage units and
the additional sectors.
PyPSA-Eur-Sec 0.0.2 (4th September 2020)
========================================
This version, also called PyPSA-Eur-Sec-30-Path, built on
PyPSA-Eur-Sec 0.0.1 (also called PyPSA-Eur-Sec-30) to include myopic
pathway optimisation for the paper `Early decarbonisation of the
European energy system pays off <https://arxiv.org/abs/2004.11009>`_
(2020). The myopic pathway optimisation was then merged into the main
PyPSA-Eur-Sec codebase in Version 0.2.0 above.
This model has `its own github repository
<https://github.com/martavp/pypsa-eur-sec-30-path>`_ and is `archived
on Zenodo <https://zenodo.org/record/4014807>`_.
PyPSA-Eur-Sec 0.0.1 (12th January 2018)
========================================
This is the first published version of PyPSA-Eur-Sec, also called
PyPSA-Eur-Sec-30. It was first used in the research paper `Synergies of
sector coupling and transmission reinforcement in a cost-optimised,
highly renewable European energy system
<https://arxiv.org/abs/1801.05290>`_ (2018). The model covers 30
European countries with one node per country. It includes demand and
supply for electricity, space and water heating in buildings, and land
transport.
It is `archived on Zenodo <https://zenodo.org/record/1146666>`_.
Release Process
===============
@ -92,6 +230,8 @@ Release Process
* Update version number in ``doc/conf.py`` and ``*config.*.yaml``.
* Make a ``git commit``.
* Tag a release by running ``git tag v0.x.x``, ``git push``, ``git push --tags``. Include release notes in the tag message.
* Make a `GitHub release <https://github.com/PyPSA/pypsa-eur-sec/releases>`_, which automatically triggers archiving by `zenodo <https://doi.org/10.5281/zenodo.3938042>`_.
@ -102,4 +242,4 @@ To make a new release of the data bundle, make an archive of the files in ``data
.. code:: bash
data % tar pczf pypsa-eur-sec-data-bundle-date.tar.gz eea switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv
data % tar pczf pypsa-eur-sec-data-bundle-YYMMDD.tar.gz eea/UNFCCC_v23.csv switzerland-sfoe biomass eurostat-energy_balances-* jrc-idees-2015 emobility urban_percent.csv timezone_mappings.csv heat_load_profile_DK_AdamJensen.csv WindWaveWEC_GLTB.xlsx myb1-2017-nitro.xls Industrial_Database.csv retro/tabula-calculator-calcsetbuilding.csv


@ -0,0 +1,54 @@
.. _spatial_resolution:
##########################################
Spatial resolution
##########################################
The default nodal resolution of the model follows the electricity
generation and transmission model `PyPSA-Eur
<https://github.com/PyPSA/pypsa-eur>`_, which clusters down the
electricity transmission substations in each European country based on
the k-means algorithm. This gives nodes which correspond to major load
and generation centres (typically cities).
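As a rough illustration of the clustering step (not the actual PyPSA-Eur implementation, which clusters per country and can weight substations, e.g. by attached load), substation coordinates can be aggregated with k-means:

.. code:: python

    import numpy as np
    from sklearn.cluster import KMeans

    # 500 synthetic substation coordinates (lon, lat)
    rng = np.random.default_rng(0)
    coords = rng.uniform(low=[5., 47.], high=[15., 55.], size=(500, 2))

    # map every substation to one of 50 aggregated nodes
    busmap = KMeans(n_clusters=50, n_init=10, random_state=0).fit_predict(coords)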
The total number of nodes for Europe is set in the ``config.yaml`` file
under ``clusters``. The number of nodes can vary between 37, the number
of independent countries / synchronous areas, and several
hundred. With 200-300 nodes the model needs 100-150 GB RAM to solve
with a commercial solver like Gurobi.
Not all sectors are modelled at full nodal resolution, and demand for
some sectors is distributed to nodes using heuristics that still need to
be refined. Some networks are copper-plated, i.e. assumed to have no
transmission constraints, to reduce computational times.
For example:
* Electricity network: nodal.

* Electricity residential and commercial demand: nodal, distributed in
  each country based on population and GDP.

* Electricity demand in industry: based on the location of industrial
  facilities from the `HotMaps database <https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_.

* Building heating demand: nodal, distributed in each country based on
  population.

* Industry demand: nodal, distributed in each country based on
  locations of industry from the `HotMaps database <https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_.

* Hydrogen network: nodal.

* Methane network: single node for Europe, since future demand is so
  low and no bottlenecks are expected.

* Solid biomass: single node for Europe, until transport costs can be
  incorporated.

* CO2: single node for Europe, but a transport and storage cost is added for
  sequestered CO2.

* Liquid hydrocarbons: single node for Europe, since transport costs for
  liquids are low.

doc/supply_demand.rst Normal file

@ -0,0 +1,230 @@
.. _supply_demand:
##########################################
Supply and demand
##########################################
An initial orientation to the supply and demand options in the model
PyPSA-Eur-Sec can be found in the description of the model
PyPSA-Eur-Sec-30 in the paper `Synergies of sector coupling and
transmission reinforcement in a cost-optimised, highly renewable
European energy system <https://arxiv.org/abs/1801.05290>`_ (2018).
The latest version of PyPSA-Eur-Sec differs by including biomass,
industry, industrial feedstocks, aviation, shipping, better carbon
management, carbon capture and usage/sequestration, and gas networks.
The basic supply (left column) and demand (right column) options in the model are described in this figure:
.. image:: ../graphics/multisector_figure.png
Electricity supply and demand
=============================
Electricity supply and demand follows the electricity generation and
transmission model `PyPSA-Eur <https://github.com/PyPSA/pypsa-eur>`_,
except that hydrogen storage is integrated into the hydrogen supply,
demand and network, and PyPSA-Eur-Sec includes CHPs.
Unlike PyPSA-Eur, PyPSA-Eur-Sec does not distribute electricity demand for industry according to population and GDP, but uses the
geographical data from the `Hotmaps Industrial Database
<https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_.
Also unlike PyPSA-Eur, PyPSA-Eur-Sec subtracts existing electrified heating from the existing electricity demand, so that power-to-heat can be optimised separately.
The remaining electricity demand for households and services is distributed inside each country proportional to GDP and population.
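A minimal sketch of such a distribution key follows; the node names, demand value and the 60/40 weighting are illustrative assumptions, not the exact values used in the code:

.. code:: python

    import pandas as pd

    # hypothetical nodes of one country
    nodes = pd.DataFrame({
        "population": [3.6, 1.8, 0.6],  # million inhabitants
        "gdp": [180., 70., 20.],        # billion EUR
    }, index=["DE0 1", "DE0 2", "DE0 3"])

    # blend population and GDP shares into a single distribution key
    key = (0.6 * nodes.population / nodes.population.sum()
           + 0.4 * nodes.gdp / nodes.gdp.sum())

    nodal_demand = key * 500.  # distribute 500 TWh/a of national demand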
Heat demand
=============================
Heat demand is split into:
* ``urban central``: large-scale district heating networks in urban areas with dense heat demand
* ``residential/services urban decentral``: heating for individual buildings in urban areas
* ``residential/services rural``: heating for individual buildings in rural areas
Heat supply
=======================
Oil and gas boilers
--------------------
Heat pumps
-------------
Either air-to-water or ground-to-water heat pumps are implemented.
Their coefficient of performance (COP) is based on the hourly
temperature of either the external air or the soil.
Ground-source heat pumps are only allowed in rural areas because of
space constraints.
Only air-source heat pumps are allowed in urban areas. This is a
conservative assumption, since there are many possible sources of
low-temperature heat that could be tapped in cities (waste water,
rivers, lakes, seas, etc.).
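The COP curves are quadratic regressions in the temperature difference between heat sink and heat source (following Staffell et al. 2012). The coefficients below are indicative only; see the COP build rule for the values actually used:

.. code:: python

    def cop_air(delta_t):
        # air-sourced heat pump COP as a function of the sink-source
        # temperature difference delta_t in Kelvin
        return 6.81 - 0.121 * delta_t + 0.000630 * delta_t**2

    def cop_ground(delta_t):
        # ground-sourced heat pump COP
        return 8.77 - 0.150 * delta_t + 0.000734 * delta_t**2

    # e.g. heating water to 55 C with ambient air at 0 C
    print(cop_air(55.))  # roughly 2.1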
Resistive heaters
--------------------
Large Combined Heat and Power (CHP) plants
--------------------------------------------
A good summary of CHP options that can be implemented in PyPSA can be found in the paper `Cost sensitivity of optimal sector-coupled district heating production systems <https://doi.org/10.1016/j.energy.2018.10.044>`_.
PyPSA-Eur-Sec includes CHP plants fuelled by methane, hydrogen and solid biomass from waste and residues.
Hydrogen CHPs are fuel cells.
Methane and biomass CHPs are based on back pressure plants operating with a fixed ratio of electricity to heat output. The methane CHP is modelled on the Danish Energy Agency (DEA) "Gas turbine simple cycle (large)" while the solid biomass CHP is based on the DEA's "09b Wood Pellets Medium".
The efficiencies of each are given on the back pressure line, where the back pressure coefficient ``c_b`` is the electricity output divided by the heat output. The plants are not allowed to deviate from the back pressure line and are implemented as ``Link`` objects with a fixed ratio of heat to electricity output.
NB: The old PyPSA-Eur-Sec-30 model assumed an extraction plant (like the DEA coal CHP) for gas which has flexible production of heat and electricity within the feasibility diagram of Figure 4 in the `Synergies paper <https://arxiv.org/abs/1801.05290>`_. We have switched to the DEA back pressure plants since these are more common for smaller plants for biomass, and because the extraction plants were on the back pressure line for 99.5% of the time anyway. The plants were all changed to back pressure in PyPSA-Eur-Sec v0.4.0.
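A minimal sketch of such a back-pressure CHP as a multi-output ``Link`` (one output bus for electricity, one for heat) is given below. The bus names, efficiency and ``c_b`` value are illustrative, and the override path assumes the script is run from the repository root:

.. code:: python

    import pypsa
    from helper import override_component_attrs  # enables bus2/efficiency2

    overrides = override_component_attrs("data/override_component_attrs")
    n = pypsa.Network(override_component_attrs=overrides)

    for name, carrier in [("EU gas", "gas"), ("node", "AC"), ("node heat", "heat")]:
        n.add("Bus", name, carrier=carrier)

    eta = 0.468  # electrical efficiency on the back pressure line (illustrative)
    c_b = 1.0    # electricity output per unit of heat output

    n.add("Link", "node urban central gas CHP",
          bus0="EU gas", bus1="node", bus2="node heat",
          efficiency=eta,          # MWh_el per MWh_CH4
          efficiency2=eta / c_b,   # MWh_th per MWh_CH4, fixed ratio to bus1
          p_nom=100.)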
Micro-CHP for individual buildings
-----------------------------------
Optional.
Waste heat from Fuel Cells, Methanation and Fischer-Tropsch plants
-------------------------------------------------------------------
Solar thermal collectors
-------------------------
Thermal energy storage using hot water tanks
---------------------------------------------
Small tanks are used for decentralised applications, while large water
pit storage is used for district heating.
.. _retro:
Retrofitting of the thermal envelope of buildings
===================================================
Co-optimising building renovation is only enabled if the option
``retro_endogen: True`` is set in ``config.yaml``. To reduce the
computational burden, the default setting is
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 134-135
Renovation of the thermal envelope reduces the space heating demand and is
optimised at each node for every heat bus. The renovation measures considered
are additional insulation material and the replacement of energy-inefficient windows.
In a first step, costs per energy savings are estimated in :mod:`build_retro_cost.py`.
They depend on the insulation condition of the building stock and costs for
renovation of the building elements.
In a second step, from these costs per energy saving two possible renovation
strengths are determined: a moderate renovation with lower costs and lower
maximum possible space heat savings, and an ambitious renovation with
higher associated costs and higher efficiency gains. They are added by step-wise
linearisation in the form of two additional generators in
:mod:`prepare_sector_network.py`.
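As a numerical sketch of this step-wise linearisation (all numbers below are hypothetical, for illustration only):

.. code:: python

    # two linear segments approximating a convex cost vs. savings curve;
    # each segment becomes one extendable component per heat bus
    annual_heat_demand = 10e3  # MWh/a at some heat bus (hypothetical)

    segments = {
        "moderate":  dict(max_saving_share=0.25, cost_per_saved_mwh=50.),
        "ambitious": dict(max_saving_share=0.55, cost_per_saved_mwh=120.),
    }

    for name, seg in segments.items():
        max_saved = seg["max_saving_share"] * annual_heat_demand  # MWh/a
        print(f"{name}: up to {max_saved:.0f} MWh/a "
              f"at {seg['cost_per_saved_mwh']:.0f} EUR per MWh/a saved")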
Settings in ``config.yaml`` concerning the endogenous optimisation of building
renovation:
.. literalinclude:: ../config.default.yaml
:language: yaml
:lines: 136-140
Further information is given in the publication
`Mitigating heat demand peaks in buildings in a highly renewable European energy system, (2021) <https://arxiv.org/abs/2012.01831>`_.
Hydrogen demand
==================
Stationary fuel cell CHP.
Transport applications.
Industry (ammonia, precursor to hydrocarbons for chemicals and iron/steel).
Hydrogen supply
=================
Steam Methane Reforming (SMR), SMR+CCS, electrolysers.
Methane demand
==================
Can be used in boilers, in CHPs, in industry for high temperature heat, in OCGT.
Not used in transport because of methane slip (unburned methane emissions) from engines.
Methane supply
=================
Fossil, biogas, Sabatier (hydrogen to methane), HELMETH (direct power-to-methane with efficient heat integration).
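As a back-of-the-envelope sketch of the Sabatier route (CO2 + 4 H2 -> CH4 + 2 H2O); the 80% efficiency and the CO2 intensity below are illustrative assumptions, not the model's cost data:

.. code:: python

    eta = 0.8            # MWh_CH4 per MWh_H2 (illustrative, LHV basis)
    co2_intensity = 0.2  # tCO2 per MWh_CH4 (approximate value for methane)

    h2_input = 100.              # MWh hydrogen
    ch4_output = eta * h2_input  # MWh synthetic methane
    co2_demand = co2_intensity * ch4_output  # tCO2 drawn from CO2 storage

    print(ch4_output, co2_demand)  # 80.0 MWh CH4, 16.0 tCO2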
Solid biomass demand
=====================
Solid biomass provides process heat up to 500 Celsius in industry, as well as feeding CHP plants in district heating networks.
Solid biomass supply
=====================
Only wastes and residues from the JRC biomass dataset.
Oil product demand
=====================
Transport fuels and naphtha as a feedstock for the chemicals industry.
Oil product supply
======================
Fossil or Fischer-Tropsch.
Industry demand
================
Based on materials demand from JRC-IDEES and other sources such as the USGS for ammonia.
Industry is split into many sectors, including iron and steel, ammonia, other basic chemicals, cement, non-metallic minerals, aluminium, other non-ferrous metals, pulp, paper and printing, food, beverages and tobacco, and other more minor sectors.
Inside each country the industrial demand is distributed using the `Hotmaps Industrial Database <https://gitlab.com/hotmaps/industrial_sites/industrial_sites_Industrial_Database>`_.
Industry supply
================
Process switching (e.g. from blast furnaces to direct reduction and electric arc furnaces for steel) is defined exogenously.
Fuel switching for process heat is mostly also done exogenously.
Solid biomass is used for up to 500 Celsius, mostly in paper and pulp and food and beverages.
Higher temperatures are met with methane.
Carbon dioxide capture, usage and sequestration (CCU/S)
=========================================================
Carbon dioxide can be captured from industry process emissions,
emissions related to industry process heat, combined heat and power
plants, and directly from the air (DAC).
Carbon dioxide can be used as an input for methanation and
Fischer-Tropsch fuels, or it can be sequestered underground.

matplotlibrc Normal file

@ -0,0 +1,4 @@
backend: Agg
font.family: sans-serif
font.sans-serif: Ubuntu, DejaVu Sans
image.cmap: viridis


@ -2,43 +2,16 @@
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import scipy as sp
import xarray as xr
import re, os
from six import iteritems, string_types
import pypsa
import yaml
import pytz
from add_existing_baseyear import add_build_year_to_new_assets
from helper import override_component_attrs
#First tell PyPSA that links can have multiple outputs by
#overriding the component_attrs. This can be done for
#as many buses as you need with format busi for i = 2,3,4,5,....
#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"]
override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"]
override_component_attrs["Link"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Link"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
override_component_attrs["Generator"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Generator"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
override_component_attrs["Store"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
def add_brownfield(n, n_p, year):
@ -48,72 +21,85 @@ def add_brownfield(n, n_p, year):
attr = "e" if c.name == "Store" else "p"
#first, remove generators, links and stores that track CO2 or global EU values
#since these are already in n
n_p.mremove(c.name,
c.df.index[c.df.lifetime.isna()])
# first, remove generators, links and stores that track
# CO2 or global EU values since these are already in n
n_p.mremove(
c.name,
c.df.index[c.df.lifetime.isna()]
)
#remove assets whose build_year + lifetime < year
n_p.mremove(c.name,
c.df.index[c.df.build_year + c.df.lifetime < year])
# remove assets whose build_year + lifetime < year
n_p.mremove(
c.name,
c.df.index[c.df.build_year + c.df.lifetime < year]
)
#remove assets if their optimized nominal capacity is lower than a threshold
#since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
chp_heat = c.df.index[c.df[attr + "_nom_extendable"] & c.df.index.str.contains("urban central") & c.df.index.str.contains("CHP") & c.df.index.str.contains("heat")]
# remove assets if their optimized nominal capacity is lower than a threshold
# since CHP heat Link is proportional to CHP electric Link, make sure threshold is compatible
chp_heat = c.df.index[(
c.df[attr + "_nom_extendable"]
& c.df.index.str.contains("urban central")
& c.df.index.str.contains("CHP")
& c.df.index.str.contains("heat")
)]
threshold = snakemake.config['existing_capacities']['threshold_capacity']
if not chp_heat.empty:
n_p.mremove(c.name,
chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < snakemake.config['existing_capacities']['threshold_capacity']*c.df.efficiency[chp_heat.str.replace("heat","electric")].values*c.df.p_nom_ratio[chp_heat.str.replace("heat","electric")].values/c.df.efficiency[chp_heat].values])
n_p.mremove(c.name,
c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < snakemake.config['existing_capacities']['threshold_capacity'])])
threshold_chp_heat = (threshold
* c.df.efficiency[chp_heat.str.replace("heat", "electric")].values
* c.df.p_nom_ratio[chp_heat.str.replace("heat", "electric")].values
/ c.df.efficiency[chp_heat].values
)
n_p.mremove(
c.name,
chp_heat[c.df.loc[chp_heat, attr + "_nom_opt"] < threshold_chp_heat]
)
n_p.mremove(
c.name,
c.df.index[c.df[attr + "_nom_extendable"] & ~c.df.index.isin(chp_heat) & (c.df[attr + "_nom_opt"] < threshold)]
)
#copy over assets but fix their capacity
# copy over assets but fix their capacity
c.df[attr + "_nom"] = c.df[attr + "_nom_opt"]
c.df[attr + "_nom_extendable"] = False
n.import_components_from_dataframe(c.df,
c.name)
n.import_components_from_dataframe(c.df, c.name)
#copy time-dependent
for tattr in n.component_attrs[c.name].index[(n.component_attrs[c.name].type.str.contains("series") &
n.component_attrs[c.name].status.str.contains("Input"))]:
n.import_series_from_dataframe(c.pnl[tattr],
c.name,
tattr)
# copy time-dependent
selection = (
n.component_attrs[c.name].type.str.contains("series")
& n.component_attrs[c.name].status.str.contains("Input")
)
for tattr in n.component_attrs[c.name].index[selection]:
n.import_series_from_dataframe(c.pnl[tattr], c.name, tattr)
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake
snakemake = MockSnakemake(
wildcards=dict(network='elec', simpl='', clusters='37', lv='1.0',
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
co2_budget_name='go',
planning_horizons='2030'),
input=dict(network='pypsa-eur-sec/results/test/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc',
network_p='pypsa-eur-sec/results/test/postnetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_2020.nc',
costs='pypsa-eur-sec/data/costs/costs_{planning_horizons}.csv',
cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc",
cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc"),
output=['pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc']
from helper import mock_snakemake
snakemake = mock_snakemake(
'add_brownfield',
simpl='',
clusters=48,
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
planning_horizons=2030,
)
import yaml
with open('config.yaml', encoding='utf8') as f:
snakemake.config = yaml.safe_load(f)
print(snakemake.input.network_p)
logging.basicConfig(level=snakemake.config['logging_level'])
year=int(snakemake.wildcards.planning_horizons)
year = int(snakemake.wildcards.planning_horizons)
n = pypsa.Network(snakemake.input.network,
override_component_attrs=override_component_attrs)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
add_build_year_to_new_assets(n, year)
n_p = pypsa.Network(snakemake.input.network_p,
override_component_attrs=override_component_attrs)
#%%
n_p = pypsa.Network(snakemake.input.network_p, override_component_attrs=overrides)
add_brownfield(n, n_p, year)
n.export_to_netcdf(snakemake.output[0])


@ -2,261 +2,244 @@
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import scipy as sp
import xarray as xr
import re, os
from six import iteritems, string_types
import pypsa
import yaml
import pytz
from vresutils.costdata import annuity
from prepare_sector_network import prepare_costs
#First tell PyPSA that links can have multiple outputs by
#overriding the component_attrs. This can be done for
#as many buses as you need with format busi for i = 2,3,4,5,....
#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"]
override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"]
override_component_attrs["Link"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Link"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
override_component_attrs["Generator"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Generator"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
override_component_attrs["Store"].loc["build_year"] = ["integer","year",np.nan,"build year","Input (optional)"]
override_component_attrs["Store"].loc["lifetime"] = ["float","years",np.nan,"build year","Input (optional)"]
from helper import override_component_attrs
def add_build_year_to_new_assets(n, baseyear):
"""
Parameters
----------
n : network
baseyear: year in which optimized assets are built
n : pypsa.Network
baseyear : int
year in which optimized assets are built
"""
#Give assets with lifetimes and no build year the build year baseyear
# Give assets with lifetimes and no build year the build year baseyear
for c in n.iterate_components(["Link", "Generator", "Store"]):
assets = c.df.index[~c.df.lifetime.isna() & c.df.build_year.isna()]
c.df.loc[assets, "build_year"] = baseyear
#add -baseyear to name
# add -baseyear to name
rename = pd.Series(c.df.index, c.df.index)
rename[assets] += "-" + str(baseyear)
c.df.rename(index=rename, inplace=True)
#rename time-dependent
for attr in n.component_attrs[c.name].index[(n.component_attrs[c.name].type.str.contains("series") &
n.component_attrs[c.name].status.str.contains("Input"))]:
# rename time-dependent
selection = (
n.component_attrs[c.name].type.str.contains("series")
& n.component_attrs[c.name].status.str.contains("Input")
)
for attr in n.component_attrs[c.name].index[selection]:
c.pnl[attr].rename(columns=rename, inplace=True)
def add_existing_renewables(df_agg):
"""
Append existing renewables to the df_agg pd.DataFrame
with the conventional power plants.
"""
cc = pd.read_csv('data/Country_codes.csv',
index_col=0)
cc = pd.read_csv(snakemake.input.country_codes, index_col=0)
carriers = {"solar" : "solar",
"onwind" : "onwind",
"offwind" : "offwind-ac"}
carriers = {
"solar": "solar",
"onwind": "onwind",
"offwind": "offwind-ac"
}
for tech in ['solar', 'onwind', 'offwind']:
carrier = carriers[tech]
df = pd.read_csv('data/existing_infrastructure/{}_capacity_IRENA.csv'.format(tech),
index_col=0)
df = df.fillna(0.)
df = pd.read_csv(snakemake.input[f"existing_{tech}"], index_col=0).fillna(0.)
df.columns = df.columns.astype(int)
df.rename(index={'Czechia':'Czech Republic',
'UK':'United Kingdom',
'Bosnia Herzg':'Bosnia Herzegovina',
'North Macedonia': 'Macedonia'}, inplace=True)
rename_countries = {
'Czechia': 'Czech Republic',
'UK': 'United Kingdom',
'Bosnia Herzg': 'Bosnia Herzegovina',
'North Macedonia': 'Macedonia'
}
df.rename(index=rename_countries, inplace=True)
df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)
# calculate yearly differences
df.insert(loc=0, value=.0, column='1999')
df = df.diff(axis=1).drop('1999', axis=1)
df = df.clip(lower=0)
df = df.diff(axis=1).drop('1999', axis=1).clip(lower=0)
#distribute capacities among nodes according to capacity factor
#weighting with nodal_fraction
# distribute capacities among nodes according to capacity factor
# weighting with nodal_fraction
elec_buses = n.buses.index[n.buses.carrier == "AC"].union(n.buses.index[n.buses.carrier == "DC"])
nodal_fraction = pd.Series(0.,elec_buses)
nodal_fraction = pd.Series(0., elec_buses)
for country in n.buses.loc[elec_buses,"country"].unique():
for country in n.buses.loc[elec_buses, "country"].unique():
gens = n.generators.index[(n.generators.index.str[:2] == country) & (n.generators.carrier == carrier)]
cfs = n.generators_t.p_max_pu[gens].mean()
cfs_key = cfs/cfs.sum()
nodal_fraction.loc[n.generators.loc[gens,"bus"]] = cfs_key.values
cfs_key = cfs / cfs.sum()
nodal_fraction.loc[n.generators.loc[gens, "bus"]] = cfs_key.values
nodal_df = df.loc[n.buses.loc[elec_buses,"country"]]
nodal_df = df.loc[n.buses.loc[elec_buses, "country"]]
nodal_df.index = elec_buses
nodal_df = nodal_df.multiply(nodal_fraction,axis=0)
nodal_df = nodal_df.multiply(nodal_fraction, axis=0)
for year in nodal_df.columns:
for node in nodal_df.index:
name = f"{node}-{tech}-{year}"
capacity = nodal_df.loc[node,year]
capacity = nodal_df.loc[node, year]
if capacity > 0.:
df_agg.at[name,"Fueltype"] = tech
df_agg.at[name,"Capacity"] = capacity
df_agg.at[name,"YearCommissioned"] = year
df_agg.at[name,"cluster_bus"] = node
df_agg.at[name, "Fueltype"] = tech
df_agg.at[name, "Capacity"] = capacity
df_agg.at[name, "DateIn"] = year
df_agg.at[name, "cluster_bus"] = node
def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear):
"""
Parameters
----------
n : network
grouping_years : intervals to group existing capacities
costs : to read lifetime to estimate YearDecomissioning
n : pypsa.Network
grouping_years :
intervals to group existing capacities
costs :
to read lifetime to estimate YearDecommissioning
baseyear : int
"""
print("adding power capacities installed before baseyear")
print("adding power capacities installed before baseyear from powerplants.csv")
### add conventional capacities using 'powerplants.csv'
df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0)
rename_fuel = {'Hard Coal':'coal',
'Lignite':'lignite',
'Nuclear':'nuclear',
'Oil':'oil',
'OCGT':'OCGT',
'CCGT':'CCGT',
'Natural Gas':'gas',}
fueltype_to_drop = ['Hydro',
'Wind',
'Solar',
'Geothermal',
'Bioenergy',
'Waste',
'Other',
'CCGT, Thermal']
technology_to_drop = ['Pv',
'Storage Technologies']
rename_fuel = {
'Hard Coal': 'coal',
'Lignite': 'lignite',
'Nuclear': 'nuclear',
'Oil': 'oil',
'OCGT': 'OCGT',
'CCGT': 'CCGT',
'Natural Gas': 'gas'
}
df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)],inplace=True)
df_agg.drop(df_agg.index[df_agg.Technology.isin(technology_to_drop)],inplace=True)
fueltype_to_drop = [
'Hydro',
'Wind',
'Solar',
'Geothermal',
'Bioenergy',
'Waste',
'Other',
'CCGT, Thermal'
]
technology_to_drop = [
'Pv',
'Storage Technologies'
]
df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True)
df_agg.drop(df_agg.index[df_agg.Technology.isin(technology_to_drop)], inplace=True)
df_agg.Fueltype = df_agg.Fueltype.map(rename_fuel)
#assign clustered bus
busmap_s = pd.read_hdf(snakemake.input.clustermaps,
key="/busmap_s")
busmap = pd.read_hdf(snakemake.input.clustermaps,
key="/busmap")
# assign clustered bus
busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0, squeeze=True)
busmap = pd.read_csv(snakemake.input.busmap, index_col=0, squeeze=True)
clustermaps = busmap_s.map(busmap)
clustermaps.index = clustermaps.index.astype(int)
df_agg["cluster_bus"] = df_agg.bus.map(clustermaps)
#include renewables in df_agg
# include renewables in df_agg
add_existing_renewables(df_agg)
df_agg["grouping_year"] = np.take(grouping_years,
np.digitize(df_agg.YearCommissioned,
grouping_years,
right=True))
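# bin each asset by its commissioning year (DateIn): np.digitize with
# right=True maps DateIn to the first grouping-year boundary >= DateIn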
df_agg["grouping_year"] = np.take(
grouping_years,
np.digitize(df_agg.DateIn, grouping_years, right=True)
)
df = df_agg.pivot_table(index=["grouping_year",'Fueltype'], columns='cluster_bus',
values='Capacity', aggfunc='sum')
df = df_agg.pivot_table(
index=["grouping_year", 'Fueltype'],
columns='cluster_bus',
values='Capacity',
aggfunc='sum'
)
carrier = {"OCGT" : "gas",
"CCGT" : "gas",
"coal" : "coal",
"oil" : "oil",
"lignite" : "lignite",
"nuclear" : "uranium"}
carrier = {
"OCGT": "gas",
"CCGT": "gas",
"coal": "coal",
"oil": "oil",
"lignite": "lignite",
"nuclear": "uranium"
}
for grouping_year, generator in df.index:
#capacity is the capacity in MW at each node for this
# capacity is the capacity in MW at each node for this
capacity = df.loc[grouping_year, generator]
capacity = capacity[~capacity.isna()]
capacity = capacity[capacity > snakemake.config['existing_capacities']['threshold_capacity']]
if generator in ['solar', 'onwind', 'offwind']:
if generator =='offwind':
p_max_pu=n.generators_t.p_max_pu[capacity.index + ' offwind-ac' + '-' + str(baseyear)]
else:
p_max_pu=n.generators_t.p_max_pu[capacity.index + ' ' + generator + '-' + str(baseyear)]
rename = {"offwind": "offwind-ac"}
p_max_pu=n.generators_t.p_max_pu[capacity.index + ' ' + rename.get(generator, generator) + '-' + str(baseyear)]
n.madd("Generator",
capacity.index,
suffix=' ' + generator +"-"+ str(grouping_year),
bus=capacity.index,
carrier=generator,
p_nom=capacity,
marginal_cost=costs.at[generator,'VOM'],
capital_cost=costs.at[generator,'fixed'],
efficiency=costs.at[generator, 'efficiency'],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[generator,'lifetime'])
capacity.index,
suffix=' ' + generator +"-"+ str(grouping_year),
bus=capacity.index,
carrier=generator,
p_nom=capacity,
marginal_cost=costs.at[generator, 'VOM'],
capital_cost=costs.at[generator, 'fixed'],
efficiency=costs.at[generator, 'efficiency'],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[generator, 'lifetime']
)
else:
n.madd("Link",
capacity.index,
suffix= " " + generator +"-" + str(grouping_year),
bus0="EU " + carrier[generator],
bus1=capacity.index,
bus2="co2 atmosphere",
carrier=generator,
marginal_cost=costs.at[generator,'efficiency']*costs.at[generator,'VOM'], #NB: VOM is per MWel
capital_cost=costs.at[generator,'efficiency']*costs.at[generator,'fixed'], #NB: fixed cost is per MWel
p_nom=capacity/costs.at[generator,'efficiency'],
efficiency=costs.at[generator,'efficiency'],
efficiency2=costs.at[carrier[generator],'CO2 intensity'],
build_year=grouping_year,
lifetime=costs.at[generator,'lifetime'])
capacity.index,
suffix= " " + generator +"-" + str(grouping_year),
bus0="EU " + carrier[generator],
bus1=capacity.index,
bus2="co2 atmosphere",
carrier=generator,
marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel
capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel
p_nom=capacity / costs.at[generator, 'efficiency'],
efficiency=costs.at[generator, 'efficiency'],
efficiency2=costs.at[carrier[generator], 'CO2 intensity'],
build_year=grouping_year,
lifetime=costs.at[generator, 'lifetime']
)
def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime):
"""
Parameters
----------
n : network
baseyear: last year covered in the existing capacities database
n : pypsa.Network
baseyear : last year covered in the existing capacities database
grouping_years : intervals to group existing capacities
linear decomissioning of heating capacities from 2020 to 2045 is
currently assumed
heating capacities split between residential and services proportional
to heating load in both
50% capacities in rural busess 50% in urban buses
linear decommissioning of heating capacities from 2020 to 2045 is
currently assumed; heating capacities are split between residential and
services proportionally to the heating load in both; 50% of the
capacities are assigned to rural buses and 50% to urban buses
"""
print("adding heating capacities installed before baseyear")
@ -265,43 +248,42 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
# heating/cooling fuel deployment (fossil/renewables) "
# https://ec.europa.eu/energy/studies/mapping-and-analyses-current-and-future-2020-2030-heatingcooling-fuel-deployment_en?redir=1
# file: "WP2_DataAnnex_1_BuildingTechs_ForPublication_201603.xls" -> "existing_heating_raw.csv".
# TODO start from original file
# retrieve existing heating capacities
techs = ['gas boiler',
'oil boiler',
'resistive heater',
'air heat pump',
'ground heat pump']
df = pd.read_csv('data/existing_infrastructure/existing_heating_raw.csv',
index_col=0,
header=0)
# data for Albania, Montenegro and Macedonia not included in database
df.loc['Albania']=np.nan
df.loc['Montenegro']=np.nan
df.loc['Macedonia']=np.nan
df.fillna(0, inplace=True)
df *= 1e3 # GW to MW
techs = [
'gas boiler',
'oil boiler',
'resistive heater',
'air heat pump',
'ground heat pump'
]
df = pd.read_csv(snakemake.input.existing_heating, index_col=0, header=0)
cc = pd.read_csv('data/Country_codes.csv',
index_col=0)
# data for Albania, Montenegro and Macedonia not included in database
df.loc['Albania'] = np.nan
df.loc['Montenegro'] = np.nan
df.loc['Macedonia'] = np.nan
df.fillna(0., inplace=True)
# convert GW to MW
df *= 1e3
cc = pd.read_csv(snakemake.input.country_codes, index_col=0)
df.rename(index=cc["2 letter code (ISO-3166-2)"], inplace=True)
# coal and oil boilers are assimilated to oil boilers
df['oil boiler'] =df['oil boiler'] + df['coal boiler']
df['oil boiler'] = df['oil boiler'] + df['coal boiler']
df.drop(['coal boiler'], axis=1, inplace=True)
# distribute technologies to nodes by population
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout,
index_col=0)
pop_layout["ct"] = pop_layout.index.str[:2]
ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum()
pop_layout["ct_total"] = pop_layout["ct"].map(ct_total.get)
pop_layout["fraction"] = pop_layout["total"]/pop_layout["ct_total"]
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
nodal_df = df.loc[pop_layout.ct]
nodal_df.index = pop_layout.index
nodal_df = nodal_df.multiply(pop_layout.fraction,axis=0)
nodal_df = nodal_df.multiply(pop_layout.fraction, axis=0)
# split existing capacities between residential and services
# proportional to energy demand
@ -311,121 +293,126 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
for node in nodal_df.index], index=nodal_df.index)
for tech in techs:
nodal_df['residential ' + tech] = nodal_df[tech]*ratio_residential
nodal_df['services ' + tech] = nodal_df[tech]*(1-ratio_residential)
nodal_df['residential ' + tech] = nodal_df[tech] * ratio_residential
nodal_df['services ' + tech] = nodal_df[tech] * (1 - ratio_residential)
nodes={}
p_nom={}
for name in ["residential rural",
"services rural",
"residential urban decentral",
"services urban decentral",
"urban central"]:
names = [
"residential rural",
"services rural",
"residential urban decentral",
"services urban decentral",
"urban central"
]
nodes = {}
p_nom = {}
for name in names:
name_type = "central" if name == "urban central" else "decentral"
nodes[name] = pd.Index([n.buses.at[index,"location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]])
nodes[name] = pd.Index([n.buses.at[index, "location"] for index in n.buses.index[n.buses.index.str.contains(name) & n.buses.index.str.contains('heat')]])
heat_pump_type = "air" if "urban" in name else "ground"
heat_type= "residential" if "residential" in name else "services"
if name == "urban central":
p_nom[name]=nodal_df['air heat pump'][nodes[name]]
p_nom[name] = nodal_df['air heat pump'][nodes[name]]
else:
p_nom[name] = nodal_df['{} {} heat pump'.format(heat_type, heat_pump_type)][nodes[name]]
p_nom[name] = nodal_df[f'{heat_type} {heat_pump_type} heat pump'][nodes[name]]
# Add heat pumps
costs_name = "{} {}-sourced heat pump".format("decentral", heat_pump_type)
costs_name = f"decentral {heat_pump_type}-sourced heat pump"
cop = {"air": ashp_cop, "ground": gshp_cop}
if time_dep_hp_cop:
efficiency = cop[heat_pump_type][nodes[name]]
else:
efficiency = costs.at[costs_name, 'efficiency']
for i, grouping_year in enumerate(grouping_years):
cop = {"air" : ashp_cop, "ground" : gshp_cop}
efficiency = cop[heat_pump_type][nodes[name]] if time_dep_hp_cop else costs.at[costs_name,'efficiency']
for i,grouping_year in enumerate(grouping_years):
if int(grouping_year) + default_lifetime <= int(baseyear):
ratio=0
ratio = 0
else:
#installation is assumed to be linear for the past 25 years (default lifetime)
ratio = (int(grouping_year)-int(grouping_years[i-1]))/default_lifetime
# installation is assumed to be linear for the past 25 years (default lifetime)
ratio = (int(grouping_year) - int(grouping_years[i-1])) / default_lifetime
n.madd("Link",
nodes[name],
suffix=" {} {} heat pump-{}".format(name,heat_pump_type, grouping_year),
bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat",
carrier="{} {} heat pump".format(name,heat_pump_type),
efficiency=efficiency,
capital_cost=costs.at[costs_name,'efficiency']*costs.at[costs_name,'fixed'],
p_nom=p_nom[name]*ratio/costs.at[costs_name,'efficiency'],
build_year=int(grouping_year),
lifetime=costs.at[costs_name,'lifetime'])
nodes[name],
suffix=f" {name} {heat_pump_type} heat pump-{grouping_year}",
bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat",
carrier=f"{name} {heat_pump_type} heat pump",
efficiency=efficiency,
capital_cost=costs.at[costs_name, 'efficiency'] * costs.at[costs_name, 'fixed'],
p_nom=p_nom[name] * ratio / costs.at[costs_name, 'efficiency'],
build_year=int(grouping_year),
lifetime=costs.at[costs_name, 'lifetime']
)
# add resistive heater, gas boilers and oil boilers
# (50% capacities to rural buses, 50% to urban buses)
n.madd("Link",
nodes[name],
suffix= " " + name + " resistive heater-{}".format(grouping_year),
bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat",
carrier=name + " resistive heater",
efficiency=costs.at[name_type + ' resistive heater','efficiency'],
capital_cost=costs.at[name_type + ' resistive heater','efficiency']*costs.at[name_type + ' resistive heater','fixed'],
p_nom=0.5*nodal_df['{} resistive heater'.format(heat_type)][nodes[name]]*ratio/costs.at[name_type + ' resistive heater','efficiency'],
build_year=int(grouping_year),
lifetime=costs.at[costs_name,'lifetime'])
nodes[name],
suffix=f" {name} resistive heater-{grouping_year}",
bus0=nodes[name],
bus1=nodes[name] + " " + name + " heat",
carrier=name + " resistive heater",
efficiency=costs.at[name_type + ' resistive heater', 'efficiency'],
capital_cost=costs.at[name_type + ' resistive heater', 'efficiency'] * costs.at[name_type + ' resistive heater', 'fixed'],
p_nom=0.5 * nodal_df[f'{heat_type} resistive heater'][nodes[name]] * ratio / costs.at[name_type + ' resistive heater', 'efficiency'],
build_year=int(grouping_year),
lifetime=costs.at[costs_name, 'lifetime']
)
n.madd("Link",
nodes[name],
suffix=f" {name} gas boiler-{grouping_year}",
bus0="EU gas",
bus1=nodes[name] + " " + name + " heat",
bus2="co2 atmosphere",
carrier=name + " gas boiler",
efficiency=costs.at[name_type + ' gas boiler', 'efficiency'],
efficiency2=costs.at['gas', 'CO2 intensity'],
capital_cost=costs.at[name_type + ' gas boiler', 'efficiency'] * costs.at[name_type + ' gas boiler', 'fixed'],
p_nom=0.5 * nodal_df[f'{heat_type} gas boiler'][nodes[name]] * ratio / costs.at[name_type + ' gas boiler', 'efficiency'],
build_year=int(grouping_year),
lifetime=costs.at[name_type + ' gas boiler', 'lifetime']
)
n.madd("Link",
nodes[name],
suffix=f" {name} oil boiler-{grouping_year}",
bus0="EU oil",
bus1=nodes[name] + " " + name + " heat",
bus2="co2 atmosphere",
carrier=name + " oil boiler",
efficiency=costs.at['decentral oil boiler', 'efficiency'],
efficiency2=costs.at['oil', 'CO2 intensity'],
capital_cost=costs.at['decentral oil boiler', 'efficiency'] * costs.at['decentral oil boiler', 'fixed'],
p_nom=0.5 * nodal_df[f'{heat_type} oil boiler'][nodes[name]] * ratio / costs.at['decentral oil boiler', 'efficiency'],
build_year=int(grouping_year),
lifetime=costs.at['decentral oil boiler', 'lifetime']
)
# delete links with p_nom=nan corresponding to extra nodes in country
n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and np.isnan(n.links.p_nom[index])])
# delete links whose capacity is below the configured threshold
threshold = snakemake.config['existing_capacities']['threshold_capacity']
n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and n.links.p_nom[index] < threshold])
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'add_existing_baseyear',
simpl='',
clusters=45,
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
planning_horizons=2020,
)
logging.basicConfig(level=snakemake.config['logging_level'])
@@ -434,24 +421,27 @@ if __name__ == "__main__":
baseyear = snakemake.config['scenario']["planning_horizons"][0]
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
add_build_year_to_new_assets(n, baseyear)
Nyears = n.snapshot_weightings.generators.sum() / 8760.
costs = prepare_costs(
snakemake.input.costs,
snakemake.config['costs']['USD2013_to_EUR2013'],
snakemake.config['costs']['discountrate'],
Nyears,
snakemake.config['costs']['lifetime']
)
grouping_years = snakemake.config['existing_capacities']['grouping_years']
add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear)
if "H" in opts:
time_dep_hp_cop = options["time_dep_hp_cop"]
ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots)
gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots)
default_lifetime = snakemake.config['costs']['lifetime']
add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)

View File

@@ -1,45 +1,53 @@
"""Build ammonia production."""
import pandas as pd
country_to_alpha2 = {
"Austriae": "AT",
"Bulgaria": "BG",
"Belgiume": "BE",
"Croatia": "HR",
"Czechia": "CZ",
"Estonia": "EE",
"Finland": "FI",
"France": "FR",
"Germany": "DE",
"Greece": "GR",
"Hungarye": "HU",
"Italye": "IT",
"Lithuania": "LT",
"Netherlands": "NL",
"Norwaye": "NO",
"Poland": "PL",
"Romania": "RO",
"Serbia": "RS",
"Slovakia": "SK",
"Spain": "ES",
"Switzerland": "CH",
"United Kingdom": "GB",
}
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_ammonia_production')
ammonia = pd.read_excel(snakemake.input.usgs,
sheet_name="T12",
skiprows=5,
header=0,
index_col=0,
skipfooter=19)
ammonia.rename(country_to_alpha2, inplace=True)
years = [str(i) for i in range(2013, 2018)]
countries = country_to_alpha2.values()
ammonia = ammonia.loc[countries, years].astype(float)
# convert from ktonN to ktonNH3 (molar masses: N is 14 g/mol, NH3 is 17 g/mol)
ammonia *= 17 / 14
ammonia.index.name = "ktonNH3/a"
ammonia.to_csv(snakemake.output.ammonia_production)

View File

@@ -1,63 +1,68 @@
import pandas as pd
rename = {"UK" : "GB", "BH" : "BA"}
def build_biomass_potentials():
# note: delete the empty column C from this sheet before reading it in
config = snakemake.config['biomass']
year = config["year"]
scenario = config["scenario"]
df = pd.read_excel(snakemake.input.jrc_potentials,
"Potentials (PJ)",
index_col=[0, 1])
df.rename(columns={"Unnamed: 18": "Municipal waste"}, inplace=True)
df.drop(columns="Total", inplace=True)
df.replace("-", 0., inplace=True)
column = df.iloc[:, 0]
countries = column.where(column.str.isalpha()).pad()
countries = [rename.get(ct, ct) for ct in countries]
countries_i = pd.Index(countries, name='country')
df.set_index(countries_i, append=True, inplace=True)
df.drop(index='MS', level=0, inplace=True)
# convert from PJ to MWh
df = df / 3.6 * 1e6
df.to_csv(snakemake.output.biomass_potentials_all)
# solid biomass includes:
# Primary agricultural residues (MINBIOAGRW1),
# Forestry energy residue (MINBIOFRSF1),
# Secondary forestry residues (MINBIOWOOW1),
# Secondary forestry residues sawdust (MINBIOWOO1a),
# Forestry residues from landscape care biomass (MINBIOFRSF1a),
# Municipal waste (MINBIOMUN1)
# biogas includes:
# Manure biomass potential (MINBIOGAS1),
# Sludge biomass (MINBIOSLU1)
df = df.loc[year, scenario, :]
grouper = {v: k for k, vv in config["classes"].items() for v in vv}
df = df.groupby(grouper, axis=1).sum()
df.index.name = "MWh/a"
df.to_csv(snakemake.output.biomass_potentials)
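# for illustration (assumed config layout): 'classes' maps class names to
# lists of JRC categories, e.g. {'solid biomass': ['Forestry energy residue', ...]};
# the inverted grouper dict then collapses the JRC columns into these classes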
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_biomass_potentials')
# This is a hack, to be replaced once snakemake is unicode-conform
solid_biomass = snakemake.config['biomass']['classes']['solid biomass']
if 'Secondary Forestry residues sawdust' in solid_biomass:
solid_biomass.remove('Secondary Forestry residues sawdust')
solid_biomass.append('Secondary Forestry residues – sawdust')
build_biomass_potentials()

View File

@@ -1,32 +1,36 @@
"""Build clustered population layouts."""
import geopandas as gpd
import xarray as xr
import pandas as pd
import atlite
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_clustered_population_layouts',
simpl='',
clusters=48,
)
cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])
clustered_regions = gpd.read_file(
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
I = cutout.indicatormatrix(clustered_regions)
pop = {}
for item in ["total", "urban", "rural"]:
pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{item}'])
pop[item] = I.dot(pop_layout.stack(spatial=('y', 'x')))
pop = pd.DataFrame(pop, index=clustered_regions.index)
pop["ct"] = pop.index.str[:2]
country_population = pop.total.groupby(pop.ct).sum()
pop["fraction"] = pop.total / pop.ct.map(country_population)
pop.to_csv(snakemake.output.clustered_pop_layout)

View File

@@ -1,25 +1,40 @@
"""Build COP time series for air- or ground-sourced heat pumps."""
import xarray as xr
def coefficient_of_performance(delta_T, source='air'):
"""
COP is function of temp difference source to sink.
The quadratic regression is based on Staffell et al. (2012)
https://doi.org/10.1039/C2EE22653G.
"""
if source == 'air':
return 6.81 - 0.121 * delta_T + 0.000630 * delta_T**2
elif source == 'soil':
return 8.77 - 0.150 * delta_T + 0.000734 * delta_T**2
else:
raise NotImplementedError("'source' must be one of ['air', 'soil']")
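# for illustration: with a sink temperature of 55C and a source temperature
# of 25C (delta_T = 30 K), the air-sourced regression gives
# COP = 6.81 - 0.121*30 + 0.000630*30**2 = 3.747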
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_cop_profiles',
simpl='',
clusters=48,
)
for area in ["total", "urban", "rural"]:
for source in ["air", "soil"]:
source_T = xr.open_dataarray(
snakemake.input[f"temp_{source}_{area}"])
delta_T = snakemake.config['sector']['heat_pump_sink_T'] - source_T
cop = coefficient_of_performance(delta_T, source)
cop.to_netcdf(snakemake.output[f"cop_{source}_{area}"])

File diff suppressed because it is too large

View File

@@ -1,42 +1,46 @@
"""Build heat demand time series."""
import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import numpy as np
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_heat_demands',
simpl='',
clusters=48,
)
time = pd.date_range(freq='h', **snakemake.config['snapshots'])
cutout_config = snakemake.config['atlite']['cutout']
cutout = atlite.Cutout(cutout_config).sel(time=time)
clustered_regions = gpd.read_file(
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
I = cutout.indicatormatrix(clustered_regions)
for area in ["rural", "urban", "total"]:
pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{area}'])
stacked_pop = pop_layout.stack(spatial=('y', 'x'))
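# M weights each grid cell by its population when the degree-day heat demand
# below is aggregated from cells to the clustered regions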
M = I.T.dot(np.diag(I.dot(stacked_pop)))
heat_demand = cutout.heat_demand(
matrix=M.T, index=clustered_regions.index)
heat_demand.to_netcdf(snakemake.output[f"heat_demand_{area}"])

View File

@@ -1,39 +0,0 @@
import pandas as pd
idx = pd.IndexSlice
def build_industrial_demand():
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout,index_col=0)
pop_layout["ct"] = pop_layout.index.str[:2]
ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum()
pop_layout["ct_total"] = pop_layout["ct"].map(ct_total)
pop_layout["fraction"] = pop_layout["total"]/pop_layout["ct_total"]
industrial_demand_per_country = pd.read_csv(snakemake.input.industrial_demand_per_country,index_col=0)
industrial_demand = industrial_demand_per_country.loc[pop_layout.ct].fillna(0.)
industrial_demand.index = pop_layout.index
industrial_demand = industrial_demand.multiply(pop_layout.fraction,axis=0)
industrial_demand.to_csv(snakemake.output.industrial_demand)
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
snakemake.input = Dict()
snakemake.input['clustered_pop_layout'] = "resources/pop_layout_elec_s_128.csv"
snakemake.input['industrial_demand_per_country']="resources/industrial_demand_per_country.csv"
snakemake.output = Dict()
snakemake.output['industrial_demand'] = "resources/industrial_demand_elec_s_128.csv"
with open('config.yaml', encoding='utf8') as f:
snakemake.config = yaml.safe_load(f)
build_industrial_demand()

View File

@@ -1,153 +1,131 @@
"""Build industrial distribution keys from hotmaps database."""
import uuid
import pandas as pd
import geopandas as gpd
from itertools import product
def locate_missing_industrial_sites(df):
"""
Locate industrial sites without valid locations based on
city and countries. Should only be used if the model's
spatial resolution is coarser than individual cities.
"""
try:
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
except ImportError:
raise ModuleNotFoundError("Optional dependency 'geopy' not found. "
"Install via 'conda install -c conda-forge geopy' "
"or set 'industry: hotmaps_locate_missing: false'.")
locator = Nominatim(user_agent=str(uuid.uuid4()))
geocode = RateLimiter(locator.geocode, min_delay_seconds=2)
def locate_missing(s):
if pd.isna(s.City) or s.City == "CONFIDENTIAL":
return None
loc = geocode([s.City, s.Country], geometry='wkt')
if loc is not None:
print(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n")
return f"POINT({loc.longitude} {loc.latitude})"
else:
return None
missing = df.index[df.geom.isna()]
df.loc[missing, 'coordinates'] = df.loc[missing].apply(locate_missing, axis=1)
# report stats
num_still_missing = df.coordinates.isna().sum()
num_found = len(missing) - num_still_missing
share_missing = len(missing) / len(df) * 100
share_still_missing = num_still_missing / len(df) * 100
print(f"Found {num_found} missing locations.",
f"Share of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.")
return df
def prepare_hotmaps_database(regions):
"""
Load hotmaps database of industrial sites and map onto bus regions.
"""
df = pd.read_csv(snakemake.input.hotmaps_industrial_database, sep=";", index_col=0)
df[["srid", "coordinates"]] = df.geom.str.split(';', expand=True)
if snakemake.config['industry'].get('hotmaps_locate_missing', False):
df = locate_missing_industrial_sites(df)
# remove those sites without valid locations
df.drop(df.index[df.coordinates.isna()], inplace=True)
df['coordinates'] = gpd.GeoSeries.from_wkt(df['coordinates'])
gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326")
gdf = gpd.sjoin(gdf, regions, how="inner", op='within')
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]
return gdf
def build_nodal_distribution_key(hotmaps, regions):
"""Build nodal distribution keys for each sector."""
sectors = hotmaps.Subsector.unique()
countries = regions.index.str[:2].unique()
keys = pd.DataFrame(index=regions.index, columns=sectors, dtype=float)
pop = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
pop['country'] = pop.index.str[:2]
ct_total = pop.total.groupby(pop['country']).sum()
keys['population'] = pop.total / pop.country.map(ct_total)
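# the population key gives each region's share of its country's population;
# it serves below as the fallback for sectors without any recorded facility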
for sector, country in product(sectors, countries):
regions_ct = regions.index[regions.index.str.contains(country)]
facilities = hotmaps.query("country == @country and Subsector == @sector")
if not facilities.empty:
emissions = facilities["Emissions_ETS_2014"]
if emissions.sum() == 0:
key = pd.Series(1 / len(facilities), facilities.index)
else:
# BEWARE: this is a strong assumption - facilities with missing ETS
# emissions are assigned the mean emissions of the other facilities in the
# same country and subsector
emissions = emissions.fillna(emissions.mean())
key = emissions / emissions.sum()
key = key.groupby(facilities.bus).sum().reindex(regions_ct, fill_value=0.)
else:
key = keys.loc[regions_ct, 'population']
keys.loc[regions_ct, sector] = key
return keys
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_industrial_distribution_key',
simpl='',
clusters=48,
)
regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name')
hotmaps = prepare_hotmaps_database(regions)
keys = build_nodal_distribution_key(hotmaps, regions)
keys.to_csv(snakemake.output.industrial_distribution_key)

View File

@@ -1,83 +0,0 @@
import pandas as pd
import numpy as np
tj_to_ktoe = 0.0238845
ktoe_to_twh = 0.01163
eb_base_dir = "data/eurostat-energy_balances-may_2018_edition"
jrc_base_dir = "data/jrc-idees-2015"
# import EU ratios df as csv
industry_sector_ratios=pd.read_csv(snakemake.input.industry_sector_ratios,
index_col=0)
#material demand per country and industry (kton/a)
countries_production = pd.read_csv(snakemake.input.industrial_production_per_country, index_col=0)
#Annual energy consumption in Switzerland by sector in 2015 (in TJ)
#From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat
#http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775
dic_Switzerland ={'Iron and steel': 7889.,
'Chemicals Industry': 26871.,
'Non-metallic mineral products': 15513.+3820.,
'Pulp, paper and printing': 12004.,
'Food, beverages and tobacco': 17728.,
'Non Ferrous Metals': 3037.,
'Transport Equipment': 14993.,
'Machinery Equipment': 4724.,
'Textiles and leather': 1742.,
'Wood and wood products': 0.,
'Other Industrial Sectors': 10825.,
'current electricity': 53760.}
eb_names={'NO':'Norway', 'AL':'Albania', 'BA':'Bosnia and Herzegovina',
'MK':'FYR of Macedonia', 'GE':'Georgia', 'IS':'Iceland',
'KO':'Kosovo', 'MD':'Moldova', 'ME':'Montenegro', 'RS':'Serbia',
'UA':'Ukraine', 'TR':'Turkey', }
jrc_names = {"GR" : "EL",
"GB" : "UK"}
#final energy consumption per country and industry (TWh/a)
countries_df = countries_production.dot(industry_sector_ratios.T)
countries_df*= 0.001 #GWh -> TWh (ktCO2 -> MtCO2)
non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL']
# save current electricity consumption
for country in countries_df.index:
if country in non_EU:
if country == 'CH':
countries_df.loc[country, 'current electricity']=dic_Switzerland['current electricity']*tj_to_ktoe*ktoe_to_twh
else:
excel_balances = pd.read_excel('{}/{}.XLSX'.format(eb_base_dir,eb_names[country]),
sheet_name='2016', index_col=1,header=0, skiprows=1 ,squeeze=True)
countries_df.loc[country, 'current electricity'] = excel_balances.loc['Industry', 'Electricity']*ktoe_to_twh
else:
excel_out = pd.read_excel('{}/JRC-IDEES-2015_Industry_{}.xlsx'.format(jrc_base_dir,jrc_names.get(country,country)),
sheet_name='Ind_Summary',index_col=0,header=0,squeeze=True) # the summary sheet
s_out = excel_out.iloc[27:48,-1]
countries_df.loc[country, 'current electricity'] = s_out['Electricity']*ktoe_to_twh
rename_sectors = {'elec':'electricity',
'biomass':'solid biomass',
'heat':'low-temperature heat'}
countries_df.rename(columns=rename_sectors,inplace=True)
countries_df.index.name = "TWh/a (MtCO2/a)"
countries_df.to_csv(snakemake.output.industrial_energy_demand_per_country,
float_format='%.2f')

View File

@@ -1,140 +1,165 @@
"""Build industrial energy demand per country."""
import pandas as pd
import multiprocessing as mp
from tqdm import tqdm
ktoe_to_twh = 0.011630
# name in JRC-IDEES Energy Balances
sector_sheets = {'Integrated steelworks': 'cisb',
'Electric arc': 'cise',
'Alumina production': 'cnfa',
'Aluminium - primary production': 'cnfp',
'Aluminium - secondary production': 'cnfs',
'Other non-ferrous metals': 'cnfo',
'Basic chemicals': 'cbch',
'Other chemicals': 'coch',
'Pharmaceutical products etc.': 'cpha',
'Basic chemicals feedstock': 'cpch',
'Cement': 'ccem',
'Ceramics & other NMM': 'ccer',
'Glass production': 'cgla',
'Pulp production': 'cpul',
'Paper production': 'cpap',
'Printing and media reproduction': 'cprp',
'Food, beverages and tobacco': 'cfbt',
'Transport Equipment': 'ctre',
'Machinery Equipment': 'cmae',
'Textiles and leather': 'ctel',
'Wood and wood products': 'cwwp',
'Mining and quarrying': 'cmiq',
'Construction': 'ccon',
'Non-specified': 'cnsi',
}
fuels = {'All Products': 'all',
'Solid Fuels': 'solid',
'Total petroleum products (without biofuels)': 'liquid',
'Gases': 'gas',
'Nuclear heat': 'heat',
'Derived heat': 'heat',
'Biomass and Renewable wastes': 'biomass',
'Wastes (non-renewable)': 'waste',
'Electricity': 'electricity'
}
eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI',
'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ',
'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT']
jrc_names = {"GR": "EL", "GB": "UK"}
def industrial_energy_demand_per_country(country):
jrc_dir = snakemake.input.jrc
jrc_country = jrc_names.get(country, country)
fn = f'{jrc_dir}/JRC-IDEES-2015_EnergyBalance_{jrc_country}.xlsx'
sheets = list(sector_sheets.values())
df_dict = pd.read_excel(fn, sheet_name=sheets, index_col=0)
def get_subsector_data(sheet):
df = df_dict[sheet][year].groupby(fuels).sum()
df['other'] = df['all'] - df.loc[df.index != 'all'].sum()
return df
df = pd.concat({sub: get_subsector_data(sheet)
for sub, sheet in sector_sheets.items()}, axis=1)
sel = ['Mining and quarrying', 'Construction', 'Non-specified']
df['Other Industrial Sectors'] = df[sel].sum(axis=1)
df['Basic chemicals'] += df['Basic chemicals feedstock']
df.drop(columns=sel+['Basic chemicals feedstock'], index='all', inplace=True)
df *= ktoe_to_twh
return df
def add_ammonia_energy_demand(demand):
# MtNH3/a
fn = snakemake.input.ammonia_production
ammonia = pd.read_csv(fn, index_col=0)[str(year)] / 1e3
def ammonia_by_fuel(x):
fuels = {'gas': config['MWh_CH4_per_tNH3_SMR'],
'electricity': config['MWh_elec_per_tNH3_SMR']}
return pd.Series({k: x*v for k,v in fuels.items()})
ammonia = ammonia.apply(ammonia_by_fuel).T
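# rows are countries, columns 'gas' and 'electricity'; with production in
# MtNH3/a and intensities in MWh/tNH3 the result comes out in TWh/a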
demand['Ammonia'] = ammonia.unstack().reindex(index=demand.index, fill_value=0.)
demand['Basic chemicals (without ammonia)'] = demand["Basic chemicals"] - demand["Ammonia"]
demand['Basic chemicals (without ammonia)'].clip(lower=0, inplace=True)
demand.drop(columns='Basic chemicals', inplace=True)
return demand
def add_non_eu28_industrial_energy_demand(demand):
# output in MtMaterial/a
fn = snakemake.input.industrial_production_per_country
production = pd.read_csv(fn, index_col=0) / 1e3
eu28_production = production.loc[eu28].sum()
eu28_energy = demand.groupby(level=1).sum()
eu28_averages = eu28_energy / eu28_production
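# eu28_averages is the EU28-average energy intensity per sector and carrier
# (TWh per Mt of material output), used below to extrapolate to non-EU28 countries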
non_eu28 = production.index.symmetric_difference(eu28)
demand_non_eu28 = pd.concat({k: v * eu28_averages
for k, v in production.loc[non_eu28].iterrows()})
return pd.concat([demand, demand_non_eu28])
def industrial_energy_demand(countries):
nprocesses = snakemake.threads
func = industrial_energy_demand_per_country
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
desc="Build industrial energy demand")
with mp.Pool(processes=nprocesses) as pool:
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
demand = pd.concat(demand_l, keys=countries)
return demand
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_energy_demand_per_country_today')
config = snakemake.config['industry']
year = config.get('reference_year', 2015)
demand = industrial_energy_demand(eu28)
demand = add_ammonia_energy_demand(demand)
demand = add_non_eu28_industrial_energy_demand(demand)
# for format compatibility
demand = demand.stack(dropna=False).unstack(level=[0,2])
# style and annotation
demand.index.name = 'TWh/a'
demand.sort_index(axis=1, inplace=True)
fn = snakemake.output.industrial_energy_demand_per_country_today
demand.to_csv(fn)

View File

@@ -1,33 +1,44 @@
"""Build industrial energy demand per node."""
import pandas as pd
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_industrial_energy_demand_per_node',
simpl='',
clusters=48,
)
# import EU ratios df as csv
fn = snakemake.input.industry_sector_ratios
industry_sector_ratios = pd.read_csv(fn, index_col=0)
# material demand per node and industry (kton/a)
fn = snakemake.input.industrial_production_per_node
nodal_production = pd.read_csv(fn, index_col=0)
# energy demand today to get current electricity
fn = snakemake.input.industrial_energy_demand_per_node_today
nodal_today = pd.read_csv(fn, index_col=0)
# final energy consumption per node and industry (TWh/a)
nodal_df = nodal_production.dot(industry_sector_ratios.T)
# convert GWh to TWh and ktCO2 to MtCO2
nodal_df *= 0.001
rename_sectors = {
'elec': 'electricity',
'biomass': 'solid biomass',
'heat': 'low-temperature heat'
}
nodal_df.rename(columns=rename_sectors, inplace=True)
nodal_df["current electricity"] = nodal_today["electricity"]
nodal_df.index.name = "TWh/a (MtCO2/a)"
fn = snakemake.output.industrial_energy_demand_per_node
nodal_df.to_csv(fn, float_format='%.2f')

View File

@@ -1,54 +1,73 @@
"""Build industrial energy demand per node."""
import pandas as pd
import numpy as np
from itertools import product
# map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = {
'Electric arc': 'Iron and steel',
'Integrated steelworks': 'Iron and steel',
'DRI + Electric arc': 'Iron and steel',
'Ammonia': 'Chemical industry',
'Basic chemicals (without ammonia)': 'Chemical industry',
'Other chemicals': 'Chemical industry',
'Pharmaceutical products etc.': 'Chemical industry',
'Cement': 'Cement',
'Ceramics & other NMM': 'Non-metallic mineral products',
'Glass production': 'Glass',
'Pulp production': 'Paper and printing',
'Paper production': 'Paper and printing',
'Printing and media reproduction': 'Paper and printing',
'Alumina production': 'Non-ferrous metals',
'Aluminium - primary production': 'Non-ferrous metals',
'Aluminium - secondary production': 'Non-ferrous metals',
'Other non-ferrous metals': 'Non-ferrous metals',
}
def build_nodal_industrial_energy_demand():
fn = snakemake.input.industrial_energy_demand_per_country_today
industrial_demand = pd.read_csv(fn, header=[0, 1], index_col=0)
fn = snakemake.input.industrial_distribution_key
keys = pd.read_csv(fn, index_col=0)
keys["country"] = keys.index.str[:2]
nodal_demand = pd.DataFrame(0., dtype=float,
index=keys.index,
columns=industrial_demand.index)
countries = keys.country.unique()
sectors = industrial_demand.columns.levels[1]
for country, sector in product(countries, sectors):
buses = keys.index[keys.country == country]
mapping = sector_mapping.get(sector, 'population')
key = keys.loc[buses, mapping]
demand = industrial_demand[country, sector]
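# np.outer(key, demand) yields a (bus x carrier) block: each bus receives
# the country's sectoral demand scaled by its distribution-key share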
outer = pd.DataFrame(np.outer(key, demand),
index=key.index,
columns=demand.index)
nodal_demand.loc[buses] += outer
nodal_demand.index.name = "TWh/a"
nodal_demand.to_csv(snakemake.output.industrial_energy_demand_per_node_today)
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_industrial_energy_demand_per_node_today',
simpl='',
clusters=48,
)
build_nodal_industrial_energy_demand()

View File

@@ -1,218 +1,222 @@
"""Build industrial production per country."""
import pandas as pd
import numpy as np
import multiprocessing as mp
from tqdm import tqdm
tj_to_ktoe = 0.0238845
ktoe_to_twh = 0.01163
sub_sheet_name_dict = {'Iron and steel': 'ISI',
'Chemicals Industry': 'CHI',
'Non-metallic mineral products': 'NMM',
'Pulp, paper and printing': 'PPA',
'Food, beverages and tobacco': 'FBT',
'Non Ferrous Metals': 'NFM',
'Transport Equipment': 'TRE',
'Machinery Equipment': 'MAE',
'Textiles and leather': 'TEL',
'Wood and wood products': 'WWP',
'Other Industrial Sectors': 'OIS'}
non_EU = ['NO', 'CH', 'ME', 'MK', 'RS', 'BA', 'AL']
jrc_names = {"GR": "EL", "GB": "UK"}
eu28 = ['FR', 'DE', 'GB', 'IT', 'ES', 'PL', 'SE', 'NL', 'BE', 'FI',
'DK', 'PT', 'RO', 'AT', 'BG', 'EE', 'GR', 'LV', 'CZ',
'HU', 'IE', 'SK', 'LT', 'HR', 'LU', 'SI', 'CY', 'MT']
sect2sub = {'Iron and steel': ['Electric arc', 'Integrated steelworks'],
'Chemicals Industry': ['Basic chemicals', 'Other chemicals', 'Pharmaceutical products etc.'],
'Non-metallic mineral products': ['Cement', 'Ceramics & other NMM', 'Glass production'],
'Pulp, paper and printing': ['Pulp production', 'Paper production', 'Printing and media reproduction'],
'Food, beverages and tobacco': ['Food, beverages and tobacco'],
'Non Ferrous Metals': ['Alumina production', 'Aluminium - primary production', 'Aluminium - secondary production', 'Other non-ferrous metals'],
'Transport Equipment': ['Transport Equipment'],
'Machinery Equipment': ['Machinery Equipment'],
'Textiles and leather': ['Textiles and leather'],
'Wood and wood products': ['Wood and wood products'],
'Other Industrial Sectors': ['Other Industrial Sectors']}
sub2sect = {v: k for k, vv in sect2sub.items() for v in vv}
#material demand per country and industry (kton/a)
countries_demand = pd.DataFrame(index=countries,
columns=subsectors,
dtype=float)
out_dic ={'Electric arc': 'Electric arc',
fields = {'Electric arc': 'Electric arc',
'Integrated steelworks': 'Integrated steelworks',
'Basic chemicals': 'Basic chemicals (kt ethylene eq.)',
'Other chemicals': 'Other chemicals (kt ethylene eq.)',
'Pharmaceutical products etc.': 'Pharmaceutical products etc. (kt ethylene eq.)',
'Cement': 'Cement (kt)',
'Ceramics & other NMM': 'Ceramics & other NMM (kt bricks eq.)',
'Glass production': 'Glass production (kt)',
'Pulp production': 'Pulp production (kt)',
'Paper production': 'Paper production (kt)',
'Printing and media reproduction': 'Printing and media reproduction (kt paper eq.)',
'Food, beverages and tobacco': 'Physical output (index)',
'Alumina production': 'Alumina production (kt)',
'Aluminium - primary production': 'Aluminium - primary production',
'Aluminium - secondary production': 'Aluminium - secondary production',
'Other non-ferrous metals': 'Other non-ferrous metals (kt lead eq.)',
'Transport Equipment': 'Physical output (index)',
'Machinery Equipment': 'Physical output (index)',
'Textiles and leather': 'Physical output (index)',
'Wood and wood products': 'Physical output (index)',
'Other Industrial Sectors': 'Physical output (index)'}
eb_names = {'NO': 'Norway', 'AL': 'Albania', 'BA': 'Bosnia and Herzegovina',
'MK': 'FYR of Macedonia', 'GE': 'Georgia', 'IS': 'Iceland',
'KO': 'Kosovo', 'MD': 'Moldova', 'ME': 'Montenegro', 'RS': 'Serbia',
'UA': 'Ukraine', 'TR': 'Turkey', }
eb_sectors = {'Iron & steel industry': 'Iron and steel',
'Chemical and Petrochemical industry': 'Chemicals Industry',
'Non-ferrous metal industry': 'Non-metallic mineral products',
'Paper, Pulp and Print': 'Pulp, paper and printing',
'Food and Tabacco': 'Food, beverages and tobacco',
'Non-metallic Minerals (Glass, pottery & building mat. Industry)': 'Non Ferrous Metals',
'Transport Equipment': 'Transport Equipment',
'Machinery': 'Machinery Equipment',
'Textile and Leather': 'Textiles and leather',
'Wood and Wood Products': 'Wood and wood products',
'Non-specified (Industry)': 'Other Industrial Sectors'}
# TODO: this should go in a csv in `data`
# Annual energy consumption in Switzerland by sector in 2015 (in TJ)
# From: Energieverbrauch in der Industrie und im Dienstleistungssektor, Der Bundesrat
# http://www.bfe.admin.ch/themen/00526/00541/00543/index.html?lang=de&dossier_id=00775
e_switzerland = pd.Series({'Iron and steel': 7889.,
'Chemicals Industry': 26871.,
'Non-metallic mineral products': 15513.+3820.,
'Pulp, paper and printing': 12004.,
'Food, beverages and tobacco': 17728.,
'Non Ferrous Metals': 3037.,
'Transport Equipment': 14993.,
'Machinery Equipment': 4724.,
'Textiles and leather': 1742.,
'Wood and wood products': 0.,
'Other Industrial Sectors': 10825.,
'current electricity': 53760.})
def find_physical_output(df):
start = np.where(df.index.str.contains('Physical output', na=''))[0][0]
empty_row = np.where(df.index.isnull())[0]
end = empty_row[np.argmax(empty_row > start)]
return slice(start, end)
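# the JRC-IDEES industry sheets list physical production in a block that
# starts at the 'Physical output' row and ends at the next empty row;
# the slice above selects exactly that block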
def get_energy_ratio(country):
if country == 'CH':
e_country = e_switzerland * tj_to_ktoe
else:
# estimate physical output, energy consumption in the sector and country
fn = f"{eurostat_dir}/{eb_names[country]}.XLSX"
df = pd.read_excel(fn, sheet_name='2016', index_col=2,
header=0, skiprows=1, squeeze=True)
e_country = df.loc[eb_sectors.keys(
), 'Total all products'].rename(eb_sectors)
fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_EU28.xlsx'
df = pd.read_excel(fn, sheet_name='Ind_Summary',
index_col=0, header=0, squeeze=True)
assert df.index[48] == "by sector"
year_i = df.columns.get_loc(year)
e_eu28 = df.iloc[49:76, year_i]
e_eu28.index = e_eu28.index.str.lstrip()
e_ratio = e_country / e_eu28
return pd.Series({k: e_ratio[v] for k, v in sub2sect.items()})
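# non-EU28 countries are assigned the EU28 production profile scaled by the
# ratio of their sectoral energy consumption (from the Eurostat energy
# balances) to the EU28 total (from JRC-IDEES)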
def industry_production_per_country(country):
def get_sector_data(sector, country):
jrc_country = jrc_names.get(country, country)
fn = f'{jrc_dir}/JRC-IDEES-2015_Industry_{jrc_country}.xlsx'
sheet = sub_sheet_name_dict[sector]
df = pd.read_excel(fn, sheet_name=sheet,
index_col=0, header=0, squeeze=True)
year_i = df.columns.get_loc(year)
df = df.iloc[find_physical_output(df), year_i]
df = df.loc[map(fields.get, sect2sub[sector])]
df.index = sect2sub[sector]
return df
ct = "EU28" if country in non_EU else country
demand = pd.concat([get_sector_data(s, ct) for s in sect2sub.keys()])
if country in non_EU:
demand *= get_energy_ratio(country)
demand.name = country
return demand
def industry_production(countries):
nprocesses = snakemake.threads
func = industry_production_per_country
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
desc="Build industry production")
with mp.Pool(processes=nprocesses) as pool:
demand_l = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
demand = pd.concat(demand_l, axis=1).T
demand.index.name = "kton/a"
return demand
def add_ammonia_demand_separately(demand):
"""Include ammonia demand separately and remove ammonia from basic chemicals."""
ammonia = pd.read_csv(snakemake.input.ammonia_production, index_col=0)
there = ammonia.index.intersection(demand.index)
missing = demand.index.symmetric_difference(there)
print("Following countries have no ammonia demand:", missing)
demand.insert(2, "Ammonia", 0.)
demand.loc[there, "Ammonia"] = ammonia.loc[there, str(year)]
demand["Basic chemicals"] -= demand["Ammonia"]
# EE, HR and LT got negative demand through subtraction - poor data
demand['Basic chemicals'].clip(lower=0., inplace=True)
to_rename = {"Basic chemicals": "Basic chemicals (without ammonia)"}
demand.rename(columns=to_rename, inplace=True)
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_country')
countries = non_EU + eu28
year = snakemake.config['industry']['reference_year']
jrc_dir = snakemake.input.jrc
eurostat_dir = snakemake.input.eurostat
demand = industry_production(countries)
add_ammonia_demand_separately(demand)
fn = snakemake.output.industrial_production_per_country
demand.to_csv(fn, float_format='%.2f')

View File

@ -1,27 +1,39 @@
"""Build future industrial production per country."""
import pandas as pd
industrial_production = pd.read_csv(snakemake.input.industrial_production_per_country,
index_col=0)
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_country_tomorrow')
total_steel = industrial_production[["Integrated steelworks","Electric arc"]].sum(axis=1)
config = snakemake.config["industry"]
fraction_primary_stays_primary = snakemake.config["industry"]["St_primary_fraction"]*total_steel.sum()/industrial_production["Integrated steelworks"].sum()
fn = snakemake.input.industrial_production_per_country
production = pd.read_csv(fn, index_col=0)
industrial_production.insert(2, "DRI + Electric arc",
fraction_primary_stays_primary*industrial_production["Integrated steelworks"])
keys = ["Integrated steelworks", "Electric arc"]
total_steel = production[keys].sum(axis=1)
industrial_production["Electric arc"] = total_steel - industrial_production["DRI + Electric arc"]
industrial_production["Integrated steelworks"] = 0.
int_steel = production["Integrated steelworks"].sum()
fraction_persistent_primary = config["St_primary_fraction"] * total_steel.sum() / int_steel
dri = fraction_persistent_primary * production["Integrated steelworks"]
production.insert(2, "DRI + Electric arc", dri)
total_aluminium = industrial_production[["Aluminium - primary production","Aluminium - secondary production"]].sum(axis=1)
production["Electric arc"] = total_steel - production["DRI + Electric arc"]
production["Integrated steelworks"] = 0.
fraction_primary_stays_primary = snakemake.config["industry"]["Al_primary_fraction"]*total_aluminium.sum()/industrial_production["Aluminium - primary production"].sum()
keys = ["Aluminium - primary production", "Aluminium - secondary production"]
total_aluminium = production[keys].sum(axis=1)
industrial_production["Aluminium - primary production"] = fraction_primary_stays_primary*industrial_production["Aluminium - primary production"]
industrial_production["Aluminium - secondary production"] = total_aluminium - industrial_production["Aluminium - primary production"]
key_pri = "Aluminium - primary production"
key_sec = "Aluminium - secondary production"
fraction_persistent_primary = config["Al_primary_fraction"] * total_aluminium.sum() / production[key_pri].sum()
production[key_pri] = fraction_persistent_primary * production[key_pri]
production[key_sec] = total_aluminium - production[key_pri]
production["Basic chemicals (without ammonia)"] *= config['HVC_primary_fraction']
industrial_production.to_csv(snakemake.output.industrial_production_per_country_tomorrow,
float_format='%.2f')
fn = snakemake.output.industrial_production_per_country_tomorrow
production.to_csv(fn, float_format='%.2f')

View File

@ -1,47 +1,63 @@
"""Build industrial production per node."""
import pandas as pd
from itertools import product
# map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = {
'Electric arc': 'Iron and steel',
'Integrated steelworks': 'Iron and steel',
'DRI + Electric arc': 'Iron and steel',
'Ammonia': 'Chemical industry',
'Basic chemicals (without ammonia)': 'Chemical industry',
'Other chemicals': 'Chemical industry',
'Pharmaceutical products etc.': 'Chemical industry',
'Cement': 'Cement',
'Ceramics & other NMM': 'Non-metallic mineral products',
'Glass production': 'Glass',
'Pulp production': 'Paper and printing',
'Paper production': 'Paper and printing',
'Printing and media reproduction': 'Paper and printing',
'Alumina production': 'Non-ferrous metals',
'Aluminium - primary production': 'Non-ferrous metals',
'Aluminium - secondary production': 'Non-ferrous metals',
'Other non-ferrous metals': 'Non-ferrous metals',
}
def build_nodal_industrial_production():
industrial_production = pd.read_csv(snakemake.input.industrial_production_per_country_tomorrow,
index_col=0)
fn = snakemake.input.industrial_production_per_country_tomorrow
industrial_production = pd.read_csv(fn, index_col=0)
distribution_keys = pd.read_csv(snakemake.input.industrial_distribution_key,
index_col=0)
distribution_keys["country"] = distribution_keys.index.str[:2]
fn = snakemake.input.industrial_distribution_key
keys = pd.read_csv(fn, index_col=0)
keys["country"] = keys.index.str[:2]
nodal_industrial_production = pd.DataFrame(index=distribution_keys.index,
columns=industrial_production.columns,
dtype=float)
nodal_production = pd.DataFrame(index=keys.index,
columns=industrial_production.columns,
dtype=float)
#map JRC/our sectors to hotmaps sector, where mapping exist
sector_mapping = {'Electric arc' : 'Iron and steel',
'Integrated steelworks' : 'Iron and steel',
'DRI + Electric arc' : 'Iron and steel',
'Ammonia' : 'Chemical industry',
'Basic chemicals (without ammonia)' : 'Chemical industry',
'Other chemicals' : 'Chemical industry',
'Pharmaceutical products etc.' : 'Chemical industry',
'Cement' : 'Cement',
'Ceramics & other NMM' : 'Non-metallic mineral products',
'Glass production' : 'Glass',
'Pulp production' : 'Paper and printing',
'Paper production' : 'Paper and printing',
'Printing and media reproduction' : 'Paper and printing',
'Alumina production' : 'Non-ferrous metals',
'Aluminium - primary production' : 'Non-ferrous metals',
'Aluminium - secondary production' : 'Non-ferrous metals',
'Other non-ferrous metals' : 'Non-ferrous metals',
}
countries = keys.country.unique()
sectors = industrial_production.columns
for country, sector in product(countries, sectors):
for c in distribution_keys.country.unique():
buses = distribution_keys.index[distribution_keys.country == c]
for sector in industrial_production.columns:
distribution_key = distribution_keys.loc[buses,sector_mapping.get(sector,"population")]
nodal_industrial_production.loc[buses,sector] = industrial_production.at[c,sector]*distribution_key
buses = keys.index[keys.country == country]
mapping = sector_mapping.get(sector, "population")
key = keys.loc[buses, mapping]
nodal_production.loc[buses, sector] = industrial_production.at[country, sector] * key
nodal_production.to_csv(snakemake.output.industrial_production_per_node)
nodal_industrial_production.to_csv(snakemake.output.industrial_production_per_node)
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_industrial_production_per_node',
simpl='',
clusters=48,
)
build_nodal_industrial_production()

File diff suppressed because it is too large Load Diff

View File

@ -1,103 +1,98 @@
"""Build mapping between grid cells and population (total, urban, rural)"""
# Build mapping between grid cells and population (total, urban, rural)
import multiprocessing as mp
import atlite
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd
from vresutils import shapes as vshapes
import geopandas as gpd
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_population_layouts')
cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.input = Dict()
snakemake.output = Dict()
grid_cells = cutout.grid_cells()
snakemake.input["urban_percent"] = "data/urban_percent.csv"
# nuts3 has columns country, gdp, pop, geometry
# population is given in dimensions of 1e3=k
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
cutout = atlite.Cutout(snakemake.config['atlite']['cutout_name'],
cutout_dir=snakemake.config['atlite']['cutout_dir'])
# Indicator matrix NUTS3 -> grid cells
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
grid_cells = cutout.grid_cells()
# Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is the identity,
# but numerical imprecision means it is not exact
Iinv = cutout.indicatormatrix(nuts3.geometry)
#nuts3 has columns country, gdp, pop, geometry
#population is given in dimensions of 1e3=k
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
countries = np.sort(nuts3.country.unique())
urban_fraction = pd.read_csv(snakemake.input.urban_percent,
header=None, index_col=0,
names=['fraction'], squeeze=True) / 100.
# Indicator matrix NUTS3 -> grid cells
I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
# fill missing Balkans values
missing = ["AL", "ME", "MK"]
reference = ["RS", "BA"]
average = urban_fraction[reference].mean()
fill_values = pd.Series({ct: average for ct in missing})
urban_fraction = urban_fraction.append(fill_values)
# Indicator matrix grid_cells -> NUTS3; in principle Iinv*I is the identity,
# but numerical imprecision means it is not exact
Iinv = cutout.indicatormatrix(nuts3.geometry)
# population in each grid cell
pop_cells = pd.Series(I.dot(nuts3['pop']))
countries = nuts3.country.value_counts().index.sort_values()
# in km^2
with mp.Pool(processes=snakemake.threads) as pool:
cell_areas = pd.Series(pool.map(vshapes.area, grid_cells)) / 1e6
urban_fraction = pd.read_csv(snakemake.input.urban_percent,
header=None,index_col=0,squeeze=True)/100.
# pop per km^2
density_cells = pop_cells / cell_areas
#fill missing Balkans values
missing = ["AL","ME","MK"]
reference = ["RS","BA"]
urban_fraction = urban_fraction.reindex(urban_fraction.index|missing)
urban_fraction.loc[missing] = urban_fraction[reference].mean()
# rural or urban population in grid cell
pop_rural = pd.Series(0., density_cells.index)
pop_urban = pd.Series(0., density_cells.index)
for ct in countries:
print(ct, urban_fraction[ct])
#population in each grid cell
pop_cells = pd.Series(I.dot(nuts3['pop']))
indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.)
#in km^2
cell_areas = pd.Series(cutout.grid_cells()).map(vshapes.area)/1e6
indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct))
#pop per km^2
density_cells = pop_cells/cell_areas
density_cells_ct = indicator_cells_ct * density_cells
pop_cells_ct = indicator_cells_ct * pop_cells
#rural or urban population in grid cell
pop_rural = pd.Series(0.,density_cells.index)
pop_urban = pd.Series(0.,density_cells.index)
# correct for imprecision of Iinv*I
pop_ct = nuts3.loc[nuts3.country==ct,'pop'].sum()
pop_cells_ct *= pop_ct / pop_cells_ct.sum()
for ct in countries:
print(ct,urban_fraction[ct])
# The first low density grid cells to reach rural fraction are rural
asc_density_i = density_cells_ct.sort_values().index
asc_density_cumsum = pop_cells_ct[asc_density_i].cumsum() / pop_cells_ct.sum()
rural_fraction_ct = 1 - urban_fraction[ct]
pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct
pop_ct_urban_b = ~pop_ct_rural_b
indicator_nuts3_ct = pd.Series(0.,nuts3.index)
indicator_nuts3_ct[nuts3.index[nuts3.country==ct]] = 1.
pop_ct_rural_b[indicator_cells_ct == 0.] = False
pop_ct_urban_b[indicator_cells_ct == 0.] = False
indicator_cells_ct = pd.Series(Iinv.T.dot(indicator_nuts3_ct))
pop_rural += pop_cells_ct.where(pop_ct_rural_b, 0.)
pop_urban += pop_cells_ct.where(pop_ct_urban_b, 0.)
density_cells_ct = indicator_cells_ct*density_cells
pop_cells = {"total": pop_cells}
pop_cells["rural"] = pop_rural
pop_cells["urban"] = pop_urban
pop_cells_ct = indicator_cells_ct*pop_cells
for key, pop in pop_cells.items():
#correct for imprecision of Iinv*I
pop_ct = nuts3['pop'][indicator_nuts3_ct.index[indicator_nuts3_ct == 1.]].sum()
pop_cells_ct = pop_cells_ct*pop_ct/pop_cells_ct.sum()
ycoords = ('y', cutout.coords['y'])
xcoords = ('x', cutout.coords['x'])
values = pop.values.reshape(cutout.shape)
layout = xr.DataArray(values, [ycoords, xcoords])
# The first low density grid cells to reach rural fraction are rural
index_from_low_d_to_high_d = density_cells_ct.sort_values().index
pop_ct_rural_b = pop_cells_ct[index_from_low_d_to_high_d].cumsum()/pop_cells_ct.sum() < (1-urban_fraction[ct])
pop_ct_urban_b = ~pop_ct_rural_b
pop_ct_rural_b[indicator_cells_ct==0.] = False
pop_ct_urban_b[indicator_cells_ct==0.] = False
pop_rural += pop_cells_ct.where(pop_ct_rural_b,0.)
pop_urban += pop_cells_ct.where(pop_ct_urban_b,0.)
pop_cells = {"total" : pop_cells}
pop_cells["rural"] = pop_rural
pop_cells["urban"] = pop_urban
for key in pop_cells.keys():
layout = xr.DataArray(pop_cells[key].values.reshape(cutout.shape),
[('y', cutout.coords['y']), ('x', cutout.coords['x'])])
layout.to_netcdf(snakemake.output["pop_layout_"+key])
layout.to_netcdf(snakemake.output[f"pop_layout_{key}"])

884
scripts/build_retro_cost.py Normal file
View File

@ -0,0 +1,884 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 10:36:39 2021
This script calculates the space heating savings achievable through better
insulation of the thermal envelope of a building and the corresponding costs
for different building types in different countries.
-----------------METHODOLOGY ------------------------------------------------
The energy savings calculations are based on the
EN ISO 13790 / seasonal method https://www.iso.org/obp/ui/#iso:std:iso:13790:ed-2:v1:en:
- calculations heavily oriented on the TABULAWebTool
http://webtool.building-typology.eu/
http://www.episcope.eu/fileadmin/tabula/public/docs/report/TABULA_CommonCalculationMethod.pdf
which is following the EN ISO 13790 / seasonal method
- building stock data:
mainly: hotmaps project https://gitlab.com/hotmaps/building-stock
missing: EU building observatory https://ec.europa.eu/energy/en/eu-buildings-database
- building types with typical surfaces/ standard values:
- tabula https://episcope.eu/fileadmin/tabula/public/calc/tabula-calculator.xlsx
---------------------BASIC EQUATIONS -------------------------------------------
The basic equations:
The energy needed for space heating E_space [W/m²] is calculated as the
difference between heat losses and heat gains:
E_space = H_losses - H_gains
Heat losses comprise the losses through heat transmission (H_tr [W/m²K])
(this includes heat transfer through building elements and thermal bridges)
and losses by ventilation (H_ve [W/m²K]):
H_losses = (H_tr + H_ve) * F_red * (T_threshold - T_averaged_d_heat) * d_heat * 1/365
F_red : reduction factor, considering non-uniform heating, p.16 chapter 2.6 [-]
T_threshold : heating temperature threshold, assumed 15 °C
d_heat : length of heating season, number of days with daily average temperature below T_threshold
T_averaged_d_heat : mean daily average temperature of the days within heating season d_heat
Heat gains comprise the gains by solar radiation (H_solar) and
internal heat gains (H_int), weighted by a gain utilisation factor nu:
H_gains = nu * (H_solar + H_int)
---------------- STRUCTURE OF THE SCRIPT --------------------------------------
The script has the following structure:
(i) fixed parameters are set
(ii) functions
(1) prepare data, bring to same format
(2) calculate space heat demand depending on additional insulation material
(3) calculate costs for corresponding additional insulation material
(4) get cost savings per retrofitting measures for each sector by weighting
with heated floor area
-------------------------------------------------------------------------------
@author: Lisa
"""
import pandas as pd
import xarray as xr
# (i) --- FIXED PARAMETER / STANDARD VALUES -----------------------------------
# thermal conductivity standard value
k = 0.035
# strength of relative retrofitting depending on the component
# determined by historical data of insulation thickness for retrofitting
l_weight = pd.DataFrame({"weight": [1.95, 1.48, 1.]},
index=["Roof", "Wall", "Floor"])
# standard room height [m], used to calculate heat transfer by ventilation
h_room = 2.5
# volume specific heat capacity air [Wh/m^3K]
c_p_air = 0.34
# internal heat capacity per m² A_c_ref [Wh/(m^2K)]
c_m = 45
# average thermal output of the internal heat sources per m^2 reference area [W/m^2]
phi_int = 3
# constant parameter tau_H_0 [h] according to EN 13790 seasonal method
tau_H_0 = 30
# constant parameter alpha_H_0 [-] according to EN 13790 seasonal method
alpha_H_0 = 0.8
# parameters for solar heat load during heating season -------------------------
# TABULA standard values, table p.8 in the documentation
external_shading = 0.6 # vertical orientation: fraction of window area shaded [-]
frame_area_fraction = 0.3 # fraction of frame area of window [-]
non_perpendicular = 0.9 # reduction factor, considering radiation non-perpendicular to the glazing [-]
solar_energy_transmittance = 0.5 # solar energy transmittance for radiation perpendicular to the glazing [-]
# solar global radiation [kWh/(m^2a)]
solar_global_radiation = pd.Series([246, 401, 246, 148],
index=["east", "south", "west", "north"],
name="solar_global_radiation [kWh/(m^2a)]")
# threshold temperature for heating [Celsius] --------------------------------
t_threshold = 15
# rename residential sub-sectors
rename_sectors = {'Single family- Terraced houses': "SFH",
'Multifamily houses': "MFH",
'Appartment blocks': "AB"}
# additional insulation thickness, determines maximum possible savings [m]
l_strength = [
"0.07","0.075", "0.08", "0.1", "0.15",
"0.22", "0.24", "0.26"
]
# (ii) --- FUNCTIONS ----------------------------------------------------------
def get_average_temperature_during_heating_season(temperature, t_threshold=15):
"""
returns average temperature during heating season
input:
temperature : pd.Series(Index=time, values=temperature)
t_threshold : threshold temperature for heating degree days (HDD)
returns:
average temperature
"""
t_average_daily = temperature.resample("1D").mean()
return t_average_daily.loc[t_average_daily < t_threshold].mean()
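# Usage sketch (hypothetical data): given an hourly temperature series, only
# days whose daily mean lies below t_threshold enter the average, e.g.
#
#   idx = pd.date_range("2015-01-01", periods=8760, freq="H")
#   temperature = pd.Series(5 + 10 * np.random.rand(len(idx)), index=idx)
#   t_avg = get_average_temperature_during_heating_season(temperature)
#
# (np would need to be imported; shown for illustration only)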
def prepare_building_stock_data():
"""
reads building stock data and cleans up the format, returns
--------
u_values: pd.DataFrame current U-values
area_tot: heated floor area per country and sector [Mm²]
area: heated floor area [Mm²] for country, sector, building
type and period
"""
building_data = pd.read_csv(snakemake.input.building_stock,
usecols=list(range(13)))
# standardize data
building_data["type"].replace(
{'Covered area: heated [Mm²]': 'Heated area [Mm²]',
'Windows ': 'Window',
'Windows': 'Window',
'Walls ': 'Wall',
'Walls': 'Wall',
'Roof ': 'Roof',
'Floor ': 'Floor',
}, inplace=True)
building_data.country_code = building_data.country_code.str.upper()
building_data["subsector"].replace({'Hotels and Restaurants':
'Hotels and restaurants'}, inplace=True)
building_data["sector"].replace({'Residential sector': 'residential',
'Service sector': 'services'},
inplace=True)
# extract u-values
u_values = building_data[(building_data.feature.str.contains("U-values"))
& (building_data.subsector != "Total")]
components = list(u_values.type.unique())
country_iso_dic = building_data.set_index("country")["country_code"].to_dict()
# add missing /rename countries
country_iso_dic.update({'Norway': 'NO',
'Iceland': 'IS',
'Montenegro': 'ME',
'Serbia': 'RS',
'Albania': 'AL',
'United Kingdom': 'GB',
'Bosnia and Herzegovina': 'BA',
'Switzerland': 'CH'})
# heated floor area ----------------------------------------------------------
area = building_data[(building_data.type == 'Heated area [Mm²]') &
(building_data.subsector != "Total")]
area_tot = area.groupby(["country", "sector"]).sum()
area = pd.concat([area, area.apply(lambda x: x.value /
area_tot.value.loc[(x.country, x.sector)],
axis=1).rename("weight")],axis=1)
area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum()
area_tot.rename(index=country_iso_dic, inplace=True)
# add for some missing countries floor area from other data sources
area_missing = pd.read_csv(snakemake.input.floor_area_missing,
index_col=[0, 1], usecols=[0, 1, 2, 3],
encoding='ISO-8859-1')
area_tot = area_tot.append(area_missing.unstack(level=-1).dropna().stack())
area_tot = area_tot.loc[~area_tot.index.duplicated(keep='last')]
# for still missing countries calculate floor area by population size
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
pop_layout["ct"] = pop_layout.index.str[:2]
ct_total = pop_layout.total.groupby(pop_layout["ct"]).sum()
area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply(lambda x: x / ct_total[x.index])
missing_area_ct = ct_total.index.difference(area_tot.index.levels[0])
for ct in missing_area_ct.intersection(ct_total.index):
averaged_data = pd.DataFrame(
area_per_pop.value.reindex(map_for_missings[ct]).mean()
* ct_total[ct],
columns=["value"])
index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
averaged_data.index = index
averaged_data["estimated"] = 1
if ct not in area_tot.index.levels[0]:
area_tot = area_tot.append(averaged_data, sort=True)
else:
area_tot.loc[averaged_data.index] = averaged_data
# u_values for Poland are missing -> take them from eurostat -----------
u_values_PL = pd.read_csv(snakemake.input.u_values_PL)
u_values_PL.component.replace({"Walls":"Wall", "Windows": "Window"},
inplace=True)
area_PL = area.loc["Poland"].reset_index()
data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index)
data_PL["country"] = "Poland"
data_PL["country_code"] = "PL"
# data from area
for col in ["sector", "subsector", "bage"]:
data_PL[col] = area_PL[col]
data_PL["btype"] = area_PL["subsector"]
data_PL_final = pd.DataFrame()
for component in components:
data_PL["type"] = component
data_PL["value"] = data_PL.apply(lambda x: u_values_PL[(u_values_PL.component==component)
& (u_values_PL.sector==x["sector"])]
[x["bage"]].iloc[0], axis=1)
data_PL_final = data_PL_final.append(data_PL)
u_values = pd.concat([u_values,
data_PL_final]).reset_index(drop=True)
# clean data ---------------------------------------------------------------
# smallest possible U-value for windows today is 0.8 (passive house standard);
# the dataset may give U-values for the glass only rather than for the
# whole window including the frame for those building types
u_values.loc[(u_values.type=="Window") & (u_values.value<0.8), "value"] = 0.8
# drop unnecessary columns
u_values.drop(['topic', 'feature','detail', 'estimated','unit'],
axis=1, inplace=True, errors="ignore")
u_values.subsector.replace(rename_sectors, inplace=True)
u_values.btype.replace(rename_sectors, inplace=True)
# for missing weighting of surfaces of building types assume MFH
u_values["assumed_subsector"] = u_values.subsector
u_values.loc[~u_values.subsector.isin(rename_sectors.values()),
"assumed_subsector"] = 'MFH'
u_values.country_code.replace({"UK":"GB"}, inplace=True)
u_values.bage.replace({'Berfore 1945':'Before 1945'}, inplace=True)
u_values = u_values[~u_values.bage.isna()]
u_values.set_index(["country_code", "subsector", "bage", "type"],
inplace=True)
# only take in config.yaml specified countries into account
countries = ct_total.index
area_tot = area_tot.loc[countries]
return u_values, country_iso_dic, countries, area_tot, area
def prepare_building_topology(u_values, same_building_topology=True):
"""
reads in typical building topologies (e.g. average surface of building elements)
and typical losses through thermal bridging and air ventilation
"""
data_tabula = pd.read_csv(snakemake.input.data_tabula,
skiprows=lambda x: x in range(1,11),
low_memory=False).iloc[:2974]
parameters = ["Code_Country",
# building type (SFH/MFH/AB)
"Code_BuildingSizeClass",
# time period of build year
"Year1_Building", "Year2_Building",
# areas [m^2]
"A_C_Ref", # conditioned area, internal
"A_Roof_1", "A_Roof_2", "A_Wall_1", "A_Wall_2",
"A_Floor_1", "A_Floor_2", "A_Window_1", "A_Window_2",
# for air ventilation loses [1/h]
"n_air_use", "n_air_infiltration",
# for losses due to thermal bridges, standard values [W/(m^2K)]
"delta_U_ThermalBridging",
# floor area related heat transfer coefficient by transmission [-]
"F_red_temp",
# refurbishment state [1: not refurbished, 2: moderate ,3: strong refurbishment]
'Number_BuildingVariant',
]
data_tabula = data_tabula[parameters]
building_elements = ["Roof", "Wall", "Floor", "Window"]
# get total area of building components
for element in building_elements:
elements = ["A_{}_1".format(element),
"A_{}_2".format(element)]
data_tabula = pd.concat([data_tabula.drop(elements, axis=1),
data_tabula[elements].sum(axis=1).rename("A_{}".format(element))],
axis=1)
# clean data
data_tabula = data_tabula.loc[pd.concat([data_tabula[col]!=0 for col in
["A_Wall", "A_Floor", "A_Window", "A_Roof", "A_C_Ref"]],
axis=1).all(axis=1)]
data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1,2,3])]
data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(["AB", "SFH", "MFH", "TH"])]
# map tabula building periods to hotmaps building periods
def map_periods(build_year1, build_year2):
periods = {(0, 1945): 'Before 1945',
(1945,1969) : '1945 - 1969',
(1970, 1979) :'1970 - 1979',
(1980, 1989) : '1980 - 1989',
(1990, 1999) :'1990 - 1999',
(2000, 2010) : '2000 - 2010',
(2010, 10000) : 'Post 2010'}
minimum = 1e5
for key in periods:
diff = abs(build_year1-key[0]) + abs(build_year2-key[1])
if diff < minimum:
minimum = diff
searched_period = periods[key]
return searched_period
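# e.g. map_periods(1950, 1964): the distance to (1945, 1969) is
# |1950-1945| + |1964-1969| = 10, smaller than to any other period,
# so the building is assigned to '1945 - 1969'.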
data_tabula["bage"] = data_tabula.apply(lambda x: map_periods(x.Year1_Building, x.Year2_Building),
axis=1)
# set new index
data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass',
'bage', 'Number_BuildingVariant'])
# get typical building topology
area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window']
typical_building = (data_tabula.groupby(level=[1,2]).mean()
.rename(index={"TH": "SFH"}).groupby(level=[0,1]).mean())
# drop duplicates
data_tabula = data_tabula[~data_tabula.index.duplicated(keep="first")]
# fill missing values
hotmaps_data_i = u_values.reset_index().set_index(["country_code", "assumed_subsector",
"bage"]).index
# missing countries in tabula
missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique())
# areas should stay constant for different retrofitting measures
cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref','A_Roof',
'A_Wall', 'A_Floor', 'A_Window']
for col in cols_constant:
missing_ct[col] = missing_ct[col].combine_first(missing_ct[col]
.groupby(level=[0,1,2]).mean())
missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack()
.unstack().mean())
data_tabula = missing_ct.stack(level=[-1,-2, -3],dropna=False)
# sets for different countries same building topology which only depends on
# build year and subsector (MFH, SFH, AB)
if same_building_topology:
typical_building = ((typical_building.reindex(data_tabula.droplevel(0).index))
.set_index(data_tabula.index))
data_tabula.update(typical_building[area_cols])
# total buildings envelope surface [m^2]
data_tabula["A_envelope"] = data_tabula[["A_{}".format(element) for
element in building_elements]].sum(axis=1)
return data_tabula
def prepare_cost_retro(country_iso_dic):
"""
reads and prepares retrofitting costs; annualises them if annualise_cost=True
"""
cost_retro = pd.read_csv(snakemake.input.cost_germany,
nrows=4, index_col=0, usecols=[0, 1, 2, 3])
cost_retro.rename(lambda x: x.capitalize(), inplace=True)
window_assumptions = pd.read_csv(snakemake.input.window_assumptions,
skiprows=[1], usecols=[0,1,2,3], nrows=2)
if annualise_cost:
cost_retro[["cost_fix", "cost_var"]] = (cost_retro[["cost_fix", "cost_var"]]
.apply(lambda x: x * interest_rate /
(1 - (1 + interest_rate)
** -cost_retro.loc[x.index,
"life_time"])))
# weightings of costs ---------------------------------------------
if construction_index:
cost_w = pd.read_csv(snakemake.input.construction_index,
skiprows=3, nrows=32, index_col=0)
# since German retrofitting costs are assumed
cost_w = ((cost_w["2018"] / cost_w.loc["Germany", "2018"])
.rename(index=country_iso_dic))
else:
cost_w = None
if tax_weighting:
tax_w = pd.read_csv(snakemake.input.tax_w,
header=12, nrows=39, index_col=0, usecols=[0, 4])
tax_w.rename(index=country_iso_dic, inplace=True)
tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[:, 0]
tax_w.dropna(inplace=True)
else:
tax_w = None
return cost_retro, window_assumptions, cost_w, tax_w
def prepare_temperature_data():
"""
returns the temperature-dependent data for each country:
d_heat : length of heating season pd.Series(index=countries) [days/year]
on those days, daily average temperature is below
threshold temperature t_threshold
temperature_factor : accumulated difference between internal and
external temperature pd.Series(index=countries) ([K]) * [days/year]
temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
"""
temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas()
d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean()
.resample("1D").mean()<t_threshold).sum()
temperature_average_d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1)
.mean()
.apply(lambda x: get_average_temperature_during_heating_season(x, t_threshold=15)))
# accumulated difference between internal and external temperature
# units ([K]-[K]) * [days/year]
temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
return d_heat, temperature_factor
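# Worked example (hypothetical numbers): d_heat = 200 days with an average
# heating-season temperature of 5 °C gives
#   temperature_factor = (15 - 5) * 200 / 365 ≈ 5.5
# in units of [K], averaged over the year.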
# windows ---------------------------------------------------------------
def window_limit(l, window_assumptions):
"""
defines the limit U-value above which a window is retrofitted
"""
m = (window_assumptions.diff()["u_limit"] /
window_assumptions.diff()["strength"]).dropna().iloc[0]
a = window_assumptions["u_limit"][0] - m * window_assumptions["strength"][0]
return m*l + a
def u_retro_window(l, window_assumptions):
"""
defines the U-value of the retrofitted window depending on renovation strength
"""
m = (window_assumptions.diff()["u_value"] /
window_assumptions.diff()["strength"]).dropna().iloc[0]
a = window_assumptions["u_value"][0] - m * window_assumptions["strength"][0]
return max(m*l + a, 0.8)
def window_cost(u, cost_retro, window_assumptions):
"""
returns costs for new windows depending on the U-value
"""
m = (window_assumptions.diff()["cost"] /
window_assumptions.diff()["u_value"]).dropna().iloc[0]
a = window_assumptions["cost"][0] - m * window_assumptions["u_value"][0]
window_cost = m*u + a
if annualise_cost:
window_cost = window_cost * interest_rate / (1 - (1 + interest_rate)
** -cost_retro.loc["Window", "life_time"])
return window_cost
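# Illustrative example for the three window functions above (hypothetical
# window_assumptions with support points strength 0.076 -> u_limit 1.3 and
# strength 0.26 -> u_limit 0.8): the interpolation slope is
#   m = (0.8 - 1.3) / (0.26 - 0.076) ≈ -2.72
# so window_limit(0.1) ≈ 1.23, i.e. only windows with a current U-value
# above ~1.23 W/(m²K) are replaced at that renovation strength.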
def calculate_costs(u_values, l, cost_retro, window_assumptions):
"""
returns costs for a given retrofitting strength weighted by the average
surface/volume ratio of the component for each building type
"""
return u_values.apply(lambda x: (cost_retro.loc[x.name[3], "cost_var"] *
100 * float(l) * l_weight.loc[x.name[3]][0]
+ cost_retro.loc[x.name[3], "cost_fix"]) * x.A_element / x.A_C_Ref
if x.name[3]!="Window"
else (window_cost(x["new_U_{}".format(l)], cost_retro, window_assumptions) *
x.A_element / x.A_C_Ref
if x.value>window_limit(float(l), window_assumptions) else 0),
axis=1)
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
"""
calculate U-values after building retrofitting, depending on the old
U-values (u_values). This is for simple insulation measures, adding
an additional layer of insulation.
For the components Roof, Wall and Floor the new U-values depend on the
additional insulation thickness (l) and the weighting for the corresponding
component (l_weight).
Windows are renovated to new ones with U-value (function: u_retro_window(l))
only if they are insulated worse than a certain limit value
(function: window_limit).
Parameters
----------
u_values: pd.DataFrame
l: string
l_weight: pd.DataFrame (component, weight)
k: thermal conductivity
"""
return u_values.apply(lambda x:
k / ((k / x.value) +
(float(l) * l_weight.loc[x.name[3]]))
if x.name[3]!="Window"
else (min(x.value, u_retro_window(float(l), window_assumptions))
if x.value>window_limit(float(l), window_assumptions) else x.value),
axis=1)
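# Worked example (hypothetical values): a wall with current U-value
# 1.0 W/(m²K), additional insulation l = 0.1 m, component weight 1.48 (Wall)
# and k = 0.035 W/(mK) gives
#   U_new = k / (k / U_old + l * weight)
#         = 0.035 / (0.035 / 1.0 + 0.1 * 1.48) ≈ 0.19 W/(m²K),
# i.e. the thermal resistances of old wall and added layer add in series.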
def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
"""
maps tabula data to hotmaps data with the desired column name prefix
Parameters
----------
df_tabula : pd.Series
tabula data with pd.MultiIndex
df_hotmaps : pd.DataFrame
dataframe with hotmaps pd.MultiIndex
column_prefix : string
column prefix to rename column names of df_tabula
Returns
-------
pd.DataFrame (index=df_hotmaps.index)
returns df_tabula with hotmaps index
"""
values = (df_tabula.unstack()
.reindex(df_hotmaps.rename(index =
lambda x: "MFH" if x not in rename_sectors.values()
else x, level=1).index))
values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns])
values.index = df_hotmaps.index
return values
def get_solar_gains_per_year(window_area):
"""
returns solar heat gains during the heating season in [kWh/a] depending on
the window area [m^2] of the building, assuming an equal distribution of
window orientations (east, south, north, west)
"""
return sum(external_shading * frame_area_fraction * non_perpendicular
* 0.25 * window_area * solar_global_radiation)
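# Worked example: for window_area = 10 m², each orientation receives 2.5 m²;
# with the standard values above this yields
#   0.6 * 0.3 * 0.9 * 0.25 * 10 * (246 + 401 + 246 + 148) ≈ 422 kWh/a.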
def map_to_lstrength(l_strength, df):
"""
renames column names from a pandas dataframe to map tabula retrofitting
strengths [2 = moderate, 3 = ambitious] to l_strength
"""
middle = len(l_strength) // 2
map_to_l = pd.MultiIndex.from_arrays([middle*[2] + len(l_strength[middle:])*[3],l_strength])
l_strength_df = (df.stack(-2).reindex(map_to_l, axis=1, level=0)
.droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1))
return pd.concat([df.drop([2,3], axis=1, level=1), l_strength_df], axis=1)
def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
"""
calculates total annual heat losses Q_ht for different insulation thicknesses
(l_strength), depending on the current insulation state (u_values), standard
building topologies and air ventilation from TABULA (data_tabula) and
the accumulated difference between internal and external temperature
during the heating season (temperature_factor).
Total annual heat losses Q_ht comprise losses by:
(1) transmission (H_tr_e)
(2) thermal bridges (H_tb)
(3) ventilation (H_ve)
weighted by a factor (F_red_temp) which accounts for non-uniform heating,
and by the temperature factor of the heating season
Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],
columns=[current (1.) + retrofitted (l_strength)])
"""
# (1) by transmission
# calculate new U values of building elements due to additional insulation
for l in l_strength:
u_values["new_U_{}".format(l)] = calculate_new_u(u_values,
l, l_weight, window_assumptions)
# surface area of building components [m^2]
area_element = (data_tabula[["A_{}".format(e) for e in u_values.index.levels[3]]]
.rename(columns=lambda x: x[2:]).stack().unstack(-2).stack())
u_values["A_element"] = map_tabula_to_hotmaps(area_element,
u_values, "A_element").xs(1, level=1, axis=1)
# heat transfer H_tr_e [W/m^2K] through building element
# U_e * A_e / A_C_Ref
columns = ["value"] + ["new_U_{}".format(l) for l in l_strength]
heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0),
u_values.A_element], axis=1)
# get real subsector back in index
heat_transfer.index = u_values.index
heat_transfer = heat_transfer.groupby(level=[0,1,2]).sum()
# rename columns of heat transfer H_tr_e [W/K] and envelope surface A_envelope [m^2]
heat_transfer.rename(columns={"A_element":"A_envelope",
},inplace=True)
# map reference area
heat_transfer["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref,
heat_transfer,
"A_C_Ref").xs(1.,level=1,axis=1)
u_values["A_C_Ref"] = map_tabula_to_hotmaps(data_tabula.A_C_Ref,
u_values,
"A_C_Ref").xs(1.,level=1,axis=1)
# get heat transfer by transmission through building element [W/(m^2K)]
heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0)
heat_transfer_perm2.columns = pd.MultiIndex.from_product([["H_tr_e"], [1.] + l_strength])
# (2) heat transfer by thermal bridges H_tb [W/(m^2K)]
# H_tb = delta_U [W/(m^2K)]* A_envelope [m^2] / A_C_Ref [m^2]
H_tb_tabula = data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope / data_tabula.A_C_Ref
heat_transfer_perm2 = pd.concat([heat_transfer_perm2,
map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, "H_tb")], axis=1)
# (3) by ventilation H_ve [W/(m²K)]
# = c_p_air [Wh/(m^3K)] * (n_air_use + n_air_infilitraion) [1/h] * h_room [m]
H_ve_tabula = (data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air * h_room
heat_transfer_perm2 = pd.concat([heat_transfer_perm2,
map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, "H_ve")],
axis=1)
# F_red_temp: factor which accounts for non-uniform heating, e.g.
# a lower heating set point during night times/weekends;
# the effect is significant for buildings with poor insulation,
# while for well insulated buildings/passive houses it has nearly no effect;
# based on TABULA values depending on the building type
F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp,
heat_transfer_perm2,
"F_red_temp")
# total heat transfer Q_ht [W/m^2] =
# (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
# temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2)
F_red_temp = map_to_lstrength(l_strength, F_red_temp)
Q_ht = (heat_transfer_perm2.groupby(level=1,axis=1).sum()
.mul(F_red_temp.droplevel(0, axis=1))
.mul(temperature_factor.reindex(heat_transfer_perm2.index,level=0), axis=0))
return Q_ht, heat_transfer_perm2
def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
"""
calculates heat gains Q_gain [W/m^2], which comprise gains by:
(1) solar radiation
(2) internal heat gains
"""
# (1) by solar radiation H_solar [W/m^2]
# solar radiation [kWhm^2/a] / A_C_Ref [m^2] *1e3[1/k] / 8760 [a/h]
H_solar = (data_tabula.A_Window.apply(lambda x: get_solar_gains_per_year(x))
/ data_tabula.A_C_Ref * 1e3 / 8760)
Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, "H_solar").xs(1.,level=1, axis=1)
# (2) by internal H_int
# phi [W/m^2] * d_heat [d/a] * 1/365 [a/d] -> W/m^2
Q_gain["H_int"] = (phi_int * d_heat * 1/365).reindex(index=heat_transfer_perm2.index, level=0)
return Q_gain
def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
"""
calculates gain utilisation factor nu
"""
# time constant of the building tau [h] = c_m [Wh/(m^2K)] * 1 /(H_tr_e+H_tb*H_ve) [m^2 K /W]
tau = c_m / heat_transfer_perm2.groupby(level=1,axis=1).sum()
alpha = alpha_H_0 + (tau/tau_H_0)
# heat balance ratio
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
# gain utilisation factor
nu = (1 - gamma**alpha) / (1 - gamma**(alpha+1))
return nu
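# Worked example (hypothetical): with H_tr_e + H_tb + H_ve = 2.5 W/(m²K),
# tau = 45 / 2.5 = 18 h and alpha = 0.8 + 18 / 30 = 1.4; for a heat balance
# ratio gamma = 0.5 the gain utilisation factor is
#   nu = (1 - 0.5**1.4) / (1 - 0.5**2.4) ≈ 0.77,
# i.e. about three quarters of the gains offset heating demand.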
def calculate_space_heat_savings(u_values, data_tabula, l_strength,
temperature_factor, d_heat):
"""
calculates space heat savings (dE_space [per unit of unrefurbished state])
through retrofitting of the thermal envelope by additional insulation
material (l_strength [m])
"""
# heat losses Q_ht [W/m^2]
Q_ht, heat_transfer_perm2 = calculate_heat_losses(u_values, data_tabula,
l_strength, temperature_factor)
# heat gains Q_gain [W/m^2]
Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat)
# calculate gain utilisation factor nu [dimensionless]
nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain)
# total space heating demand E_space
E_space = Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0)
dE_space = E_space.div(E_space[1.], axis=0).iloc[:, 1:]
dE_space.columns = pd.MultiIndex.from_product([["dE"], l_strength])
return dE_space
def calculate_retro_costs(u_values, l_strength, cost_retro):
"""
returns costs of different retrofitting measures
"""
costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l)
for l in l_strength], axis=1)
# energy and costs per country, sector, subsector and year
cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum()
cost_tot.columns = pd.MultiIndex.from_product([["cost"], cost_tot.columns])
return cost_tot
def sample_dE_costs_area(area, area_tot, costs, dE_space, countries,
construction_index, tax_weighting):
"""
brings costs and energy savings together; fills in area and costs per energy
saving for missing countries; weights costs; and determines "moderate" and
"ambitious" retrofitting
"""
sub_to_sector_dict = (area.reset_index().replace(rename_sectors)
.set_index("subsector")["sector"].to_dict())
area_reordered = ((area.rename(index=country_iso_dic, level=0)
.rename(index=rename_sectors, level=2)
.reset_index()).rename(columns={"country":"country_code"})
.set_index(["country_code", "subsector", "bage"]))
cost_dE =(pd.concat([costs, dE_space], axis=1)
.mul(area_reordered.weight, axis=0)
.rename(sub_to_sector_dict,level=1).groupby(level=[0,1]).sum())
# map missing countries
for ct in countries.difference(cost_dE.index.levels[0]):
averaged_data = (cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1)
.set_index(pd.MultiIndex
.from_product([[ct], cost_dE.index.levels[1]])))
cost_dE = cost_dE.append(averaged_data)
# weights costs after construction index
if construction_index:
for ct in list(map_for_missings.keys() - cost_w.index):
cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0)
# weights cost depending on country taxes
if tax_weighting:
for ct in list(map_for_missings.keys() - tax_w.index):
tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0)
# drop not considered countries
cost_dE = cost_dE.reindex(countries,level=0)
# get share of residential and service floor area
sec_w = area_tot.value / area_tot.value.groupby(level=0).sum()
# get the total cost-energy-savings weight by sector area
tot = (cost_dE.mul(sec_w, axis=0).groupby(level="country_code").sum()
.set_index(pd.MultiIndex
.from_product([cost_dE.index.unique(level="country_code"), ["tot"]])))
cost_dE = cost_dE.append(tot).unstack().stack()
summed_area = (pd.DataFrame(area_tot.groupby("country").sum())
.set_index(pd.MultiIndex.from_product(
[area_tot.index.unique(level="country"), ["tot"]])))
area_tot = area_tot.append(summed_area).unstack().stack()
cost_per_saving = (cost_dE["cost"] / (1-cost_dE["dE"])) #.diff(axis=1).dropna(axis=1)
moderate_min = cost_per_saving.idxmin(axis=1)
moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1)
for i in moderate_min.index], axis=1).T
moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns,
["moderate"]])
ambitious_dE_cost = cost_dE.xs("0.26", level=1,axis=1)
ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns,
["ambitious"]])
cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)
return cost_dE_new, area_tot
#%% --- MAIN --------------------------------------------------------------
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_retro_cost',
simpl='',
clusters=48,
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1'
)
# ******** config *********************************************************
retro_opts = snakemake.config["sector"]["retrofitting"]
interest_rate = retro_opts["interest_rate"]
annualise_cost = retro_opts["annualise_cost"] # annualise the investment costs
tax_weighting = retro_opts["tax_weighting"] # weight costs depending on taxes in countries
construction_index = retro_opts["construction_index"] # weight costs depending on labour/material costs per ct
# mapping missing countries by neighbours
map_for_missings = {
"AL": ["BG", "RO", "GR"],
"BA": ["HR"],
"RS": ["BG", "RO", "HR", "HU"],
"MK": ["BG", "GR"],
"ME": ["BA", "AL", "RS", "HR"],
"CH": ["SE", "DE"],
"NO": ["SE"],
}
# (1) prepare data **********************************************************
# building stock data -----------------------------------------------------
# hotmaps u_values, heated floor areas per sector
u_values, country_iso_dic, countries, area_tot, area = prepare_building_stock_data()
# building topology, thermal bridges, ventilation losses
data_tabula = prepare_building_topology(u_values)
# costs for retrofitting -------------------------------------------------
cost_retro, window_assumptions, cost_w, tax_w = prepare_cost_retro(country_iso_dic)
# temperature dependend parameters
d_heat, temperature_factor = prepare_temperature_data()
# (2) space heat savings ****************************************************
dE_space = calculate_space_heat_savings(u_values, data_tabula, l_strength,
temperature_factor, d_heat)
# (3) costs *****************************************************************
costs = calculate_retro_costs(u_values, l_strength, cost_retro)
# (4) cost-dE and area per sector *******************************************
cost_dE, area_tot = sample_dE_costs_area(area, area_tot, costs, dE_space, countries,
construction_index, tax_weighting)
# save *********************************************************************
cost_dE.to_csv(snakemake.output.retro_cost)
area_tot.to_csv(snakemake.output.floor_area)

View File

@ -1,52 +1,52 @@
"""Build solar thermal collector time series."""
import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import scipy as sp
import helper
import numpy as np
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.input = Dict()
snakemake.output = Dict()
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_solar_thermal_profiles',
simpl='',
clusters=48,
)
time = pd.date_range(freq='m', **snakemake.config['snapshots'])
params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]]))
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml') as f:
snakemake.config = yaml.safe_load(f)
snakemake.input = Dict()
snakemake.output = Dict()
config = snakemake.config['solar_thermal']
time = pd.date_range(freq='h', **snakemake.config['snapshots'])
cutout_config = snakemake.config['atlite']['cutout']
cutout = atlite.Cutout(cutout_config).sel(time=time)
cutout = atlite.Cutout(snakemake.config['atlite']['cutout_name'],
cutout_dir=snakemake.config['atlite']['cutout_dir'],
**params)
clustered_regions = gpd.read_file(
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
clustered_busregions_as_geopd = gpd.read_file(snakemake.input.regions_onshore).set_index('name', drop=True)
I = cutout.indicatormatrix(clustered_regions)
clustered_busregions = pd.Series(clustered_busregions_as_geopd.geometry, index=clustered_busregions_as_geopd.index)
for area in ["total", "rural", "urban"]:
helper.clean_invalid_geometries(clustered_busregions)
pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{area}'])
I = cutout.indicatormatrix(clustered_busregions)
stacked_pop = pop_layout.stack(spatial=('y', 'x'))
M = I.T.dot(np.diag(I.dot(stacked_pop)))
nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1.
M_tilde = M / nonzero_sum
for item in ["total","rural","urban"]:
solar_thermal = cutout.solar_thermal(**config, matrix=M_tilde.T,
index=clustered_regions.index)
pop_layout = xr.open_dataarray(snakemake.input['pop_layout_'+item])
M = I.T.dot(sp.diag(I.dot(pop_layout.stack(spatial=('y', 'x')))))
nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1.
M_tilde = M/nonzero_sum
solar_thermal_angle = 45.
#should clearsky_model be "simple" or "enhanced"?
solar_thermal = cutout.solar_thermal(clearsky_model="simple",
orientation={'slope': solar_thermal_angle, 'azimuth': 180.},
matrix = M_tilde.T,
index=clustered_busregions.index)
solar_thermal.to_netcdf(snakemake.output["solar_thermal_"+item])
solar_thermal.to_netcdf(snakemake.output[f"solar_thermal_{area}"])

View File

@ -1,50 +1,46 @@
"""Build temperature profiles."""
import geopandas as gpd
import atlite
import pandas as pd
import xarray as xr
import scipy as sp
import helper
import numpy as np
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.input = Dict()
snakemake.output = Dict()
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_temperature_profiles',
simpl='',
clusters=48,
)
time = pd.date_range(freq='m', **snakemake.config['snapshots'])
params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]]))
time = pd.date_range(freq='h', **snakemake.config['snapshots'])
cutout_config = snakemake.config['atlite']['cutout']
cutout = atlite.Cutout(cutout_config).sel(time=time)
clustered_regions = gpd.read_file(
snakemake.input.regions_onshore).set_index('name').buffer(0).squeeze()
cutout = atlite.Cutout(snakemake.config['atlite']['cutout_name'],
cutout_dir=snakemake.config['atlite']['cutout_dir'],
**params)
I = cutout.indicatormatrix(clustered_regions)
clustered_busregions_as_geopd = gpd.read_file(snakemake.input.regions_onshore).set_index('name', drop=True)
for area in ["total", "rural", "urban"]:
clustered_busregions = pd.Series(clustered_busregions_as_geopd.geometry, index=clustered_busregions_as_geopd.index)
pop_layout = xr.open_dataarray(snakemake.input[f'pop_layout_{area}'])
helper.clean_invalid_geometries(clustered_busregions)
stacked_pop = pop_layout.stack(spatial=('y', 'x'))
M = I.T.dot(np.diag(I.dot(stacked_pop)))
I = cutout.indicatormatrix(clustered_busregions)
nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1.
M_tilde = M / nonzero_sum
temp_air = cutout.temperature(
matrix=M_tilde.T, index=clustered_regions.index)
for item in ["total","rural","urban"]:
temp_air.to_netcdf(snakemake.output[f"temp_air_{area}"])
pop_layout = xr.open_dataarray(snakemake.input['pop_layout_'+item])
temp_soil = cutout.soil_temperature(
matrix=M_tilde.T, index=clustered_regions.index)
M = I.T.dot(sp.diag(I.dot(pop_layout.stack(spatial=('y', 'x')))))
nonzero_sum = M.sum(axis=0, keepdims=True)
nonzero_sum[nonzero_sum == 0.] = 1.
M_tilde = M/nonzero_sum
temp_air = cutout.temperature(matrix=M_tilde.T,index=clustered_busregions.index)
temp_air.to_netcdf(snakemake.output["temp_air_"+item])
temp_soil = cutout.soil_temperature(matrix=M_tilde.T,index=clustered_busregions.index)
temp_soil.to_netcdf(snakemake.output["temp_soil_"+item])
temp_soil.to_netcdf(snakemake.output[f"temp_soil_{area}"])

View File

@ -1,10 +1,17 @@
from shutil import copy
files = ["config.yaml",
"Snakefile",
"scripts/solve_network.py",
"scripts/prepare_sector_network.py"]
files = [
"config.yaml",
"Snakefile",
"scripts/solve_network.py",
"scripts/prepare_sector_network.py"
]
for f in files:
copy(f,snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/')
if __name__ == '__main__':
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('copy_config')
for f in files:
copy(f,snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/')

View File

@ -1,15 +1,91 @@
import os
import pandas as pd
from pathlib import Path
from pypsa.descriptors import Dict
from pypsa.components import components, component_attrs
import logging
logger = logging.getLogger(__name__)
#https://stackoverflow.com/questions/20833344/fix-invalid-polygon-in-shapely
#https://stackoverflow.com/questions/13062334/polygon-intersection-error-in-shapely-shapely-geos-topologicalerror-the-opera
#https://shapely.readthedocs.io/en/latest/manual.html#object.buffer
def clean_invalid_geometries(geometries):
"""Fix self-touching or self-crossing polygons; these seem to appear
due to numerical problems from writing and reading, since the geometries
are valid before being written in pypsa-eur/scripts/cluster_network.py"""
for i,p in geometries.items():
if not p.is_valid:
logger.warning(f'Clustered region {i} had an invalid geometry, fixing using zero buffer.')
geometries[i] = p.buffer(0)
def override_component_attrs(directory):
"""Tell PyPSA that links can have multiple outputs by
overriding the component_attrs. This can be done for
as many buses as you need with the format bus{i} for i = 2,3,4,5,...
See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
Parameters
----------
directory : string
Folder where component attributes to override are stored
analogous to ``pypsa/component_attrs``, e.g. `links.csv`.
Returns
-------
Dictionary of overridden component attributes.
"""
attrs = Dict({k : v.copy() for k,v in component_attrs.items()})
for component, list_name in components.list_name.items():
fn = f"{directory}/{list_name}.csv"
if os.path.isfile(fn):
overrides = pd.read_csv(fn, index_col=0, na_values="n/a")
attrs[component] = overrides.combine_first(attrs[component])
return attrs
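# Usage sketch (assuming a directory ``override_component_attrs`` containing
# e.g. ``links.csv`` with bus2/bus3 columns):
#
#   overrides = override_component_attrs("override_component_attrs")
#   n = pypsa.Network("network.nc", override_component_attrs=overrides)
#
# pypsa.Network accepts the modified attribute dictionary directly.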
# from pypsa-eur/_helpers.py
def mock_snakemake(rulename, **wildcards):
"""
This function is expected to be executed from the 'scripts' directory of
the snakemake project. It returns a snakemake.script.Snakemake object,
based on the Snakefile.
If a rule has wildcards, you have to specify them in **wildcards.
Parameters
----------
rulename: str
name of the rule for which the snakemake object should be generated
**wildcards:
keyword arguments fixing the wildcards. Only necessary if wildcards are
needed.
"""
import snakemake as sm
import os
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \
f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p):
snakefile = p
break
workflow = sm.Workflow(snakefile)
workflow.include(snakefile)
workflow.global_resources = {}
rule = workflow.get_rule(rulename)
dag = sm.dag.DAG(workflow, rules=[rule])
wc = Dict(wildcards)
job = sm.jobs.Job(rule, dag, wc)
def make_accessable(*ios):
for io in ios:
for i in range(len(io)):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
job.threads, job.resources, job.log,
job.dag.workflow.config, job.rule.name, None,)
# create log and output dir if not existent
for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True)
os.chdir(script_dir)
return snakemake
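# Usage sketch: run from the ``scripts`` directory, e.g. in an interactive
# session, to debug a rule outside the snakemake workflow:
#
#   snakemake = mock_snakemake('build_population_layouts')
#   snakemake.input.urban_percent  # paths resolved from the Snakefile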

View File

@ -1,41 +1,21 @@
from six import iteritems
import sys
import pandas as pd
import numpy as np
import yaml
import pypsa
from vresutils.costdata import annuity
import numpy as np
import pandas as pd
from prepare_sector_network import generate_periodic_profiles, prepare_costs
import yaml
from prepare_sector_network import prepare_costs
from helper import override_component_attrs
idx = pd.IndexSlice
opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"}
#First tell PyPSA that links can have multiple outputs by
#overriding the component_attrs. This can be done for
#as many buses as you need with format busi for i = 2,3,4,5,....
#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"]
override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"]
override_component_attrs["StorageUnit"].loc["p_dispatch"] = ["series","MW",0.,"Storage discharging.","Output"]
override_component_attrs["StorageUnit"].loc["p_store"] = ["series","MW",0.,"Storage charging.","Output"]
opt_name = {
"Store": "e",
"Line": "s",
"Transformer": "s"
}
def assign_carriers(n):
@ -45,18 +25,16 @@ def assign_carriers(n):
def assign_locations(n):
for c in n.iterate_components(n.one_port_components|n.branch_components):
ifind = pd.Series(c.df.index.str.find(" ",start=4),c.df.index)
for i in ifind.unique():
names = ifind.index[ifind == i]
if i == -1:
c.df.loc[names,'location'] = ""
c.df.loc[names, 'location'] = ""
else:
c.df.loc[names,'location'] = names.str[:i]
c.df.loc[names, 'location'] = names.str[:i]
def calculate_nodal_cfs(n,label,nodal_cfs):
def calculate_nodal_cfs(n, label, nodal_cfs):
#Beware: this also picks up extraneous locations for country-level (e.g. biomass) or continent-wide (e.g. fossil gas/oil) components
for c in n.iterate_components((n.branch_components^{"Line","Transformer"})|n.controllable_one_port_components^{"Load","StorageUnit"}):
capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum()
@ -71,21 +49,18 @@ def calculate_nodal_cfs(n,label,nodal_cfs):
sys.exit()
c.df["p"] = p
p_c = c.df.groupby(["location","carrier"])["p"].sum()
p_c = c.df.groupby(["location", "carrier"])["p"].sum()
cf_c = p_c/capacities_c
index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in cf_c.index.to_list()])
nodal_cfs = nodal_cfs.reindex(index|nodal_cfs.index)
nodal_cfs = nodal_cfs.reindex(index.union(nodal_cfs.index))
nodal_cfs.loc[index,label] = cf_c.values
return nodal_cfs
def calculate_cfs(n,label,cfs):
def calculate_cfs(n, label, cfs):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load","StorageUnit"}):
capacities_c = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum()
@ -103,50 +78,48 @@ def calculate_cfs(n,label,cfs):
cf_c = pd.concat([cf_c], keys=[c.list_name])
cfs = cfs.reindex(cf_c.index|cfs.index)
cfs = cfs.reindex(cf_c.index.union(cfs.index))
cfs.loc[cf_c.index,label] = cf_c
return cfs
def calculate_nodal_costs(n,label,nodal_costs):
def calculate_nodal_costs(n, label, nodal_costs):
#Beware: this also picks up extraneous locations for country-level (e.g. biomass) or continent-wide (e.g. fossil gas/oil) components
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
c.df["capital_costs"] = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"]
capital_costs = c.df.groupby(["location","carrier"])["capital_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,"capital") + t for t in capital_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index|nodal_costs.index)
c.df["capital_costs"] = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
capital_costs = c.df.groupby(["location", "carrier"])["capital_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name, "capital") + t for t in capital_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
nodal_costs.loc[index,label] = capital_costs.values
if c.name == "Link":
p = c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()
p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings,axis=0)
p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
p_all[p_all < 0.] = 0.
p = p_all.sum()
else:
p = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum()
p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
#correct sequestration cost
if c.name == "Store":
items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)]
c.df.loc[items,"marginal_cost"] = -20.
c.df.loc[items, "marginal_cost"] = -20.
c.df["marginal_costs"] = p*c.df.marginal_cost
marginal_costs = c.df.groupby(["location","carrier"])["marginal_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,"marginal") + t for t in marginal_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index|nodal_costs.index)
nodal_costs.loc[index,label] = marginal_costs.values
marginal_costs = c.df.groupby(["location", "carrier"])["marginal_costs"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name, "marginal") + t for t in marginal_costs.index.to_list()])
nodal_costs = nodal_costs.reindex(index.union(nodal_costs.index))
nodal_costs.loc[index, label] = marginal_costs.values
return nodal_costs
def calculate_costs(n,label,costs):
def calculate_costs(n, label, costs):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"]
@ -155,25 +128,25 @@ def calculate_costs(n,label,costs):
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=["capital"])
capital_costs_grouped = pd.concat([capital_costs_grouped], keys=[c.list_name])
costs = costs.reindex(capital_costs_grouped.index|costs.index)
costs = costs.reindex(capital_costs_grouped.index.union(costs.index))
costs.loc[capital_costs_grouped.index,label] = capital_costs_grouped
costs.loc[capital_costs_grouped.index, label] = capital_costs_grouped
if c.name == "Link":
p = c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()
p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings,axis=0)
p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
p_all[p_all < 0.] = 0.
p = p_all.sum()
else:
p = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum()
p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
#correct sequestration cost
if c.name == "Store":
items = c.df.index[(c.df.carrier == "co2 stored") & (c.df.marginal_cost <= -100.)]
c.df.loc[items,"marginal_cost"] = -20.
c.df.loc[items, "marginal_cost"] = -20.
marginal_costs = p*c.df.marginal_cost
@ -182,53 +155,63 @@ def calculate_costs(n,label,costs):
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=["marginal"])
marginal_costs_grouped = pd.concat([marginal_costs_grouped], keys=[c.list_name])
costs = costs.reindex(marginal_costs_grouped.index|costs.index)
costs = costs.reindex(marginal_costs_grouped.index.union(costs.index))
costs.loc[marginal_costs_grouped.index,label] = marginal_costs_grouped
#add back in costs of links if there is a line volume limit
if label[1] != "opt":
costs.loc[("links-added","capital","transmission lines"),label] = ((costs_db.at['HVDC overhead', 'fixed']*n.links.length + costs_db.at['HVDC inverter pair', 'fixed'])*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
costs.loc[("lines-added","capital","transmission lines"),label] = costs_db.at["HVAC overhead", "fixed"]*(n.lines.length*n.lines.s_nom_opt).sum()
else:
costs.loc[("links-added","capital","transmission lines"),label] = (costs_db.at['HVDC inverter pair', 'fixed']*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
#add back in all hydro
#costs.loc[("storage_units","capital","hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro","p_nom"].sum()
#costs.loc[("storage_units","capital","PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS","p_nom"].sum()
#costs.loc[("generators","capital","ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror","p_nom"].sum()
# add back in all hydro
#costs.loc[("storage_units", "capital", "hydro"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="hydro", "p_nom"].sum()
#costs.loc[("storage_units", "capital", "PHS"),label] = (0.01)*2e6*n.storage_units.loc[n.storage_units.group=="PHS", "p_nom"].sum()
#costs.loc[("generators", "capital", "ror"),label] = (0.02)*3e6*n.generators.loc[n.generators.group=="ror", "p_nom"].sum()
return costs
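# Note on the add-back above (a reading of the code, not original
# commentary): with a binding line volume limit the grid expansion cost is
# not part of the objective, so it is reconstructed from the cost database,
# schematically
#
#   DC: sum over DC links of (HVDC overhead * length + HVDC inverter pair) * p_nom_opt
#   AC: HVAC overhead * sum(length * s_nom_opt)
#
# while in the unconstrained ("opt") case only the inverter-pair cost of DC
# links is added back.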
def calculate_nodal_capacities(n,label,nodal_capacities):
def calculate_cumulative_cost():
planning_horizons = snakemake.config['scenario']['planning_horizons']
cumulative_cost = pd.DataFrame(index = df["costs"].sum().index,
columns=pd.Series(data=np.arange(0,0.1, 0.01), name='social discount rate'))
#discount cost and express them in money value of planning_horizons[0]
for r in cumulative_cost.columns:
cumulative_cost[r]=[df["costs"].sum()[index]/((1+r)**(index[-1]-planning_horizons[0])) for index in cumulative_cost.index]
#integrate cost throughout the transition path
for r in cumulative_cost.columns:
for cluster in cumulative_cost.index.get_level_values(level=0).unique():
for lv in cumulative_cost.index.get_level_values(level=1).unique():
for sector_opts in cumulative_cost.index.get_level_values(level=2).unique():
cumulative_cost.loc[(cluster, lv, sector_opts, 'cumulative cost'),r] = np.trapz(cumulative_cost.loc[idx[cluster, lv, sector_opts,planning_horizons],r].values, x=planning_horizons)
return cumulative_cost
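# A minimal numeric sketch of the discounting above (illustrative values,
# not results):
#
#   import numpy as np
#   years = [2030, 2040, 2050]                    # planning horizons
#   costs = np.array([800e9, 700e9, 600e9])       # EUR/a in each horizon
#   r = 0.02                                      # social discount rate
#   discounted = costs / (1 + r) ** (np.array(years) - years[0])
#   cumulative = np.trapz(discounted, x=years)    # EUR over the whole path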
def calculate_nodal_capacities(n, label, nodal_capacities):
#Beware: this also picks up extraneous locations for country-level (e.g. biomass) or continent-wide (e.g. fossil gas/oil) components
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
nodal_capacities_c = c.df.groupby(["location","carrier"])[opt_name.get(c.name,"p") + "_nom_opt"].sum()
index = pd.MultiIndex.from_tuples([(c.list_name,) + t for t in nodal_capacities_c.index.to_list()])
nodal_capacities = nodal_capacities.reindex(index|nodal_capacities.index)
nodal_capacities = nodal_capacities.reindex(index.union(nodal_capacities.index))
nodal_capacities.loc[index,label] = nodal_capacities_c.values
return nodal_capacities
def calculate_capacities(n,label,capacities):
def calculate_capacities(n, label, capacities):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
capacities_grouped = c.df[opt_name.get(c.name,"p") + "_nom_opt"].groupby(c.df.carrier).sum()
capacities_grouped = pd.concat([capacities_grouped], keys=[c.list_name])
capacities = capacities.reindex(capacities_grouped.index|capacities.index)
capacities = capacities.reindex(capacities_grouped.index.union(capacities.index))
capacities.loc[capacities_grouped.index,label] = capacities_grouped
capacities.loc[capacities_grouped.index, label] = capacities_grouped
return capacities
def calculate_curtailment(n,label,curtailment):
def calculate_curtailment(n, label, curtailment):
avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
@ -237,31 +220,32 @@ def calculate_curtailment(n,label,curtailment):
return curtailment
def calculate_energy(n,label,energy):
def calculate_energy(n, label, energy):
for c in n.iterate_components(n.one_port_components|n.branch_components):
if c.name in n.one_port_components:
c_energies = c.pnl.p.multiply(n.snapshot_weightings,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
else:
c_energies = pd.Series(0.,c.df.carrier.unique())
c_energies = pd.Series(0., c.df.carrier.unique())
for port in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
totals = c.pnl["p"+port].multiply(n.snapshot_weightings,axis=0).sum()
totals = c.pnl["p" + port].multiply(n.snapshot_weightings.generators, axis=0).sum()
#remove values where bus is missing (bug in nomopyomo)
no_bus = c.df.index[c.df["bus"+port] == ""]
totals.loc[no_bus] = n.component_attrs[c.name].loc["p"+port,"default"]
no_bus = c.df.index[c.df["bus" + port] == ""]
totals.loc[no_bus] = n.component_attrs[c.name].loc["p" + port, "default"]
c_energies -= totals.groupby(c.df.carrier).sum()
c_energies = pd.concat([c_energies], keys=[c.list_name])
energy = energy.reindex(c_energies.index|energy.index)
energy = energy.reindex(c_energies.index.union(energy.index))
energy.loc[c_energies.index,label] = c_energies
energy.loc[c_energies.index, label] = c_energies
return energy
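# Sign convention above (a reading of the code): one-port components enter
# with their own sign attribute (generators +1, loads -1), while for branch
# components the power withdrawn at every port is subtracted, so the
# conversion losses of e.g. a link appear as negative energy for its carrier.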
def calculate_supply(n,label,supply):
def calculate_supply(n, label, supply):
"""calculate the max dispatch of each component at the buses aggregated by carrier"""
bus_carriers = n.buses.carrier.unique()
@ -272,16 +256,16 @@ def calculate_supply(n,label,supply):
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)]
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
s = c.pnl.p[items].max().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index|supply.index)
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index,label] = s
@ -289,23 +273,23 @@ def calculate_supply(n,label,supply):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + end].map(bus_map,na_action=False)]
items = c.df.index[c.df["bus" + end].map(bus_map, na_action=False)]
if len(items) == 0:
continue
#lots of sign compensation for direction and to do maximums
s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum()
s.index = s.index+end
s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items, 'carrier']).sum()
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply = supply.reindex(s.index|supply.index)
supply.loc[s.index,label] = s
supply = supply.reindex(s.index.union(supply.index))
supply.loc[s.index, label] = s
return supply
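# The sign gymnastics above reduce to the following (a reading of the code):
# for an input port (end == "0") the expression equals -max(p0), i.e. peak
# withdrawal recorded with a negative sign; for an output port (end == "1")
# it equals max(-p1), i.e. peak delivery recorded with a positive sign.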
def calculate_supply_energy(n,label,supply_energy):
def calculate_supply_energy(n, label, supply_energy):
"""calculate the total energy supply/consuption of each component at the buses aggregated by carrier"""
@ -317,61 +301,70 @@ def calculate_supply_energy(n,label,supply_energy):
for c in n.iterate_components(n.one_port_components):
items = c.df.index[c.df.bus.map(bus_map)]
items = c.df.index[c.df.bus.map(bus_map).fillna(False)]
if len(items) == 0:
continue
s = c.pnl.p[items].multiply(n.snapshot_weightings,axis=0).sum().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
s = c.pnl.p[items].multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.loc[items, 'sign']).groupby(c.df.loc[items, 'carrier']).sum()
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(s.index|supply_energy.index)
supply_energy.loc[s.index,label] = s
supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
supply_energy.loc[s.index, label] = s
for c in n.iterate_components(n.branch_components):
for end in [col[3:] for col in c.df.columns if col[:3] == "bus"]:
items = c.df.index[c.df["bus" + str(end)].map(bus_map,na_action=False)]
items = c.df.index[c.df["bus" + str(end)].map(bus_map, na_action=False)]
if len(items) == 0:
continue
s = (-1)*c.pnl["p"+end][items].multiply(n.snapshot_weightings,axis=0).sum().groupby(c.df.loc[items,'carrier']).sum()
s.index = s.index+end
s = (-1)*c.pnl["p"+end][items].multiply(n.snapshot_weightings.generators,axis=0).sum().groupby(c.df.loc[items, 'carrier']).sum()
s.index = s.index + end
s = pd.concat([s], keys=[c.list_name])
s = pd.concat([s], keys=[i])
supply_energy = supply_energy.reindex(s.index|supply_energy.index)
supply_energy.loc[s.index,label] = s
supply_energy = supply_energy.reindex(s.index.union(supply_energy.index))
supply_energy.loc[s.index, label] = s
return supply_energy
def calculate_metrics(n,label,metrics):
metrics = metrics.reindex(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])|metrics.index)
def calculate_metrics(n, label, metrics):
metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum()
metrics.at["line_volume",label] = metrics.loc[["line_volume_AC","line_volume_DC"],label].sum()
metrics_list = [
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow"
]
if hasattr(n,"line_volume_limit"):
metrics.at["line_volume_limit",label] = n.line_volume_limit
metrics.at["line_volume_shadow",label] = n.line_volume_limit_dual
metrics = metrics.reindex(pd.Index(metrics_list).union(metrics.index))
metrics.at["line_volume_DC",label] = (n.links.length * n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
metrics.at["line_volume_AC",label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume",label] = metrics.loc[["line_volume_AC", "line_volume_DC"], label].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow",label] = n.global_constraints.at["CO2Limit","mu"]
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n,label,prices):
def calculate_prices(n, label, prices):
prices = prices.reindex(prices.index|n.buses.carrier.unique())
prices = prices.reindex(prices.index.union(n.buses.carrier.unique()))
#WARNING: this is time-averaged, see weighted_prices for load-weighted average
prices[label] = n.buses_t.marginal_price.mean().groupby(n.buses.carrier).mean()
@ -379,20 +372,26 @@ def calculate_prices(n,label,prices):
return prices
def calculate_weighted_prices(n,label,weighted_prices):
def calculate_weighted_prices(n, label, weighted_prices):
# Warning: doesn't include storage units as loads
weighted_prices = weighted_prices.reindex(pd.Index([
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2"
]))
weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"]))
link_loads = {"electricity" : ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
"heat" : ["water tanks charger"],
"urban heat" : ["water tanks charger"],
"space heat" : [],
"space urban heat" : [],
"gas" : ["OCGT","gas boiler","CHP electric","CHP heat"],
"H2" : ["Sabatier", "H2 Fuel Cell"]}
link_loads = {"electricity": ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"]}
for carrier in link_loads:
@ -408,14 +407,13 @@ def calculate_weighted_prices(n,label,weighted_prices):
if buses.empty:
continue
if carrier in ["H2","gas"]:
load = pd.DataFrame(index=n.snapshots,columns=buses,data=0.)
if carrier in ["H2", "gas"]:
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.)
elif carrier[:5] == "space":
load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix)
else:
load = n.loads_t.p_set[buses]
for tech in link_loads[carrier]:
names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech]
@ -423,24 +421,22 @@ def calculate_weighted_prices(n,label,weighted_prices):
if names.empty:
continue
load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum()
load += n.links_t.p0[names].groupby(n.links.loc[names, "bus0"],axis=1).sum()
#Add H2 Store when charging
# Add H2 Store when charging
#if carrier == "H2":
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
# stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store", "bus"],axis=1).sum(axis=1)
# stores[stores > 0.] = 0.
# load += -stores
weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum()
weighted_prices.loc[carrier,label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load*n.buses_t.marginal_price[buses])
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
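# In formula form, the load-weighted average computed above is
#
#   weighted_price = sum_{t,b} load[t,b] * price[t,b] / sum_{t,b} load[t,b]
#
# summing over snapshots t and buses b, so high-demand hours count
# proportionally more than in the plain time average of calculate_prices.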
def calculate_market_values(n, label, market_values):
# Warning: doesn't include storage units
@ -450,41 +446,40 @@ def calculate_market_values(n, label, market_values):
## First do market value of generators ##
generators = n.generators.index[n.buses.loc[n.generators.bus,"carrier"] == carrier]
generators = n.generators.index[n.buses.loc[n.generators.bus, "carrier"] == carrier]
techs = n.generators.loc[generators,"carrier"].value_counts().index
techs = n.generators.loc[generators, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index | techs)
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
gens = generators[n.generators.loc[generators,"carrier"] == tech]
gens = generators[n.generators.loc[generators, "carrier"] == tech]
dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens,"bus"],axis=1).sum().reindex(columns=buses,fill_value=0.)
dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens, "bus"], axis=1).sum().reindex(columns=buses, fill_value=0.)
revenue = dispatch*n.buses_t.marginal_price[buses]
market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum()
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum()
## Now do market value of links ##
for i in ["0","1"]:
all_links = n.links.index[n.buses.loc[n.links["bus"+i],"carrier"] == carrier]
for i in ["0", "1"]:
all_links = n.links.index[n.buses.loc[n.links["bus"+i], "carrier"] == carrier]
techs = n.links.loc[all_links,"carrier"].value_counts().index
techs = n.links.loc[all_links, "carrier"].value_counts().index
market_values = market_values.reindex(market_values.index | techs)
market_values = market_values.reindex(market_values.index.union(techs))
for tech in techs:
links = all_links[n.links.loc[all_links,"carrier"] == tech]
links = all_links[n.links.loc[all_links, "carrier"] == tech]
dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links,"bus"+i],axis=1).sum().reindex(columns=buses,fill_value=0.)
dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links, "bus"+i], axis=1).sum().reindex(columns=buses, fill_value=0.)
revenue = dispatch*n.buses_t.marginal_price[buses]
revenue = dispatch * n.buses_t.marginal_price[buses]
market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum()
market_values.at[tech,label] = revenue.sum().sum() / dispatch.sum().sum()
return market_values
@ -492,17 +487,17 @@ def calculate_market_values(n, label, market_values):
def calculate_price_statistics(n, label, price_statistics):
price_statistics = price_statistics.reindex(price_statistics.index|pd.Index(["zero_hours","mean","standard_deviation"]))
price_statistics = price_statistics.reindex(price_statistics.index.union(pd.Index(["zero_hours", "mean", "standard_deviation"])))
buses = n.buses.index[n.buses.carrier == "AC"]
threshold = 0.1 #higher than phoney marginal_cost of wind/solar
threshold = 0.1 # higher than phoney marginal_cost of wind/solar
df = pd.DataFrame(data=0.,columns=buses,index=n.snapshots)
df = pd.DataFrame(data=0., columns=buses, index=n.snapshots)
df[n.buses_t.marginal_price[buses] < threshold] = 1.
price_statistics.at["zero_hours", label] = df.sum().sum()/(df.shape[0]*df.shape[1])
price_statistics.at["zero_hours", label] = df.sum().sum() / (df.shape[0] * df.shape[1])
price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean()
@ -511,38 +506,41 @@ def calculate_price_statistics(n, label, price_statistics):
return price_statistics
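# "zero_hours" above is the share of bus-hours at electricity (AC) buses
# whose marginal price falls below the 0.1 threshold (EUR/MWh in this
# model's conventions), i.e. df.sum().sum() divided by n_snapshots * n_buses.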
outputs = ["nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
]
def make_summaries(networks_dict):
columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["cluster","lv","opt", "co2_budget_name","planning_horizon"])
outputs = [
"nodal_costs",
"nodal_capacities",
"nodal_cfs",
"cfs",
"costs",
"capacities",
"curtailment",
"energy",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"price_statistics",
"market_values",
"metrics",
]
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(),
names=["cluster", "lv", "opt", "planning_horizon"]
)
df = {}
for output in outputs:
df[output] = pd.DataFrame(columns=columns,dtype=float)
df[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in iteritems(networks_dict):
for label, filename in networks_dict.items():
print(label, filename)
n = pypsa.Network(filename,
override_component_attrs=override_component_attrs)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(filename, override_component_attrs=overrides)
assign_carriers(n)
assign_locations(n)
@ -554,58 +552,46 @@ def make_summaries(networks_dict):
def to_csv(df):
for key in df:
df[key].to_csv(snakemake.output[key])
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml', encoding='utf8') as f:
snakemake.config = yaml.safe_load(f)
#overwrite some options
snakemake.config["run"] = "test"
snakemake.config["scenario"]["lv"] = [1.0]
snakemake.config["scenario"]["sector_opts"] = ["Co2L0-168H-T-H-B-I-solar3-dist1"]
snakemake.config["planning_horizons"] = ['2020', '2030', '2040', '2050']
snakemake.input = Dict()
snakemake.input['heat_demand_name'] = 'data/heating/daily_heat_demand.h5'
snakemake.output = Dict()
for item in outputs:
snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'],item=item)
networks_dict = {(cluster,lv,opt+sector_opt, co2_budget_name, planning_horizon) :
snakemake.config['results_dir'] + snakemake.config['run'] + '/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{co2_budget_name}_{planning_horizon}.nc'\
.format(simpl=simpl,
cluster=cluster,
opt=opt,
lv=lv,
sector_opt=sector_opt,
co2_budget_name=co2_budget_name,
planning_horizon=planning_horizon)\
for simpl in snakemake.config['scenario']['simpl'] \
for cluster in snakemake.config['scenario']['clusters'] \
for opt in snakemake.config['scenario']['opts'] \
for sector_opt in snakemake.config['scenario']['sector_opts'] \
for lv in snakemake.config['scenario']['lv'] \
for co2_budget_name in snakemake.config['scenario']['co2_budget_name'] \
for planning_horizon in snakemake.config['scenario']['planning_horizons']}
from helper import mock_snakemake
snakemake = mock_snakemake('make_summary')
networks_dict = {
(cluster, lv, opt+sector_opt, planning_horizon) :
snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \
for simpl in snakemake.config['scenario']['simpl'] \
for cluster in snakemake.config['scenario']['clusters'] \
for opt in snakemake.config['scenario']['opts'] \
for sector_opt in snakemake.config['scenario']['sector_opts'] \
for lv in snakemake.config['scenario']['lv'] \
for planning_horizon in snakemake.config['scenario']['planning_horizons']
}
print(networks_dict)
Nyears = 1
costs_db = prepare_costs(snakemake.input.costs,
snakemake.config['costs']['USD2013_to_EUR2013'],
snakemake.config['costs']['discountrate'],
Nyears)
costs_db = prepare_costs(
snakemake.input.costs,
snakemake.config['costs']['USD2013_to_EUR2013'],
snakemake.config['costs']['discountrate'],
Nyears,
snakemake.config['costs']['lifetime']
)
df = make_summaries(networks_dict)
df["metrics"].loc["total costs"] = df["costs"].sum()
to_csv(df)
if snakemake.config["foresight"]=='myopic':
cumulative_cost=calculate_cumulative_cost()
cumulative_cost.to_csv(snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/csvs/cumulative_cost.csv')

View File

@ -1,44 +1,20 @@
import pypsa
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse
from make_summary import assign_carriers
from plot_summary import rename_techs, preferred_order
import numpy as np
import pypsa
import matplotlib.pyplot as plt
import pandas as pd
from helper import override_component_attrs
# allow plotting without Xwindows
import matplotlib
matplotlib.use('Agg')
plt.style.use('ggplot')
# from sector/scripts/paper_graphics-co2_sweep.py
override_component_attrs = pypsa.descriptors.Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = [
"string", np.nan, np.nan, "2nd bus", "Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = [
"string", np.nan, np.nan, "3rd bus", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = [
"static or series", "per unit", 1., "2nd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = [
"static or series", "per unit", 1., "3rd bus efficiency", "Input (optional)"]
override_component_attrs["Link"].loc["p2"] = [
"series", "MW", 0., "2nd bus output", "Output"]
override_component_attrs["Link"].loc["p3"] = [
"series", "MW", 0., "3rd bus output", "Output"]
override_component_attrs["StorageUnit"].loc["p_dispatch"] = [
"series", "MW", 0., "Storage discharging.", "Output"]
override_component_attrs["StorageUnit"].loc["p_store"] = [
"series", "MW", 0., "Storage charging.", "Output"]
# ----------------- PLOT HELPERS ---------------------------------------------
def rename_techs_tyndp(tech):
tech = rename_techs(tech)
if "heat pump" in tech or "resistive heater" in tech:
@ -61,8 +37,7 @@ def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure()
def axes2pt():
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[
0] * (72. / fig.dpi)
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (72. / fig.dpi)
ellipses = []
if not dont_resize_actively:
@ -90,20 +65,14 @@ def make_legend_circles_for(sizes, scale=1.0, **kw):
def assign_location(n):
for c in n.iterate_components(n.one_port_components | n.branch_components):
ifind = pd.Series(c.df.index.str.find(" ", start=4), c.df.index)
for i in ifind.value_counts().index:
# these have already been assigned defaults
if i == -1:
continue
if i == -1: continue
names = ifind.index[ifind == i]
c.df.loc[names, 'location'] = names.str[:i]
# ----------------- PLOT FUNCTIONS --------------------------------------------
def plot_map(network, components=["links", "stores", "storage_units", "generators"],
bus_size_factor=1.7e10, transmission=False):
@ -126,11 +95,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
costs = pd.concat([costs, costs_c], axis=1)
print(comp, costs)
costs = costs.groupby(costs.columns, axis=1).sum()
costs.drop(list(costs.columns[(costs == 0.).all()]), axis=1, inplace=True)
new_columns = ((preferred_order & costs.columns)
new_columns = (preferred_order.intersection(costs.columns)
.append(costs.columns.difference(preferred_order)))
costs = costs[new_columns]
@ -147,7 +117,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
n.links.carrier != "B2B")], inplace=True)
# drop non-bus
to_drop = costs.index.levels[0] ^ n.buses.index
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0)
@ -193,24 +163,34 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
n.plot(bus_sizes=costs / bus_size_factor,
bus_colors=snakemake.config['plotting']['tech_colors'],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, boundaries=(-10, 30, 34, 70),
color_geomap={'ocean': 'lightblue', 'land': "palegoldenrod"})
n.plot(
bus_sizes=costs / bus_size_factor,
bus_colors=snakemake.config['plotting']['tech_colors'],
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, **map_opts
)
handles = make_legend_circles_for(
[5e9, 1e9], scale=bus_size_factor, facecolor="gray")
[5e9, 1e9],
scale=bus_size_factor,
facecolor="gray"
)
labels = ["{} bEUR/a".format(s) for s in (5, 1)]
l2 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.01, 1.01),
labelspacing=1.0,
framealpha=1.,
title='System cost',
handler_map=make_handler_map_to_scale_circles_as_in(ax))
l2 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.01, 1.01),
labelspacing=1.0,
frameon=False,
title='System cost',
handler_map=make_handler_map_to_scale_circles_as_in(ax)
)
ax.add_artist(l2)
handles = []
@ -221,16 +201,23 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.30, 1.01),
framealpha=1,
labelspacing=0.8, handletextpad=1.5,
title=title)
l1_1 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.22, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title=title
)
ax.add_artist(l1_1)
fig.savefig(snakemake.output.map, transparent=True,
bbox_inches="tight")
fig.savefig(
snakemake.output.map,
transparent=True,
bbox_inches="tight"
)
def plot_h2_map(network):
@ -253,7 +240,7 @@ def plot_h2_map(network):
elec = n.links.index[n.links.carrier == "H2 Electrolysis"]
bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby(n.links.loc[elec,"bus0"]).sum() / bus_size_factor
bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby(n.links.loc[elec, "bus0"]).sum() / bus_size_factor
# make a fake MultiIndex so that area is correct for legend
bus_sizes.index = pd.MultiIndex.from_product(
@ -271,26 +258,38 @@ def plot_h2_map(network):
print(n.links[["bus0", "bus1"]])
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig, ax = plt.subplots(
figsize=(7, 6),
subplot_kw={"projection": ccrs.PlateCarree()}
)
fig.set_size_inches(7, 6)
n.plot(bus_sizes=bus_sizes,
bus_colors={"electrolysis": bus_color},
link_colors=link_color,
link_widths=link_widths,
branch_components=["Link"],
ax=ax, boundaries=(-10, 30, 34, 70))
n.plot(
bus_sizes=bus_sizes,
bus_colors={"electrolysis": bus_color},
link_colors=link_color,
link_widths=link_widths,
branch_components=["Link"],
ax=ax, **map_opts
)
handles = make_legend_circles_for(
[50000, 10000], scale=bus_size_factor, facecolor=bus_color)
[50000, 10000],
scale=bus_size_factor,
facecolor=bus_color
)
labels = ["{} GW".format(s) for s in (50, 10)]
l2 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.01, 1.01),
labelspacing=1.0,
framealpha=1.,
title='Electrolyzer capacity',
handler_map=make_handler_map_to_scale_circles_as_in(ax))
l2 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.01, 1.01),
labelspacing=1.0,
frameon=False,
title='Electrolyzer capacity',
handler_map=make_handler_map_to_scale_circles_as_in(ax)
)
ax.add_artist(l2)
handles = []
@ -300,15 +299,24 @@ def plot_h2_map(network):
handles.append(plt.Line2D([0], [0], color=link_color,
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.30, 1.01),
framealpha=1,
labelspacing=0.8, handletextpad=1.5,
title='H2 pipeline capacity')
l1_1 = ax.legend(
handles, labels,
loc="upper left",
bbox_to_anchor=(0.28, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title='H2 pipeline capacity'
)
ax.add_artist(l1_1)
fig.savefig(snakemake.output.map.replace("-costs-all","-h2_network"), transparent=True,
bbox_inches="tight")
fig.savefig(
snakemake.output.map.replace("-costs-all","-h2_network"),
transparent=True,
bbox_inches="tight"
)
def plot_map_without(network):
@ -319,12 +327,13 @@ def plot_map_without(network):
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()})
fig.set_size_inches(7, 6)
fig, ax = plt.subplots(
figsize=(7, 6),
subplot_kw={"projection": ccrs.PlateCarree()}
)
# PDF has minimum width, so set these to zero
line_lower_threshold = 0.
line_lower_threshold = 200.
line_upper_threshold = 1e4
linewidth_factor = 2e3
ac_color = "gray"
@ -333,8 +342,8 @@ def plot_map_without(network):
# hack because impossible to drop buses...
n.buses.loc["EU gas", ["x", "y"]] = n.buses.loc["DE0 0", ["x", "y"]]
n.links.drop(n.links.index[(n.links.carrier != "DC") & (
n.links.carrier != "B2B")], inplace=True)
to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")]
n.links.drop(to_drop, inplace=True)
if snakemake.wildcards["lv"] == "1.0":
line_widths = n.lines.s_nom
@ -343,19 +352,20 @@ def plot_map_without(network):
line_widths = n.lines.s_nom_min
link_widths = n.links.p_nom_min
line_widths[line_widths < line_upper_threshold] = 0.
link_widths[link_widths < line_upper_threshold] = 0.
line_widths[line_widths < line_lower_threshold] = 0.
link_widths[link_widths < line_lower_threshold] = 0.
line_widths[line_widths > line_upper_threshold] = line_upper_threshold
link_widths[link_widths > line_upper_threshold] = line_upper_threshold
n.plot(bus_sizes=10,
bus_colors="k",
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, boundaries=(-10, 30, 34, 70))
n.plot(
bus_colors="k",
line_colors=ac_color,
link_colors=dc_color,
line_widths=line_widths / linewidth_factor,
link_widths=link_widths / linewidth_factor,
ax=ax, **map_opts
)
handles = []
labels = []
@ -366,12 +376,16 @@ def plot_map_without(network):
labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.05, 1.01),
framealpha=1,
frameon=False,
labelspacing=0.8, handletextpad=1.5,
title='Today\'s transmission')
ax.add_artist(l1_1)
fig.savefig(snakemake.output.today, transparent=True, bbox_inches="tight")
fig.savefig(
snakemake.output.today,
transparent=True,
bbox_inches="tight"
)
def plot_series(network, carrier="AC", name="test"):
@ -463,7 +477,7 @@ def plot_series(network, carrier="AC", name="test"):
"battery storage",
"hot water storage"])
new_columns = ((preferred_order & supply.columns)
new_columns = (preferred_order.intersection(supply.columns)
.append(supply.columns.difference(preferred_order)))
supply = supply.groupby(supply.columns, axis=1).sum()
@ -488,7 +502,7 @@ def plot_series(network, carrier="AC", name="test"):
new_handles.append(handles[i])
new_labels.append(labels[i])
ax.legend(new_handles, new_labels, ncol=3, loc="upper left")
ax.legend(new_handles, new_labels, ncol=3, loc="upper left", frameon=False)
ax.set_xlim([start, stop])
ax.set_ylim([-1300, 1900])
ax.grid(True)
@ -502,41 +516,28 @@ def plot_series(network, carrier="AC", name="test"):
transparent=True)
# %%
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml') as f:
snakemake.config = yaml.safe_load(f)
snakemake.config['run'] = "retro_vs_noretro"
snakemake.wildcards = {"lv": "1.0"} # lv1.0, lv1.25, lvopt
name = "elec_s_48_lv{}__Co2L0-3H-T-H-B".format(snakemake.wildcards["lv"])
suffix = "_retro_tes"
name = name + suffix
snakemake.input = Dict()
snakemake.output = Dict(
map=(snakemake.config['results_dir'] + snakemake.config['run']
+ "/maps/{}".format(name)),
today=(snakemake.config['results_dir'] + snakemake.config['run']
+ "/maps/{}.pdf".format(name)))
snakemake.input.scenario = "lv" + snakemake.wildcards["lv"]
# snakemake.config["run"] = "bio_costs"
path = snakemake.config['results_dir'] + snakemake.config['run']
snakemake.input.network = (path +
"/postnetworks/{}.nc"
.format(name))
snakemake.output.network = (path +
"/maps/{}"
.format(name))
from helper import mock_snakemake
snakemake = mock_snakemake(
'plot_network',
simpl='',
clusters=48,
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
planning_horizons=2050,
)
n = pypsa.Network(snakemake.input.network,
override_component_attrs=override_component_attrs)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
plot_map(n, components=["generators", "links", "stores", "storage_units"],
bus_size_factor=1.5e10, transmission=False)
map_opts = snakemake.config['plotting']['map']
plot_map(n,
components=["generators", "links", "stores", "storage_units"],
bus_size_factor=1.5e10,
transmission=False
)
plot_h2_map(n)
plot_map_without(n)

View File

@ -1,43 +1,60 @@
import numpy as np
import pandas as pd
#allow plotting without Xwindows
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from prepare_sector_network import co2_emissions_year
#consolidate and rename
def rename_techs(label):
prefix_to_remove = ["residential ","services ","urban ","rural ","central ","decentral "]
prefix_to_remove = [
"residential ",
"services ",
"urban ",
"rural ",
"central ",
"decentral "
]
rename_if_contains = ["CHP","gas boiler","biogas","solar thermal","air heat pump","ground heat pump","resistive heater","Fischer-Tropsch"]
rename_if_contains = [
"CHP",
"gas boiler",
"biogas",
"solar thermal",
"air heat pump",
"ground heat pump",
"resistive heater",
"Fischer-Tropsch"
]
rename_if_contains_dict = {"water tanks" : "hot water storage",
"retrofitting" : "building retrofitting",
"H2" : "hydrogen storage",
"battery" : "battery storage",
"CCS" : "CCS"}
rename_if_contains_dict = {
"water tanks": "hot water storage",
"retrofitting": "building retrofitting",
"H2": "hydrogen storage",
"battery": "battery storage",
"CC": "CC"
}
rename = {"solar" : "solar PV",
"Sabatier" : "methanation",
"offwind" : "offshore wind",
"offwind-ac" : "offshore wind (AC)",
"offwind-dc" : "offshore wind (DC)",
"onwind" : "onshore wind",
"ror" : "hydroelectricity",
"hydro" : "hydroelectricity",
"PHS" : "hydroelectricity",
"co2 Store" : "DAC",
"co2 stored" : "CO2 sequestration",
"AC" : "transmission lines",
"DC" : "transmission lines",
"B2B" : "transmission lines"}
rename = {
"solar": "solar PV",
"Sabatier": "methanation",
"offwind": "offshore wind",
"offwind-ac": "offshore wind (AC)",
"offwind-dc": "offshore wind (DC)",
"onwind": "onshore wind",
"ror": "hydroelectricity",
"hydro": "hydroelectricity",
"PHS": "hydroelectricity",
"co2 Store": "DAC",
"co2 stored": "CO2 sequestration",
"AC": "transmission lines",
"DC": "transmission lines",
"B2B": "transmission lines"
}
for ptr in prefix_to_remove:
if label[:len(ptr)] == ptr:
@ -57,18 +74,56 @@ def rename_techs(label):
return label
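# Illustrative behaviour (assuming the elided body strips the prefixes and
# then applies the contains/exact renames in the order defined above):
#
#   rename_techs("residential rural gas boiler")  # -> "gas boiler"
#   rename_techs("H2 Electrolysis")               # -> "hydrogen storage"
#   rename_techs("offwind-ac")                    # -> "offshore wind (AC)"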
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","solid biomass","biogas","onshore wind","offshore wind","offshore wind (AC)","offshore wind (DC)","solar PV","solar thermal","solar","building retrofitting","ground heat pump","air heat pump","heat pump","resistive heater","power-to-heat","gas-to-power/heat","CHP","OCGT","gas boiler","gas","natural gas","helmeth","methanation","hydrogen storage","power-to-gas","power-to-liquid","battery storage","hot water storage","CO2 sequestration"])
preferred_order = pd.Index([
"transmission lines",
"hydroelectricity",
"hydro reservoir",
"run of river",
"pumped hydro storage",
"solid biomass",
"biogas",
"onshore wind",
"offshore wind",
"offshore wind (AC)",
"offshore wind (DC)",
"solar PV",
"solar thermal",
"solar",
"building retrofitting",
"ground heat pump",
"air heat pump",
"heat pump",
"resistive heater",
"power-to-heat",
"gas-to-power/heat",
"CHP",
"OCGT",
"gas boiler",
"gas",
"natural gas",
"helmeth",
"methanation",
"hydrogen storage",
"power-to-gas",
"power-to-liquid",
"battery storage",
"hot water storage",
"CO2 sequestration"
])
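# preferred_order is combined with an actual index via the pattern used in
# the plotting functions below:
#
#   new_index = preferred_order.intersection(df.index).append(
#       df.index.difference(preferred_order))
#
# which keeps the curated ordering for known technologies and appends the
# rest; pandas' Index.intersection preserves the order of the calling index
# (sort=False is the default).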
def plot_costs():
cost_df = pd.read_csv(snakemake.input.costs,index_col=list(range(3)),header=list(range(n_header)))
cost_df = pd.read_csv(
snakemake.input.costs,
index_col=list(range(3)),
header=list(range(n_header))
)
df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()
#convert to billions
df = df/1e9
df = df / 1e9
df = df.groupby(df.index.map(rename_techs)).sum()
@ -82,15 +137,18 @@ def plot_costs():
print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.sum().sort_values().index
fig, ax = plt.subplots()
fig.set_size_inches((12,8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])
fig, ax = plt.subplots(figsize=(12,8))
df.loc[new_index,new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]
)
handles,labels = ax.get_legend_handles_labels()
@ -103,24 +161,25 @@ def plot_costs():
ax.set_xlabel("")
ax.grid(axis="y")
ax.grid(axis='x')
ax.legend(handles,labels,ncol=4,loc="upper left")
ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1,1], frameon=False)
fig.tight_layout()
fig.savefig(snakemake.output.costs,transparent=True)
fig.savefig(snakemake.output.costs, bbox_inches='tight')
def plot_energy():
energy_df = pd.read_csv(snakemake.input.energy,index_col=list(range(2)),header=list(range(n_header)))
energy_df = pd.read_csv(
snakemake.input.energy,
index_col=list(range(2)),
header=list(range(n_header))
)
df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
#convert MWh to TWh
df = df/1e6
df = df / 1e6
df = df.groupby(df.index.map(rename_techs)).sum()
@ -136,56 +195,60 @@ def plot_energy():
print(df)
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.columns.sort_values()
#new_columns = df.sum().sort_values().index
fig, ax = plt.subplots()
fig.set_size_inches((12,8))
fig, ax = plt.subplots(figsize=(12,8))
print(df.loc[new_index,new_columns])
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])
print(df.loc[new_index, new_columns])
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index]
)
handles,labels = ax.get_legend_handles_labels()
handles.reverse()
labels.reverse()
ax.set_ylim([snakemake.config['plotting']['energy_min'],snakemake.config['plotting']['energy_max']])
ax.set_ylim([snakemake.config['plotting']['energy_min'], snakemake.config['plotting']['energy_max']])
ax.set_ylabel("Energy [TWh/a]")
ax.set_xlabel("")
ax.grid(axis="y")
ax.grid(axis="x")
ax.legend(handles,labels,ncol=4,loc="upper left")
ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False)
fig.tight_layout()
fig.savefig(snakemake.output.energy,transparent=True)
fig.savefig(snakemake.output.energy, bbox_inches='tight')
def plot_balances():
co2_carriers = ["co2","co2 stored","process emissions"]
co2_carriers = ["co2", "co2 stored", "process emissions"]
balances_df = pd.read_csv(snakemake.input.balances,index_col=list(range(3)),header=list(range(n_header)))
balances_df = pd.read_csv(
snakemake.input.balances,
index_col=list(range(3)),
header=list(range(n_header))
)
balances = {i.replace(" ","_") : [i] for i in balances_df.index.levels[0]}
balances["energy"] = balances_df.index.levels[0]^co2_carriers
balances = {i.replace(" ","_"): [i] for i in balances_df.index.levels[0]}
balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers]
for k,v in balances.items():
for k, v in balances.items():
df = balances_df.loc[v]
df = df.groupby(df.index.get_level_values(2)).sum()
#convert MWh to TWh
df = df/1e6
df = df / 1e6
#remove trailing link ports
df.index = [i[:-1] if ((i != "co2") and (i[-1:] in ["0","1","2","3"])) else i for i in df.index]
@ -205,13 +268,11 @@ def plot_balances():
if df.empty:
continue
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.columns.sort_values()
fig, ax = plt.subplots()
fig.set_size_inches((12,8))
fig, ax = plt.subplots(figsize=(12,8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])
@ -228,37 +289,162 @@ def plot_balances():
ax.set_xlabel("")
ax.grid(axis="y")
ax.grid(axis="x")
ax.legend(handles,labels,ncol=4,loc="upper left")
ax.legend(handles, labels, ncol=1, loc="upper left", bbox_to_anchor=[1, 1], frameon=False)
fig.tight_layout()
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches='tight')
fig.savefig(snakemake.output.balances[:-10] + k + ".pdf",transparent=True)
def historical_emissions(cts):
"""
read historical emissions to add them to the carbon budget plot
"""
#https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
#downloaded 201228 (modified by EEA last on 201221)
fn = "data/eea/UNFCCC_v23.csv"
df = pd.read_csv(fn, encoding="latin-1")
df.loc[df["Year"] == "1985-1987","Year"] = 1986
df["Year"] = df["Year"].astype(int)
df = df.set_index(['Year', 'Sector_name', 'Country_code', 'Pollutant_name']).sort_index()
e = pd.Series()
e["electricity"] = '1.A.1.a - Public Electricity and Heat Production'
e['residential non-elec'] = '1.A.4.b - Residential'
e['services non-elec'] = '1.A.4.a - Commercial/Institutional'
e['rail non-elec'] = "1.A.3.c - Railways"
e["road non-elec"] = '1.A.3.b - Road Transportation'
e["domestic navigation"] = "1.A.3.d - Domestic Navigation"
e['international navigation'] = '1.D.1.b - International Navigation'
e["domestic aviation"] = '1.A.3.a - Domestic Aviation'
e["international aviation"] = '1.D.1.a - International Aviation'
e['total energy'] = '1 - Energy'
e['industrial processes'] = '2 - Industrial Processes and Product Use'
e['agriculture'] = '3 - Agriculture'
e['LULUCF'] = '4 - Land Use, Land-Use Change and Forestry'
e['waste management'] = '5 - Waste management'
e['other'] = '6 - Other Sector'
e['indirect'] = 'ind_CO2 - Indirect CO2'
e["total wL"] = "Total (with LULUCF)"
e["total woL"] = "Total (without LULUCF)"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
if "GB" in cts:
cts.remove("GB")
cts.append("UK")
year = np.arange(1990,2018).tolist()
idx = pd.IndexSlice
co2_totals = df.loc[idx[year,e.values,cts,pol],"emissions"].unstack("Year").rename(index=pd.Series(e.index,e.values))
co2_totals = (1/1e6)*co2_totals.groupby(level=0, axis=0).sum() #Gton CO2
co2_totals.loc['industrial non-elec'] = co2_totals.loc['total energy'] - co2_totals.loc[['electricity', 'services non-elec','residential non-elec', 'road non-elec',
'rail non-elec', 'domestic aviation', 'international aviation', 'domestic navigation',
'international navigation']].sum()
emissions = co2_totals.loc["electricity"]
if "T" in opts:
emissions += co2_totals.loc[[i+ " non-elec" for i in ["rail","road"]]].sum()
if "H" in opts:
emissions += co2_totals.loc[[i+ " non-elec" for i in ["residential","services"]]].sum()
if "I" in opts:
emissions += co2_totals.loc[["industrial non-elec","industrial processes",
"domestic aviation","international aviation",
"domestic navigation","international navigation"]].sum()
return emissions
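# Unit note for the conversion above (a reading of the code): the EEA table
# reports emissions in kt (Gg), so the factor 1/1e6 yields Gt CO2
# (1 Gt = 1e6 kt), matching the "Gton CO2" comment.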
def plot_carbon_budget_distribution():
"""
Plot historical carbon emissions in the EU and decarbonization path
"""
import matplotlib.gridspec as gridspec
import seaborn as sns; sns.set()
sns.set_style('ticks')
plt.style.use('seaborn-ticks')
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs1[0,0])
ax1.set_ylabel('CO$_2$ emissions (Gt per year)',fontsize=22)
ax1.set_ylim([0,5])
ax1.set_xlim([1990,snakemake.config['scenario']['planning_horizons'][-1]+1])
path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/'
countries=pd.read_csv(path_cb + 'countries.csv', index_col=1)
cts=countries.index.to_list()
e_1990 = co2_emissions_year(cts, opts, year=1990)
CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv',
index_col=0)
ax1.plot(e_1990*CO2_CAP[o],linewidth=3,
color='dodgerblue', label=None)
emissions = historical_emissions(cts)
ax1.plot(emissions, color='black', linewidth=3, label=None)
#plot committed and under-discussion targets
#(notice that historical emissions include all countries in the
# network, but targets refer to EU)
ax1.plot([2020],[0.8*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
ax1.plot([2030],[0.45*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
markeredgecolor='black')
ax1.plot([2030],[0.6*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
ax1.plot([2050, 2050],[x*emissions[1990] for x in [0.2, 0.05]],
color='gray', linewidth=2, marker='_', alpha=0.5)
ax1.plot([2050],[0.01*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
linewidth=0, markeredgecolor='black',
label='EU under-discussion target', zorder=10,
clip_on=False)
ax1.plot([2050],[0.125*emissions[1990]],'ro',
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black', label='EU committed target')
ax1.legend(fancybox=True, fontsize=18, loc=(0.01,0.01),
facecolor='white', frameon=True)
path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/'
plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300)
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('config.yaml', encoding='utf8') as f:
snakemake.config = yaml.safe_load(f)
snakemake.input = Dict()
snakemake.output = Dict()
from helper import mock_snakemake
snakemake = mock_snakemake('plot_summary')
n_header = 4
for item in ["costs", "energy"]:
snakemake.input[item] = snakemake.config['summary_dir'] + '/{name}/csvs/{item}.csv'.format(name=snakemake.config['run'],item=item)
snakemake.output[item] = snakemake.config['summary_dir'] + '/{name}/graphs/{item}.pdf'.format(name=snakemake.config['run'],item=item)
snakemake.input["balances"] = snakemake.config['summary_dir'] + '/test/csvs/supply_energy.csv'
snakemake.output["balances"] = snakemake.config['summary_dir'] + '/test/graphs/balances-energy.csv'
n_header = 5
plot_costs()
plot_energy()
plot_balances()
for sector_opts in snakemake.config['scenario']['sector_opts']:
opts=sector_opts.split('-')
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution()

File diff suppressed because it is too large

View File

@ -1,52 +1,35 @@
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
import gc
import os
"""Solve network."""
import pypsa
import numpy as np
from pypsa.linopt import get_var, linexpr, define_constraints
from pypsa.descriptors import free_output_series_dataframes
# Suppress logging of the slack bus choices
pypsa.pf.logger.setLevel(logging.WARNING)
from pypsa.linopf import network_lopf, ilopf
from vresutils.benchmark import memory_logger
from helper import override_component_attrs
import logging
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)
#First tell PyPSA that links can have multiple outputs by
#overriding the component_attrs. This can be done for
#as many buses as you need with format busi for i = 2,3,4,5,....
#See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs
def add_land_use_constraint(n):
#warning: this will miss existing offwind which is not classed as AC or DC and has carrier 'offwind'
for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']:
existing = n.generators.loc[n.generators.carrier == carrier, "p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum()
existing.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
n.generators.loc[existing.index, "p_nom_max"] -= existing
override_component_attrs = pypsa.descriptors.Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs["Link"].loc["bus2"] = ["string",np.nan,np.nan,"2nd bus","Input (optional)"]
override_component_attrs["Link"].loc["bus3"] = ["string",np.nan,np.nan,"3rd bus","Input (optional)"]
override_component_attrs["Link"].loc["efficiency2"] = ["static or series","per unit",1.,"2nd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["efficiency3"] = ["static or series","per unit",1.,"3rd bus efficiency","Input (optional)"]
override_component_attrs["Link"].loc["p2"] = ["series","MW",0.,"2nd bus output","Output"]
override_component_attrs["Link"].loc["p3"] = ["series","MW",0.,"3rd bus output","Output"]
n.generators.p_nom_max.clip(lower=0, inplace=True)
def patch_pyomo_tmpdir(tmpdir):
# PYOMO should write its lp files into tmp here
import os
if not os.path.isdir(tmpdir):
os.mkdir(tmpdir)
from pyutilib.services import TempfileManager
TempfileManager.tempdir = tmpdir
def prepare_network(n, solve_opts=None):
if solve_opts is None:
solve_opts = snakemake.config['solving']['options']
if 'clip_p_max_pu' in solve_opts:
for df in (n.generators_t.p_max_pu, n.generators_t.p_min_pu, n.storage_units_t.inflow):
df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True)
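# e.g. with clip_p_max_pu = 1e-2 a per-unit series [0.003, 0.05, 0.9]
# becomes [0.0, 0.05, 0.9]: negligible availabilities are zeroed to avoid
# numerical noise in the LP.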
@ -70,50 +53,31 @@ def prepare_network(n, solve_opts=None):
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if 'marginal_cost' in t.df:
np.random.seed(174)
t.df['marginal_cost'] += 1e-2 + 2e-3*(np.random.random(len(t.df)) - 0.5)
t.df['marginal_cost'] += 1e-2 + 2e-3 * (np.random.random(len(t.df)) - 0.5)
for t in n.iterate_components(['Line', 'Link']):
np.random.seed(123)
t.df['capital_cost'] += (1e-1 + 2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length']
t.df['capital_cost'] += (1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)) * t.df['length']
if solve_opts.get('nhours'):
nhours = solve_opts['nhours']
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760./nhours
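# e.g. nhours = 2190 keeps the first quarter of the year and reweights each
# snapshot by 8760/2190 = 4, so that annual energy and cost totals remain
# comparable with a full-year run (intended for quick test runs only).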
if snakemake.config['foresight']=='myopic':
if snakemake.config['foresight'] == 'myopic':
add_land_use_constraint(n)
return n
def add_opts_constraints(n, opts=None):
if opts is None:
opts = snakemake.wildcards.opts.split('-')
if 'BAU' in opts:
mincaps = snakemake.config['electricity']['BAU_mincapacities']
def bau_mincapacities_rule(model, carrier):
gens = n.generators.index[n.generators.p_nom_extendable & (n.generators.carrier == carrier)]
return sum(model.generator_p_nom[gen] for gen in gens) >= mincaps[carrier]
n.model.bau_mincapacities = pypsa.opt.Constraint(list(mincaps), rule=bau_mincapacities_rule)
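# The pyomo-style rule above was removed in the move to PyPSA's linopt
# interface. A hedged sketch of an equivalent linopt formulation, mirroring
# pypsa-eur's add_BAU_constraints (the function name here is illustrative):
#
#   import pandas as pd
#   from pypsa.linopt import join_exprs
#
#   def add_bau_mincapacities_linopt(n, mincaps):
#       # one ">=" constraint per carrier on the sum of extendable p_nom
#       lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom')))
#              .groupby(n.generators.carrier).apply(join_exprs))
#       define_constraints(n, lhs, '>=', pd.Series(mincaps)[lhs.index],
#                          'Carrier', 'bau_mincaps')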
if 'SAFE' in opts:
peakdemand = (1. + snakemake.config['electricity']['SAFE_reservemargin']) * n.loads_t.p_set.sum(axis=1).max()
conv_techs = snakemake.config['plotting']['conv_techs']
exist_conv_caps = n.generators.loc[n.generators.carrier.isin(conv_techs) & ~n.generators.p_nom_extendable, 'p_nom'].sum()
ext_gens_i = n.generators.index[n.generators.carrier.isin(conv_techs) & n.generators.p_nom_extendable]
n.model.safe_peakdemand = pypsa.opt.Constraint(expr=sum(n.model.generator_p_nom[gen] for gen in ext_gens_i) >= peakdemand - exist_conv_caps)
def add_eps_storage_constraint(n):
if not hasattr(n, 'epsilon'):
n.epsilon = 1e-5
fix_sus_i = n.storage_units.index[~ n.storage_units.p_nom_extendable]
n.model.objective.expr += sum(n.epsilon * n.model.state_of_charge[su, n.snapshots[0]] for su in fix_sus_i)
def add_battery_constraints(n):
chargers = n.links.index[n.links.carrier.str.contains("battery charger") & n.links.p_nom_extendable]
dischargers = chargers.str.replace("charger","discharger")
chargers_b = n.links.carrier.str.contains("battery charger")
chargers = n.links.index[chargers_b & n.links.p_nom_extendable]
dischargers = chargers.str.replace("charger", "discharger")
if chargers.empty or ('Link', 'p_nom') not in n.variables.index:
return
link_p_nom = get_var(n, "Link", "p_nom")
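# The diff hunk is cut off here. For context, a hedged reconstruction of the
# remaining body, based on the released pypsa-eur-sec file: charger and
# discharger capacities are tied together so both converters are sized
# consistently.
#
#   lhs = linexpr((1, link_p_nom[chargers]),
#                 (-n.links.loc[dischargers, "efficiency"].values,
#                  link_p_nom[dischargers].values))
#   define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio')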
@ -135,44 +99,28 @@ def add_chp_constraints(n):
electric = n.links.index[electric_bool]
heat = n.links.index[heat_bool]
electric_ext = n.links.index[electric_bool & n.links.p_nom_extendable]
heat_ext = n.links.index[heat_bool & n.links.p_nom_extendable]
electric_fix = n.links.index[electric_bool & ~n.links.p_nom_extendable]
heat_fix = n.links.index[heat_bool & ~n.links.p_nom_extendable]
link_p = get_var(n, "Link", "p")
if not electric_ext.empty:
link_p_nom = get_var(n, "Link", "p_nom")
#ratio of output heat to electricity set by p_nom_ratio
lhs = linexpr((n.links.loc[electric_ext,"efficiency"]
*n.links.loc[electric_ext,'p_nom_ratio'],
lhs = linexpr((n.links.loc[electric_ext, "efficiency"]
*n.links.loc[electric_ext, "p_nom_ratio"],
link_p_nom[electric_ext]),
(-n.links.loc[heat_ext,"efficiency"].values,
(-n.links.loc[heat_ext, "efficiency"].values,
link_p_nom[heat_ext].values))
define_constraints(n, lhs, "=", 0, 'chplink', 'fix_p_nom_ratio')
if not electric.empty:
link_p = get_var(n, "Link", "p")
#backpressure
lhs = linexpr((n.links.loc[electric,'c_b'].values
*n.links.loc[heat,"efficiency"],
link_p[heat]),
(-n.links.loc[electric,"efficiency"].values,
link_p[electric].values))
define_constraints(n, lhs, "<=", 0, 'chplink', 'backpressure')
if not electric_ext.empty:
link_p_nom = get_var(n, "Link", "p_nom")
link_p = get_var(n, "Link", "p")
#top_iso_fuel_line for extendable
lhs = linexpr((1,link_p[heat_ext]),
(1,link_p[electric_ext].values),
@ -180,221 +128,93 @@ def add_chp_constraints(n):
define_constraints(n, lhs, "<=", 0, 'chplink', 'top_iso_fuel_line_ext')
if not electric_fix.empty:
link_p = get_var(n, "Link", "p")
#top_iso_fuel_line for fixed
lhs = linexpr((1,link_p[heat_fix]),
(1,link_p[electric_fix].values))
define_constraints(n, lhs, "<=", n.links.loc[electric_fix,"p_nom"].values, 'chplink', 'top_iso_fuel_line_fix')
rhs = n.links.loc[electric_fix, "p_nom"].values
def add_land_use_constraint(n):
define_constraints(n, lhs, "<=", rhs, 'chplink', 'top_iso_fuel_line_fix')
#warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
for carrier in ['solar', 'onwind', 'offwind-ac', 'offwind-dc']:
existing_capacities = n.generators.loc[n.generators.carrier==carrier,"p_nom"].groupby(n.generators.bus.map(n.buses.location)).sum()
existing_capacities.index += " " + carrier + "-" + snakemake.wildcards.planning_horizons
n.generators.loc[existing_capacities.index,"p_nom_max"] -= existing_capacities
if not electric.empty:
#backpressure
lhs = linexpr((n.links.loc[electric, "c_b"].values
*n.links.loc[heat, "efficiency"],
link_p[heat]),
(-n.links.loc[electric, "efficiency"].values,
link_p[electric].values))
define_constraints(n, lhs, "<=", 0, 'chplink', 'backpressure')
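# Compact form of the three CHP constraints (eta = efficiency, r = p_nom_ratio,
# p_h/p_e dispatch and P_nom_h/P_nom_e capacities of the heat/electric links):
#   fix_p_nom_ratio:   eta_e * r * P_nom_e  =  eta_h * P_nom_h
#   backpressure:      c_b * eta_h * p_h   <=  eta_e * p_e
#   top_iso_fuel_line: p_h + p_e           <=  P_nom_e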
n.generators.p_nom_max[n.generators.p_nom_max<0]=0.
def extra_functionality(n, snapshots):
#add_opts_constraints(n, opts)
#add_eps_storage_constraint(n)
add_chp_constraints(n)
add_battery_constraints(n)
def fix_branches(n, lines_s_nom=None, links_p_nom=None):
if lines_s_nom is not None and len(lines_s_nom) > 0:
n.lines.loc[lines_s_nom.index,"s_nom"] = lines_s_nom.values
n.lines.loc[lines_s_nom.index,"s_nom_extendable"] = False
if links_p_nom is not None and len(links_p_nom) > 0:
n.links.loc[links_p_nom.index,"p_nom"] = links_p_nom.values
n.links.loc[links_p_nom.index,"p_nom_extendable"] = False
def solve_network(n, config=None, solver_log=None, opts=None):
if config is None:
config = snakemake.config['solving']
solve_opts = config['options']
solver_options = config['solver'].copy()
if solver_log is None:
solver_log = snakemake.log.solver
def solve_network(n, config, opts='', **kwargs):
solver_options = config['solving']['solver'].copy()
solver_name = solver_options.pop('name')
cf_solving = config['solving']['options']
track_iterations = cf_solving.get('track_iterations', False)
min_iterations = cf_solving.get('min_iterations', 4)
max_iterations = cf_solving.get('max_iterations', 6)
def run_lopf(n, allow_warning_status=False, fix_zero_lines=False, fix_ext_lines=False):
free_output_series_dataframes(n)
if fix_zero_lines:
fix_lines_b = (n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable
fix_links_b = (n.links.carrier=='DC') & (n.links.p_nom_opt == 0.) & n.links.p_nom_extendable
fix_branches(n,
lines_s_nom=pd.Series(0., n.lines.index[fix_lines_b]),
links_p_nom=pd.Series(0., n.links.index[fix_links_b]))
if fix_ext_lines:
fix_branches(n,
lines_s_nom=n.lines.loc[n.lines.s_nom_extendable, 's_nom_opt'],
links_p_nom=n.links.loc[(n.links.carrier=='DC') & n.links.p_nom_extendable, 'p_nom_opt'])
if "line_volume_constraint" in n.global_constraints.index:
n.global_constraints.drop("line_volume_constraint",inplace=True)
else:
if "line_volume_constraint" not in n.global_constraints.index:
line_volume = getattr(n, 'line_volume_limit', None)
if line_volume is not None and not np.isinf(line_volume):
n.add("GlobalConstraint",
"line_volume_constraint",
type="transmission_volume_expansion_limit",
carrier_attribute="AC,DC",
sense="<=",
constant=line_volume)
# Firing up solve will increase memory consumption tremendously, so
# make sure we freed everything we can
gc.collect()
#from pyomo.opt import ProblemFormat
#print("Saving model to MPS")
#n.model.write('/home/ka/ka_iai/ka_kc5996/projects/pypsa-eur/128-B-I.mps', format=ProblemFormat.mps)
#print("Model is saved to MPS")
#sys.exit()
status, termination_condition = n.lopf(pyomo=False,
solver_name=solver_name,
solver_logfile=solver_log,
solver_options=solver_options,
extra_functionality=extra_functionality,
formulation=solve_opts['formulation'])
#extra_postprocessing=extra_postprocessing
#keep_files=True
#free_memory={'pypsa'}
assert status == "ok" or allow_warning_status and status == 'warning', \
("network_lopf did abort with status={} "
"and termination_condition={}"
.format(status, termination_condition))
if not fix_ext_lines and "line_volume_constraint" in n.global_constraints.index:
n.line_volume_limit_dual = n.global_constraints.at["line_volume_constraint","mu"]
print("line volume limit dual:",n.line_volume_limit_dual)
return status, termination_condition
lines_ext_b = n.lines.s_nom_extendable
if lines_ext_b.any():
# puh: ok, we need to iterate, since there is a relation
# between s/p_nom and r, x for branches.
msq_threshold = 0.01
lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']])
lines['s_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom)
).where(n.lines.type != '', n.lines['s_nom'])
lines_ext_typed_b = (n.lines.type != '') & lines_ext_b
lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b
def update_line_parameters(n, zero_lines_below=10, fix_zero_lines=False):
if zero_lines_below > 0:
n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0.
n.links.loc[(n.links.carrier=='DC') & (n.links.p_nom_opt < zero_lines_below), 'p_nom_opt'] = 0.
if lines_ext_untyped_b.any():
for attr in ('r', 'x'):
n.lines.loc[lines_ext_untyped_b, attr] = (
lines[attr].multiply(lines['s_nom']/n.lines['s_nom_opt'])
)
if lines_ext_typed_b.any():
n.lines.loc[lines_ext_typed_b, 'num_parallel'] = (
n.lines['s_nom_opt']/lines['s_nom']
)
logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel']))
iteration = 1
lines['s_nom_opt'] = lines['s_nom'] * n.lines['num_parallel'].where(n.lines.type != '', 1.)
status, termination_condition = run_lopf(n, allow_warning_status=True)
def msq_diff(n):
lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean())/lines['s_nom_opt'].mean()
logger.info("Mean square difference after iteration {} is {}".format(iteration, lines_err))
return lines_err
min_iterations = solve_opts.get('min_iterations', 2)
max_iterations = solve_opts.get('max_iterations', 999)
while msq_diff(n) > msq_threshold or iteration < min_iterations:
if iteration >= max_iterations:
logger.info("Iteration {} beyond max_iterations {}. Stopping ...".format(iteration, max_iterations))
break
update_line_parameters(n)
lines['s_nom_opt'] = n.lines['s_nom_opt']
iteration += 1
status, termination_condition = run_lopf(n, allow_warning_status=True)
update_line_parameters(n, zero_lines_below=100)
logger.info("Starting last run with fixed extendable lines")
# Not really needed, could also be taken out
# if 'snakemake' in globals():
# fn = os.path.basename(snakemake.output[0])
# n.export_to_netcdf('/home/vres/data/jonas/playground/pypsa-eur/' + fn)
status, termination_condition = run_lopf(n, fix_ext_lines=True)
# Drop zero lines from network
# zero_lines_i = n.lines.index[(n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable]
# if len(zero_lines_i):
# n.mremove("Line", zero_lines_i)
# zero_links_i = n.links.index[(n.links.p_nom_opt == 0.) & n.links.p_nom_extendable]
# if len(zero_links_i):
# n.mremove("Link", zero_links_i)
# add to network for extra_functionality
n.config = config
n.opts = opts
if cf_solving.get('skip_iterations', False):
network_lopf(n, solver_name=solver_name, solver_options=solver_options,
extra_functionality=extra_functionality, **kwargs)
else:
ilopf(n, solver_name=solver_name, solver_options=solver_options,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality, **kwargs)
return n
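# Minimal usage sketch (the config shape mirrors config.yaml; solver name
# and options below are illustrative):
#
#   config = {'solving': {'solver': {'name': 'cbc'},
#                         'options': {'skip_iterations': True}}}
#   n = solve_network(n, config=config, opts=['Co2L0'],
#                     solver_logfile='solver.log')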
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake, Dict
snakemake = MockSnakemake(
wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0',
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
co2_budget_name='b30b3', planning_horizons='2050'),
input=dict(network="pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc"),
output=["results/networks/s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}-test.nc"],
log=dict(gurobi="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_gurobi-test.log",
python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{sector_opts}_{co2_budget_name}_{planning_horizons}_python-test.log")
from helper import mock_snakemake
snakemake = mock_snakemake(
'solve_network',
simpl='',
clusters=48,
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
planning_horizons=2050,
)
import yaml
with open('config.yaml', encoding='utf8') as f:
snakemake.config = yaml.safe_load(f)
tmpdir = snakemake.config['solving'].get('tmpdir')
if tmpdir is not None:
patch_pyomo_tmpdir(tmpdir)
logging.basicConfig(filename=snakemake.log.python,
level=snakemake.config['logging_level'])
with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
tmpdir = snakemake.config['solving'].get('tmpdir')
if tmpdir is not None:
from pathlib import Path  # Path is not imported at module level above
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.opts.split('-')
solve_opts = snakemake.config['solving']['options']
n = pypsa.Network(snakemake.input.network,
override_component_attrs=override_component_attrs)
fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
n = prepare_network(n)
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
n = solve_network(n)
n = prepare_network(n, solve_opts)
n = solve_network(n, config=snakemake.config, opts=opts,
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver)
if "lv_limit" in n.global_constraints.index:
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
n.line_volume_limit_dual = n.global_constraints.at["lv_limit", "mu"]
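# By LP duality, the stored mu is the shadow price of the transmission
# volume cap, i.e. the marginal system cost saving per additional unit of
# line volume (MW*km) allowed.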
n.export_to_netcdf(snakemake.output[0])