Merge branch 'master' into nh3

Fabian Neumann 2022-10-01 16:01:39 +02:00 committed by GitHub
commit 1611d63a8a
19 changed files with 588 additions and 226 deletions


@ -104,6 +104,6 @@ jobs:
conda activate pypsa-eur
conda list
cp test/config.overnight.yaml config.yaml
snakemake -call solve_all_networks
snakemake -call
cp test/config.myopic.yaml config.yaml
snakemake -call solve_all_networks
snakemake -call


@ -20,14 +20,16 @@ all greenhouse gas emitters except waste management and land use.
**WARNING**: PyPSA-Eur-Sec is under active development and has several
[limitations](https://pypsa-eur-sec.readthedocs.io/en/latest/limitations.html) which
you should understand before using the model. The github repository
[issues](https://github.com/PyPSA/pypsa-eur-sec/issues) collects known
topics we are working on (please feel free to help or make suggestions). There is neither a full
documentation nor a paper yet, but we hope to have a preprint out by mid-2022.
You can find out more about the model capabilities in [a recent
presentation at EMP-E](https://nworbmot.org/energy/brown-empe.pdf) or the
following [paper in Joule with a description of the industry
sector](https://arxiv.org/abs/2109.09563). We cannot support this model if you
choose to use it.
[issues](https://github.com/PyPSA/pypsa-eur-sec/issues) collect known
topics we are working on (please feel free to help or make suggestions).
The [documentation](https://pypsa-eur-sec.readthedocs.io/) remains somewhat
patchy.
You can find showcases of the model's capabilities in the preprint
[Benefits of a Hydrogen Network in Europe](https://arxiv.org/abs/2207.05816),
a [paper in Joule with a description of the industry
sector](https://arxiv.org/abs/2109.09563), or in [a 2021
presentation at EMP-E](https://nworbmot.org/energy/brown-empe.pdf).
We cannot support this model if you choose to use it.
Please see the [documentation](https://pypsa-eur-sec.readthedocs.io/)
for installation instructions and other useful information about the snakemake workflow.


@ -256,9 +256,9 @@ rule build_biomass_potentials:
enspreso_biomass=HTTP.remote("https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx", keep_local=True),
nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson", # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson"),
nuts3_population=pypsaeur("data/bundle/nama_10r_3popgdp.tsv.gz"),
swiss_cantons=pypsaeur("data/bundle/ch_cantons.csv"),
swiss_population=pypsaeur("data/bundle/je-e-21.03.02.xls"),
nuts3_population="../pypsa-eur/data/bundle/nama_10r_3popgdp.tsv.gz",
swiss_cantons="../pypsa-eur/data/bundle/ch_cantons.csv",
swiss_population="../pypsa-eur/data/bundle/je-e-21.03.02.xls",
country_shapes=pypsaeur('resources/country_shapes.geojson')
output:
biomass_potentials_all='resources/biomass_potentials_all_s{simpl}_{clusters}.csv',
@ -442,14 +442,14 @@ rule build_population_weighted_energy_totals:
rule build_transport_demand:
input:
clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
transport_data='resources/transport_data.csv',
traffic_data_KFZ="data/emobility/KFZ__count",
traffic_data_Pkw="data/emobility/Pkw__count",
temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
output:
transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
@ -464,15 +464,17 @@ rule prepare_sector_network:
overrides="data/override_component_attrs",
network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
energy_totals_name='resources/energy_totals.csv',
eurostat=input_eurostat,
pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
dsm_profile="resources/dsm_profile_s{simpl}_{clusters}.csv",
co2_totals_name='resources/co2_totals.csv',
co2="data/eea/UNFCCC_v23.csv",
biomass_potentials='resources/biomass_potentials_s{simpl}_{clusters}.csv',
heat_profile="data/heat_load_profile_BDEW.csv",
costs=CDIR + "costs_{planning_horizons}.csv",
costs=CDIR + "costs_{}.csv".format(config['costs']['year']) if config["foresight"] == "overnight" else CDIR + "costs_{planning_horizons}.csv",
profile_offwind_ac=pypsaeur("resources/profile_offwind-ac.nc"),
profile_offwind_dc=pypsaeur("resources/profile_offwind-dc.nc"),
h2_cavern="resources/salt_cavern_potentials_s{simpl}_{clusters}.csv",
@ -537,7 +539,7 @@ rule make_summary:
RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
**config['scenario']
),
costs=CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
costs=CDIR + "costs_{}.csv".format(config['costs']['year']) if config["foresight"] == "overnight" else CDIR + "costs_{}.csv".format(config['scenario']['planning_horizons'][0]),
plots=expand(
RDIR + "/maps/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
**config['scenario']
@ -568,7 +570,9 @@ rule plot_summary:
input:
costs=SDIR + '/csvs/costs.csv',
energy=SDIR + '/csvs/energy.csv',
balances=SDIR + '/csvs/supply_energy.csv'
balances=SDIR + '/csvs/supply_energy.csv',
eurostat=input_eurostat,
country_codes='data/Country_codes.csv',
output:
costs=SDIR + '/graphs/costs.pdf',
energy=SDIR + '/graphs/energy.pdf',
@ -585,7 +589,7 @@ if config["foresight"] == "overnight":
input:
overrides="data/override_component_attrs",
network=RDIR + "/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
costs=CDIR + "costs_{planning_horizons}.csv",
costs=CDIR + "costs_{}.csv".format(config['costs']['year']),
config=SDIR + '/configs/config.yaml'
output: RDIR + "/postnetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc"
shadow: "shallow"
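
As an aside (not part of the diff), the cost-file selection logic introduced above can be sketched as follows; the config values are arbitrary examples.

# Sketch of the new cost-file selection: overnight runs take the cost year
# from config['costs']['year'], myopic/perfect runs use the planning horizon.
config = {
    "foresight": "overnight",
    "costs": {"year": 2030},
    "scenario": {"planning_horizons": [2050]},
}

if config["foresight"] == "overnight":
    costs = "costs_{}.csv".format(config["costs"]["year"])  # costs_2030.csv
else:
    costs = "costs_{}.csv".format(config["scenario"]["planning_horizons"][0])  # costs_2050.csv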


@ -33,16 +33,21 @@ scenario:
# A for agriculture, forestry and fishing
# solar+c0.5 reduces the capital cost of solar to 50\% of reference value
# solar+p3 multiplies the available installable potential by factor 3
# co2 stored+e2 multiplies the potential of CO2 sequestration by a factor 2
# seq400 sets the potential of CO2 sequestration to 400 Mt CO2 per year
# dist{n} includes distribution grids with investment cost of n times cost in data/costs.csv
# for myopic/perfect foresight cb states the carbon budget in GtCO2 (cumulative
# emissions throughout the transition path in the timeframe determined by the
# planning_horizons), be:beta decay; ex:exponential decay
# cb40ex0 distributes a carbon budget of 40 GtCO2 following an exponential
# decay with initial growth rate 0
planning_horizons: # investment years for myopic and perfect; or costs year for overnight
- 2030
# for example, set to [2020, 2030, 2040, 2050] for myopic foresight
planning_horizons: # investment years for myopic and perfect; for overnight, year of cost assumptions can be different and is defined under 'costs'
- 2050
# for example, set to
# - 2020
# - 2030
# - 2040
# - 2050
# for myopic foresight
# CO2 budget as a fraction of 1990 emissions
# this is over-ridden if CO2Lx is set in sector_opts
@ -134,7 +139,8 @@ solar_thermal:
# only relevant for foresight = myopic or perfect
existing_capacities:
grouping_years: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_power: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2020, 2025, 2030]
grouping_years_heat: [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015, 2019] # these should not extend beyond 2020
threshold_capacity: 10
conventional_carriers:
- lignite
@ -148,11 +154,11 @@ sector:
potential: 0.6 # maximum fraction of urban demand which can be supplied by district heating
# increase of today's district heating demand to potential maximum district heating share
# progress = 0 means today's district heating share, progress = 1 means maximum fraction of urban demand is supplied by district heating
progress: 1
# 2020: 0.0
# 2030: 0.3
# 2040: 0.6
# 2050: 1.0
progress:
2020: 0.0
2030: 0.3
2040: 0.6
2050: 1.0
district_heating_loss: 0.15
bev_dsm_restriction_value: 0.75 #Set to 0 for no restriction on BEV DSM
bev_dsm_restriction_time: 7 #Time at which SOC of BEV has to be dsm_restriction_value
@ -172,16 +178,16 @@ sector:
bev_avail_mean: 0.8
v2g: true #allows feed-in to grid from EV battery
#what is not EV or FCEV is oil-fuelled ICE
land_transport_fuel_cell_share: 0.15 # 1 means all FCEVs
# 2020: 0
# 2030: 0.05
# 2040: 0.1
# 2050: 0.15
land_transport_electric_share: 0.85 # 1 means all EVs
# 2020: 0
# 2030: 0.25
# 2040: 0.6
# 2050: 0.85
land_transport_fuel_cell_share: # 1 means all FCEVs
2020: 0
2030: 0.05
2040: 0.1
2050: 0.15
land_transport_electric_share: # 1 means all EVs
2020: 0
2030: 0.25
2040: 0.6
2050: 0.85
transport_fuel_cell_efficiency: 0.5
transport_internal_combustion_efficiency: 0.3
agriculture_machinery_electric_share: 0
@ -189,29 +195,29 @@ sector:
agriculture_machinery_electric_efficiency: 0.3 # electricity per use
shipping_average_efficiency: 0.4 #For conversion of fuel oil to propulsion in 2011
shipping_hydrogen_liquefaction: false # whether to consider liquefaction costs for shipping H2 demands
shipping_hydrogen_share: 1 # 1 means all hydrogen FC
# 2020: 0
# 2025: 0
# 2030: 0.05
# 2035: 0.15
# 2040: 0.3
# 2045: 0.6
# 2050: 1
shipping_hydrogen_share: # 1 means all hydrogen FC
2020: 0
2025: 0
2030: 0.05
2035: 0.15
2040: 0.3
2045: 0.6
2050: 1
time_dep_hp_cop: true #time dependent heat pump coefficient of performance
heat_pump_sink_T: 55. # Celsius, based on DTU / large area radiators; used in build_cop_profiles.py
# conservatively high to cover hot water and space heating in poorly-insulated buildings
reduce_space_heat_exogenously: true # reduces space heat demand by a given factor (applied before losses in DH)
# this can represent e.g. building renovation, building demolition, or if
# the factor is negative: increasing floor area, increased thermal comfort, population growth
reduce_space_heat_exogenously_factor: 0.29 # per unit reduction in space heat demand
reduce_space_heat_exogenously_factor: # per unit reduction in space heat demand
# the default factors are determined by the LTS scenario from http://tool.european-calculator.eu/app/buildings/building-types-area/?levers=1ddd4444421213bdbbbddd44444ffffff11f411111221111211l212221
# 2020: 0.10 # this results in a space heat demand reduction of 10%
# 2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita
# 2030: 0.09
# 2035: 0.11
# 2040: 0.16
# 2045: 0.21
# 2050: 0.29
2020: 0.10 # this results in a space heat demand reduction of 10%
2025: 0.09 # first heat demand increases compared to 2020 because of larger floor area per capita
2030: 0.09
2035: 0.11
2040: 0.16
2045: 0.21
2050: 0.29
retrofitting : # co-optimises building renovation to reduce space heat demand
retro_endogen: false # co-optimise space heat savings
cost_factor: 1.0 # weight costs for building renovation
@ -225,6 +231,7 @@ sector:
central: 180
boilers: true
oil_boilers: false
biomass_boiler: true
chp: true
micro_chp: false
solar_thermal: true
@ -232,8 +239,9 @@ sector:
marginal_cost_storage: 0. #1e-4
methanation: true
helmeth: true
coal_cc: false
dac: true
co2_vent: true
co2_vent: false
SMR: true
co2_sequestration_potential: 200 #MtCO2/a sequestration potential for Europe
co2_sequestration_cost: 10 #EUR/tCO2 for sequestration of CO2
@ -263,35 +271,38 @@ sector:
biomass_transport: false # biomass transport between nodes
conventional_generation: # generator : carrier
OCGT: gas
biomass_to_liquid: false
biosng: false
industry:
St_primary_fraction: 0.3 # fraction of steel produced via primary route versus secondary route (scrap+EAF); today fraction is 0.6
# 2020: 0.6
# 2025: 0.55
# 2030: 0.5
# 2035: 0.45
# 2040: 0.4
# 2045: 0.35
# 2050: 0.3
DRI_fraction: 1 # fraction of the primary route converted to DRI + EAF
# 2020: 0
# 2025: 0
# 2030: 0.05
# 2035: 0.2
# 2040: 0.4
# 2045: 0.7
# 2050: 1
St_primary_fraction: # fraction of steel produced via primary route versus secondary route (scrap+EAF); today fraction is 0.6
2020: 0.6
2025: 0.55
2030: 0.5
2035: 0.45
2040: 0.4
2045: 0.35
2050: 0.3
DRI_fraction: # fraction of the primary route converted to DRI + EAF
2020: 0
2025: 0
2030: 0.05
2035: 0.2
2040: 0.4
2045: 0.7
2050: 1
H2_DRI: 1.7 #H2 consumption in Direct Reduced Iron (DRI), MWh_H2,LHV/ton_Steel from 51kgH2/tSt in Vogl et al (2018) doi:10.1016/j.jclepro.2018.08.279
elec_DRI: 0.322 #electricity consumption in Direct Reduced Iron (DRI) shaft, MWh/tSt HYBRIT brochure https://ssabwebsitecdn.azureedge.net/-/media/hybrit/files/hybrit_brochure.pdf
Al_primary_fraction: 0.2 # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4
# 2020: 0.4
# 2025: 0.375
# 2030: 0.35
# 2035: 0.325
# 2040: 0.3
# 2045: 0.25
# 2050: 0.2
Al_primary_fraction: # fraction of aluminium produced via the primary route versus scrap; today fraction is 0.4
2020: 0.4
2025: 0.375
2030: 0.35
2035: 0.325
2040: 0.3
2045: 0.25
2050: 0.2
MWh_NH3_per_tNH3: 5.166 # LHV
MWh_CH4_per_tNH3_SMR: 10.8 # 2012's demand from https://ec.europa.eu/docsroom/documents/4165/attachments/1/translations/en/renditions/pdf
MWh_elec_per_tNH3_SMR: 0.7 # same source, assuming 94-6% split methane-elec of total energy demand 11.5 MWh/tNH3
@ -319,6 +330,7 @@ industry:
# Material Economics (2019): https://materialeconomics.com/latest-updates/industrial-transformation-2050
costs:
year: 2030
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07
@ -510,6 +522,9 @@ plotting:
solid biomass for industry CC: '#47411c'
solid biomass for industry co2 from atmosphere: '#736412'
solid biomass for industry co2 to stored: '#47411c'
biomass boiler: '#8A9A5B'
biomass to liquid: '#32CD32'
BioSNG: '#123456'
# power transmission
lines: '#6c9459'
transmission lines: '#6c9459'
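
Several options above changed from a single scalar to a dictionary indexed by year (e.g. land_transport_electric_share, St_primary_fraction). A plausible sketch of how such values can be looked up per investment year is shown below; the function name resolve() is hypothetical (the code base uses the get() helper in prepare_sector_network.py), and the values are those from the config above.

# Plausible sketch: resolve a year-indexed config option for one investment year.
land_transport_electric_share = {2020: 0, 2030: 0.25, 2040: 0.6, 2050: 0.85}

def resolve(item, investment_year):
    # scalar options apply to every year; dicts are indexed by investment year
    return item[investment_year] if isinstance(item, dict) else item

print(resolve(land_transport_electric_share, 2030))  # 0.25
print(resolve(0.85, 2030))                           # 0.85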


@ -33,17 +33,20 @@ waste management, agriculture, forestry and land use.
**WARNING**: PyPSA-Eur-Sec is under active development and has several
`limitations <https://pypsa-eur-sec.readthedocs.io/en/latest/limitations.html>`_ which
you should understand before using the model. The github repository
`issues <https://github.com/PyPSA/pypsa-eur-sec/issues>`_ collects known
topics we are working on (please feel free to help or make suggestions). There is neither a full
documentation nor a paper yet, but we hope to have a preprint out by mid-2022.
We cannot support this model if you
choose to use it.
`issues <https://github.com/PyPSA/pypsa-eur-sec/issues>`_ collect known
topics we are working on (please feel free to help or make suggestions).
The `documentation <https://pypsa-eur-sec.readthedocs.io/>`_ remains somewhat
patchy.
We cannot support this model if you choose to use it.
.. note::
More about the current model capabilities and preliminary results
can be found in `a recent presentation at EMP-E <https://nworbmot.org/energy/brown-empe.pdf>`_
and the following `paper in Joule with a description of the industry sector <https://arxiv.org/abs/2109.09563>`_.
You can find showcases of the model's capabilities in the
preprint `Benefits of a Hydrogen Network in Europe
<https://arxiv.org/abs/2207.05816>`_, a `paper in Joule with a
description of the industry sector
<https://arxiv.org/abs/2109.09563>`_, or in `a 2021 presentation
at EMP-E <https://nworbmot.org/energy/brown-empe.pdf>`_.
This diagram gives an overview of the sectors and the links between
them:


@ -24,7 +24,7 @@ incorporates retrofitting options to hydrogen.
* New rule ``build_gas_input_locations`` compiles the LNG import capacities
(including planned projects from gem.wiki), pipeline entry capacities and
local production capacities for each region of the model. These are the
regions where fossil gas can eventually enter the model.
* New rule ``cluster_gas_network`` that clusters the gas transmission network
data to the model resolution. Cross-regional pipeline capacities are aggregated
@ -47,8 +47,8 @@ incorporates retrofitting options to hydrogen.
H2_retrofit_capacity_per_CH4`` units are made available as hydrogen pipeline
capacity in the corresponding corridor. These repurposed hydrogen pipelines
have lower costs than new hydrogen pipelines. Both new and repurposed pipelines
can be built simultaneously. The retrofitting option ``sector: H2_retrofit:`` also works
with a copperplated methane infrastructure, i.e. when ``sector: gas_network: false``.
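
For concreteness, a tiny hypothetical calculation of the retrofitting option quoted above; both numbers below are placeholders (see ``config.default.yaml`` for the actual default of ``H2_retrofit_capacity_per_CH4``).

# Hypothetical example: H2 capacity made available by retrofitting a CH4 pipeline.
H2_retrofit_capacity_per_CH4 = 0.6   # placeholder ratio, see config
ch4_pipeline_capacity = 1000.0       # MW, arbitrary example
h2_pipeline_capacity = H2_retrofit_capacity_per_CH4 * ch4_pipeline_capacity  # 600 MW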
* New hydrogen pipelines can now be built where there are already power or gas
transmission routes. Previously, only the electricity transmission routes were
@ -56,6 +56,17 @@ incorporates retrofitting options to hydrogen.
**New features and functionality**
* Add option to aggregate network temporally using representative snapshots or segments (with tsam package)
* Add option for biomass boilers (wood pellets) for decentral heating
* Add option for BioSNG (methane from biomass) with and without CC
* Add option for BtL (Biomass to liquid fuel/oil) with and without CC
* Units are assigned to the buses. These are for information only; the unit specifications are not taken into account in the optimisation, so no automatic conversion of units takes place.
* Option ``retrieve_sector_databundle`` to automatically retrieve and extract data bundle.
* Add regionalised hydrogen salt cavern storage potentials from `Technical Potential of Salt Caverns for Hydrogen Storage in Europe <https://doi.org/10.20944/preprints201910.0187.v1>`_.
@ -89,7 +100,7 @@ besides many performance improvements.
This release is known to work with `PyPSA-Eur
<https://github.com/PyPSA/pypsa-eur>`_ Version 0.4.0, `Technology Data
<https://github.com/PyPSA/technology-data>`_ Version 0.3.0 and
`PyPSA <https://github.com/PyPSA/PyPSA>`_ Version 0.18.0.
Please note that the data bundle has also been updated.
@ -207,19 +218,19 @@ Please note that the data bundle has also been updated.
A function ``helper.override_component_attrs`` was added that loads this data
and can pass the overridden component attributes into ``pypsa.Network()``.
* Add various parameters to ``config.default.yaml`` which were previously hardcoded inside the scripts
(e.g. energy reference years, BEV settings, solar thermal collector models, geomap colours).
* Removed stale industry demand rules ``build_industrial_energy_demand_per_country``
and ``build_industrial_demand``. These are superseded with more regionally resolved rules.
* Use simpler and shorter ``gdf.sjoin()`` function to allocate industrial sites
from the Hotmaps database to onshore regions.
This change also fixes a bug:
The previous version allocated sites to the closest bus,
but at country borders (where Voronoi cells are distorted by the borders),
this had resulted in e.g. a Spanish site close to the French border
being wrongly allocated to the French bus if the bus center was closer.
* Retrofitting rule is now only triggered if endogeneously optimised.
@ -230,7 +241,7 @@ Please note that the data bundle has also been updated.
* Improve legibility of ``config.default.yaml`` and remove unused options.
* Use the country-specific time zone mappings from ``pytz`` rather than a manual mapping.
* A function ``add_carrier_buses()`` was added to the ``prepare_network`` rule to reduce code duplication.
* In the ``prepare_network`` rule the cost and potential adjustment was moved into an


@ -11,7 +11,7 @@ import yaml
import numpy as np
from add_existing_baseyear import add_build_year_to_new_assets
from helper import override_component_attrs
from helper import override_component_attrs, update_config_with_sector_opts
from solve_network import basename
@ -123,6 +123,8 @@ if __name__ == "__main__":
planning_horizons=2030,
)
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
print(snakemake.input.network_p)
logging.basicConfig(level=snakemake.config['logging_level'])
@ -137,4 +139,5 @@ if __name__ == "__main__":
add_brownfield(n, n_p, year)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])


@ -13,7 +13,7 @@ import pypsa
import yaml
from prepare_sector_network import prepare_costs, define_spatial
from helper import override_component_attrs
from helper import override_component_attrs, update_config_with_sector_opts
from types import SimpleNamespace
spatial = SimpleNamespace()
@ -131,7 +131,8 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
'Oil': 'oil',
'OCGT': 'OCGT',
'CCGT': 'CCGT',
'Natural Gas': 'gas'
'Natural Gas': 'gas',
'Bioenergy': 'urban central solid biomass CHP',
}
fueltype_to_drop = [
@ -139,7 +140,6 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
'Wind',
'Solar',
'Geothermal',
'Bioenergy',
'Waste',
'Other',
'CCGT, Thermal'
@ -150,10 +150,29 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
'Storage Technologies'
]
# drop unused fueltypes and technologies
df_agg.drop(df_agg.index[df_agg.Fueltype.isin(fueltype_to_drop)], inplace=True)
df_agg.drop(df_agg.index[df_agg.Technology.isin(technology_to_drop)], inplace=True)
df_agg.Fueltype = df_agg.Fueltype.map(rename_fuel)
# Intermediate fix for DateIn & DateOut
# Fill missing DateIn
biomass_i = df_agg.loc[df_agg.Fueltype=='urban central solid biomass CHP'].index
mean = df_agg.loc[biomass_i, 'DateIn'].mean()
df_agg.loc[biomass_i, 'DateIn'] = df_agg.loc[biomass_i, 'DateIn'].fillna(int(mean))
# Fill missing DateOut
dateout = df_agg.loc[biomass_i, 'DateIn'] + snakemake.config['costs']['lifetime']
df_agg.loc[biomass_i, 'DateOut'] = df_agg.loc[biomass_i, 'DateOut'].fillna(dateout)
# drop assets which are already phased out / decommissioned
phased_out = df_agg[df_agg["DateOut"]<baseyear].index
df_agg.drop(phased_out, inplace=True)
# calculate remaining lifetime before phase-out (+1 because the phase-out
# date is assumed to be at the end of the year)
df_agg["lifetime"] = df_agg.DateOut - df_agg.DateIn + 1
# assign clustered bus
busmap_s = pd.read_csv(snakemake.input.busmap_s, index_col=0).squeeze()
busmap = pd.read_csv(snakemake.input.busmap, index_col=0).squeeze()
@ -182,35 +201,52 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
aggfunc='sum'
)
lifetime = df_agg.pivot_table(
index=["grouping_year", 'Fueltype'],
columns='cluster_bus',
values='lifetime',
aggfunc='mean' # currently the mean lifetime is taken when clustering assets
)
carrier = {
"OCGT": "gas",
"CCGT": "gas",
"coal": "coal",
"oil": "oil",
"lignite": "lignite",
"nuclear": "uranium"
"nuclear": "uranium",
'urban central solid biomass CHP': "biomass",
}
for grouping_year, generator in df.index:
# capacity is the capacity in MW at each node for this grouping year and generator
capacity = df.loc[grouping_year, generator]
capacity = capacity[~capacity.isna()]
capacity = capacity[capacity > snakemake.config['existing_capacities']['threshold_capacity']]
suffix = '-ac' if generator == 'offwind' else ''
name_suffix = f' {generator}{suffix}-{grouping_year}'
asset_i = capacity.index + name_suffix
if generator in ['solar', 'onwind', 'offwind']:
suffix = '-ac' if generator == 'offwind' else ''
name_suffix = f' {generator}{suffix}-{baseyear}'
# to account for electricity grid connection costs or a split between
# utility-scale and rooftop solar, take cost assumptions from the
# existing network rather than from the cost database
capital_cost = n.generators.loc[n.generators.carrier==generator+suffix, "capital_cost"].mean()
# check if assets are already in network (e.g. for 2020)
already_build = n.generators.index.intersection(asset_i)
new_build = asset_i.difference(n.generators.index)
# this is for the year 2020
if not already_build.empty:
n.generators.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values
new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]
if 'm' in snakemake.wildcards.clusters:
for ind in capacity.index:
for ind in new_capacity.index:
# existing capacities are split evenly among regions in every country
inv_ind = [i for i in inv_busmap[ind]]
@ -225,7 +261,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
[i + name_suffix for i in inv_ind],
bus=ind,
carrier=generator,
p_nom=capacity[ind] / len(inv_ind), # split among regions in a country
p_nom=new_capacity[ind] / len(inv_ind), # split among regions in a country
marginal_cost=costs.at[generator,'VOM'],
capital_cost=capital_cost,
efficiency=costs.at[generator, 'efficiency'],
@ -236,42 +272,72 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
else:
p_max_pu = n.generators_t.p_max_pu[capacity.index + name_suffix]
p_max_pu = n.generators_t.p_max_pu[capacity.index + f' {generator}{suffix}-{baseyear}']
n.madd("Generator",
capacity.index,
suffix=' ' + generator +"-"+ str(grouping_year),
bus=capacity.index,
carrier=generator,
p_nom=capacity,
marginal_cost=costs.at[generator, 'VOM'],
capital_cost=capital_cost,
efficiency=costs.at[generator, 'efficiency'],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[generator, 'lifetime']
)
if not new_build.empty:
n.madd("Generator",
new_capacity.index,
suffix=' ' + name_suffix,
bus=new_capacity.index,
carrier=generator,
p_nom=new_capacity,
marginal_cost=costs.at[generator, 'VOM'],
capital_cost=capital_cost,
efficiency=costs.at[generator, 'efficiency'],
p_max_pu=p_max_pu.rename(columns=n.generators.bus),
build_year=grouping_year,
lifetime=costs.at[generator, 'lifetime']
)
else:
bus0 = vars(spatial)[carrier[generator]].nodes
if "EU" not in vars(spatial)[carrier[generator]].locations:
bus0 = bus0.intersection(capacity.index + " gas")
n.madd("Link",
capacity.index,
suffix= " " + generator +"-" + str(grouping_year),
bus0=bus0,
bus1=capacity.index,
bus2="co2 atmosphere",
carrier=generator,
marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel
capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel
p_nom=capacity / costs.at[generator, 'efficiency'],
efficiency=costs.at[generator, 'efficiency'],
efficiency2=costs.at[carrier[generator], 'CO2 intensity'],
build_year=grouping_year,
lifetime=costs.at[generator, 'lifetime']
)
already_build = n.links.index.intersection(asset_i)
new_build = asset_i.difference(n.links.index)
lifetime_assets = lifetime.loc[grouping_year,generator].dropna()
# this is for the year 2020
if not already_build.empty:
n.links.loc[already_build, "p_nom_min"] = capacity.loc[already_build.str.replace(name_suffix, "")].values
if not new_build.empty:
new_capacity = capacity.loc[new_build.str.replace(name_suffix, "")]
if generator!="urban central solid biomass CHP":
n.madd("Link",
new_capacity.index,
suffix= name_suffix,
bus0=bus0,
bus1=new_capacity.index,
bus2="co2 atmosphere",
carrier=generator,
marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel
capital_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'fixed'], #NB: fixed cost is per MWel
p_nom=new_capacity / costs.at[generator, 'efficiency'],
efficiency=costs.at[generator, 'efficiency'],
efficiency2=costs.at[carrier[generator], 'CO2 intensity'],
build_year=grouping_year,
lifetime=lifetime_assets.loc[new_capacity.index],
)
else:
key = 'central solid biomass CHP'
n.madd("Link",
new_capacity.index,
suffix= name_suffix,
bus0=spatial.biomass.df.loc[new_capacity.index]["nodes"].values,
bus1=new_capacity.index,
bus2=new_capacity.index + " urban central heat",
carrier=generator,
p_nom=new_capacity / costs.at[key, 'efficiency'],
capital_cost=costs.at[key, 'fixed'] * costs.at[key, 'efficiency'],
marginal_cost=costs.at[key, 'VOM'],
efficiency=costs.at[key, 'efficiency'],
build_year=grouping_year,
efficiency2=costs.at[key, 'efficiency-heat'],
lifetime=lifetime_assets.loc[new_capacity.index]
)
def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime):
@ -376,10 +442,10 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
for i, grouping_year in enumerate(grouping_years):
if int(grouping_year) + default_lifetime <= int(baseyear):
ratio = 0
else:
# installation is assumed to be linear for the past 25 years (default lifetime)
ratio = (int(grouping_year) - int(grouping_years[i-1])) / default_lifetime
continue
# installation is assumed to be linear for the past 25 years (default lifetime)
ratio = (int(grouping_year) - int(grouping_years[i-1])) / default_lifetime
n.madd("Link",
nodes[name],
@ -443,7 +509,7 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
# delete links with p_nom=nan corresponding to extra nodes in country
n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and np.isnan(n.links.p_nom[index])])
# delete links if their lifetime is over and p_nom=0
# delete links with capacities below threshold
threshold = snakemake.config['existing_capacities']['threshold_capacity']
n.mremove("Link", [index for index in n.links.index.to_list() if str(grouping_year) in index and n.links.p_nom[index] < threshold])
@ -454,19 +520,21 @@ if __name__ == "__main__":
snakemake = mock_snakemake(
'add_existing_baseyear',
simpl='',
clusters="37",
clusters="45",
lv=1.0,
opts='',
sector_opts='168H-T-H-B-I-solar+p3-dist1',
planning_horizons=2020,
sector_opts='365H-T-H-B-I-A-solar+p3-dist1',
planning_horizons=2030,
)
logging.basicConfig(level=snakemake.config['logging_level'])
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
options = snakemake.config["sector"]
opts = snakemake.wildcards.sector_opts.split('-')
baseyear= snakemake.config['scenario']["planning_horizons"][0]
baseyear = snakemake.config['scenario']["planning_horizons"][0]
overrides = override_component_attrs(snakemake.input.overrides)
n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
@ -483,14 +551,17 @@ if __name__ == "__main__":
snakemake.config['costs']['lifetime']
)
grouping_years = snakemake.config['existing_capacities']['grouping_years']
add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear)
grouping_years_power = snakemake.config['existing_capacities']['grouping_years_power']
grouping_years_heat = snakemake.config['existing_capacities']['grouping_years_heat']
add_power_capacities_installed_before_baseyear(n, grouping_years_power, costs, baseyear)
if "H" in opts:
time_dep_hp_cop = options["time_dep_hp_cop"]
ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots)
gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots)
default_lifetime = snakemake.config['costs']['lifetime']
add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years, ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)
add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years_heat,
ashp_cop, gshp_cop, time_dep_hp_cop, costs, default_lifetime)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])


@ -127,17 +127,16 @@ to_ipcc = {
}
def build_eurostat(countries, year):
def build_eurostat(input_eurostat, countries, report_year, year):
"""Return multi-index for all countries' energy data in TWh/a."""
report_year = snakemake.config["energy"]["eurostat_report_year"]
filenames = {
2016: f"/{year}-Energy-Balances-June2016edition.xlsx",
2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx"
}
dfs = pd.read_excel(
snakemake.input.eurostat + filenames[report_year],
input_eurostat + filenames[report_year],
sheet_name=None,
skiprows=1,
index_col=list(range(4)),
@ -563,18 +562,18 @@ def build_energy_totals(countries, eurostat, swiss, idees):
return df
def build_eea_co2(year=1990):
def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
# https://www.eea.europa.eu/data-and-maps/data/national-emissions-reported-to-the-unfccc-and-to-the-eu-greenhouse-gas-monitoring-mechanism-16
# downloaded 201228 (modified by EEA last on 201221)
df = pd.read_csv(snakemake.input.co2, encoding="latin-1")
df = pd.read_csv(input_co2, encoding="latin-1")
df.replace(dict(Year="1985-1987"), 1986, inplace=True)
df.Year = df.Year.astype(int)
index_col = ["Country_code", "Pollutant_name", "Year", "Sector_name"]
df = df.set_index(index_col).sort_index()
emissions_scope = snakemake.config["energy"]["emissions"]
emissions_scope = emissions_scope
cts = ["CH", "EUA", "NO"] + eu28_eea
@ -611,9 +610,9 @@ def build_eea_co2(year=1990):
return emissions / 1e3
def build_eurostat_co2(countries, year=1990):
def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
eurostat = build_eurostat(countries, year)
eurostat = build_eurostat(input_eurostat, countries, report_year, year)
specific_emissions = pd.Series(index=eurostat.columns, dtype=float)
@ -702,7 +701,9 @@ if __name__ == "__main__":
idees_countries = countries.intersection(eu28)
data_year = config["energy_totals_year"]
eurostat = build_eurostat(countries, data_year)
report_year = snakemake.config["energy"]["eurostat_report_year"]
input_eurostat = snakemake.input.eurostat
eurostat = build_eurostat(input_eurostat, countries, report_year, data_year)
swiss = build_swiss(data_year)
idees = build_idees(idees_countries, data_year)
@ -710,8 +711,9 @@ if __name__ == "__main__":
energy.to_csv(snakemake.output.energy_name)
base_year_emissions = config["base_emissions_year"]
eea_co2 = build_eea_co2(base_year_emissions)
eurostat_co2 = build_eurostat_co2(countries, base_year_emissions)
emissions_scope = snakemake.config["energy"]["emissions"]
eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope)
eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, base_year_emissions)
co2 = build_co2_totals(countries, eea_co2, eurostat_co2)
co2.to_csv(snakemake.output.co2_name)
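
Not part of the diff, but for orientation: the refactored functions now receive their inputs explicitly instead of reading the global snakemake object. A minimal sketch of the new call signatures, assuming placeholder paths, countries and years (only the EEA file name is taken from the Snakefile above):

from build_energy_totals import build_eurostat, build_eea_co2, build_eurostat_co2

eurostat = build_eurostat("path/to/eurostat-energy-balances", ["DE", "FR"],
                          report_year=2016, year=2013)
eea_co2 = build_eea_co2("data/eea/UNFCCC_v23.csv", year=1990, emissions_scope="CO2")
eurostat_co2 = build_eurostat_co2("path/to/eurostat-energy-balances", ["DE", "FR"],
                                  report_year=2016, year=1990)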


@ -5,9 +5,7 @@ import pandas as pd
import geopandas as gpd
from itertools import product
from distutils.version import StrictVersion
gpd_version = StrictVersion(gpd.__version__)
from packaging.version import Version, parse
def locate_missing_industrial_sites(df):
@ -73,7 +71,7 @@ def prepare_hotmaps_database(regions):
gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326")
kws = dict(op="within") if gpd_version < '0.10' else dict(predicate="within")
kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within")
gdf = gpd.sjoin(gdf, regions, how="inner", **kws)
gdf.rename(columns={"index_right": "bus"}, inplace=True)


@ -8,9 +8,8 @@ import geopandas as gpd
from shapely import wkt
from pypsa.geo import haversine_pts
from distutils.version import StrictVersion
from packaging.version import Version, parse
gpd_version = StrictVersion(gpd.__version__)
def concat_gdf(gdf_list, crs='EPSG:4326'):
"""Concatenate multiple geopandas dataframes with common coordinate reference system (crs)."""
@ -34,7 +33,7 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")
kws = dict(op="within") if gpd_version < '0.10' else dict(predicate="within")
kws = dict(op="within") if parse(gpd.__version__) < Version('0.10') else dict(predicate="within")
bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
bus_mapping = bus_mapping.groupby(bus_mapping.index).first()


@ -1,5 +1,6 @@
from shutil import copy
import yaml
files = {
"config.yaml": "config.yaml",
@ -14,5 +15,16 @@ if __name__ == '__main__':
from helper import mock_snakemake
snakemake = mock_snakemake('copy_config')
basepath = snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/'
for f, name in files.items():
copy(f,snakemake.config['summary_dir'] + '/' + snakemake.config['run'] + '/configs/' + name)
copy(f, basepath + name)
with open(basepath + 'config.snakemake.yaml', 'w') as yaml_file:
yaml.dump(
snakemake.config,
yaml_file,
default_flow_style=False,
allow_unicode=True,
sort_keys=False
)


@ -1,7 +1,9 @@
import os
import yaml
import pytz
import pandas as pd
from pathlib import Path
from snakemake.utils import update_config
from pypsa.descriptors import Dict
from pypsa.components import components, component_attrs
@ -58,6 +60,7 @@ def mock_snakemake(rulename, **wildcards):
import os
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
from packaging.version import Version, parse
script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \
@ -67,7 +70,8 @@ def mock_snakemake(rulename, **wildcards):
if os.path.exists(p):
snakefile = p
break
workflow = sm.Workflow(snakefile, overwrite_configfiles=[])
kwargs = dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
workflow.include(snakefile)
workflow.global_resources = {}
rule = workflow.get_rule(rulename)
@ -122,4 +126,18 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
week_df = week_df.tz_localize(localize)
return week_df
def parse(l):
if len(l) == 1:
return yaml.safe_load(l[0])
else:
return {l.pop(0): parse(l)}
def update_config_with_sector_opts(config, sector_opts):
for o in sector_opts.split("-"):
if o.startswith("CF:"):
l = o.split("+")[1:]
update_config(config, parse(l))
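
A minimal, self-contained sketch (not part of the diff) of how the split option list becomes a nested config override; the keys and value are arbitrary examples.

import yaml
from snakemake.utils import update_config

def parse(l):
    # ["sector", "co2_vent", "true"] -> {"sector": {"co2_vent": True}}
    return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}

config = {"sector": {"co2_vent": False}}
update_config(config, parse(["sector", "co2_vent", "true"]))
print(config["sector"]["co2_vent"])  # True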


@ -309,7 +309,6 @@ def plot_h2_map(network):
)
n.plot(
# geomap=False,
bus_sizes=0,
link_colors='#72d3d6',
link_widths=link_widths_retro,
@ -443,7 +442,6 @@ def plot_ch4_map(network):
)
n.plot(
# geomap=False,
ax=ax,
bus_sizes=0.,
link_colors='#e8d1d1',
@ -453,7 +451,6 @@ def plot_ch4_map(network):
)
n.plot(
# geomap=False,
ax=ax,
bus_sizes=0.,
link_colors=link_color_used,


@ -7,6 +7,7 @@ import matplotlib.pyplot as plt
plt.style.use('ggplot')
from prepare_sector_network import co2_emissions_year
from helper import update_config_with_sector_opts
#consolidate and rename
def rename_techs(label):
@ -203,7 +204,7 @@ def plot_energy():
new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
new_columns = df.columns.sort_values()
fig, ax = plt.subplots(figsize=(12,8))
print(df.loc[new_index, new_columns])
@ -364,7 +365,7 @@ def historical_emissions(cts):
def plot_carbon_budget_distribution():
def plot_carbon_budget_distribution(input_eurostat):
"""
Plot historical carbon emissions in the EU and decarbonization path
"""
@ -386,9 +387,9 @@ def plot_carbon_budget_distribution():
ax1.set_xlim([1990,snakemake.config['scenario']['planning_horizons'][-1]+1])
path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/'
countries=pd.read_csv(path_cb + 'countries.csv', index_col=1)
cts=countries.index.to_list()
e_1990 = co2_emissions_year(cts, opts, year=1990)
countries = pd.read_csv(snakemake.input.country_codes, index_col=1)
cts = countries.index.to_list()
e_1990 = co2_emissions_year(cts, input_eurostat, opts, year=1990)
CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv',
index_col=0)
@ -439,7 +440,8 @@ if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('plot_summary')
n_header = 4
plot_costs()
@ -452,4 +454,4 @@ if __name__ == "__main__":
opts=sector_opts.split('-')
for o in opts:
if "cb" in o:
plot_carbon_budget_distribution()
plot_carbon_budget_distribution(snakemake.input.eurostat)


@ -14,7 +14,7 @@ from scipy.stats import beta
from vresutils.costdata import annuity
from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals
from helper import override_component_attrs, generate_periodic_profiles
from helper import override_component_attrs, generate_periodic_profiles, update_config_with_sector_opts
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from networkx.algorithms import complement
@ -171,21 +171,22 @@ def get(item, investment_year=None):
return item
def co2_emissions_year(countries, opts, year):
def co2_emissions_year(countries, input_eurostat, opts, emissions_scope, report_year, year):
"""
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
"""
eea_co2 = build_eea_co2(year)
emissions_scope = snakemake.config["energy"]["emissions"]
eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope)
# TODO: read Eurostat data from year > 2014
# this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
report_year = snakemake.config["energy"]["eurostat_report_year"]
if year > 2014:
eurostat_co2 = build_eurostat_co2(year=2014)
eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year=2014)
else:
eurostat_co2 = build_eurostat_co2(year)
eurostat_co2 = build_eurostat_co2(input_eurostat, countries, report_year, year)
co2_totals = build_co2_totals(eea_co2, eurostat_co2)
co2_totals = build_co2_totals(countries, eea_co2, eurostat_co2)
sectors = emission_sectors_from_opts(opts)
@ -198,7 +199,7 @@ def co2_emissions_year(countries, opts, year):
# TODO: move to own rule with sector-opts wildcard?
def build_carbon_budget(o, fn):
def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
"""
Distribute carbon budget following beta or exponential transition path.
"""
@ -215,10 +216,12 @@ def build_carbon_budget(o, fn):
countries = n.buses.country.dropna().unique()
e_1990 = co2_emissions_year(countries, opts, year=1990)
e_1990 = co2_emissions_year(countries, input_eurostat, opts, emissions_scope,
report_year, year=1990)
#emissions at the beginning of the path (last year available 2018)
e_0 = co2_emissions_year(countries, opts, year=2018)
e_0 = co2_emissions_year(countries, input_eurostat, opts, emissions_scope,
report_year,year=2018)
planning_horizons = snakemake.config['scenario']['planning_horizons']
t_0 = planning_horizons[0]
@ -246,8 +249,9 @@ def build_carbon_budget(o, fn):
co2_cap = pd.Series({t: exponential_decay(t) for t in planning_horizons}, name=o)
# TODO log in Snakefile
if not os.path.exists(fn):
os.makedirs(fn)
csvs_folder = fn.rsplit("/", 1)[0]
if not os.path.exists(csvs_folder):
os.makedirs(csvs_folder)
co2_cap.to_csv(fn, float_format='%.3f')
@ -398,10 +402,13 @@ def add_carrier_buses(n, carrier, nodes=None):
n.add("Carrier", carrier)
unit = "MWh_LHV" if carrier == "gas" else "MWh_th"
n.madd("Bus",
nodes,
location=location,
carrier=carrier
carrier=carrier,
unit=unit
)
#capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M
@ -452,6 +459,7 @@ def patch_electricity_network(n):
update_wind_solar_costs(n, costs)
n.loads["carrier"] = "electricity"
n.buses["location"] = n.buses.index
n.buses["unit"] = "MWh_el"
# remove trailing white space of load index until new PyPSA version after v0.18.
n.loads.rename(lambda x: x.strip(), inplace=True)
n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True)
@ -468,7 +476,8 @@ def add_co2_tracking(n, options):
n.add("Bus",
"co2 atmosphere",
location="EU",
carrier="co2"
carrier="co2",
unit="t_co2"
)
# can also be negative
@ -484,7 +493,8 @@ def add_co2_tracking(n, options):
n.madd("Bus",
spatial.co2.nodes,
location=spatial.co2.locations,
carrier="co2 stored"
carrier="co2 stored",
unit="t_co2"
)
n.madd("Store",
@ -771,7 +781,8 @@ def insert_electricity_distribution_grid(n, costs):
n.madd("Bus",
nodes + " low voltage",
location=nodes,
carrier="low voltage"
carrier="low voltage",
unit="MWh_el"
)
n.madd("Link",
@ -838,7 +849,8 @@ def insert_electricity_distribution_grid(n, costs):
n.madd("Bus",
nodes + " home battery",
location=nodes,
carrier="home battery"
carrier="home battery",
unit="MWh_el"
)
n.madd("Store",
@ -913,7 +925,8 @@ def add_storage_and_grids(n, costs):
n.madd("Bus",
nodes + " H2",
location=nodes,
carrier="H2"
carrier="H2",
unit="MWh_LHV"
)
n.madd("Link",
@ -1119,7 +1132,8 @@ def add_storage_and_grids(n, costs):
n.madd("Bus",
nodes + " battery",
location=nodes,
carrier="battery"
carrier="battery",
unit="MWh_el"
)
n.madd("Store",
@ -1186,6 +1200,24 @@ def add_storage_and_grids(n, costs):
lifetime=costs.at['helmeth', 'lifetime']
)
if options.get('coal_cc'):
n.madd("Link",
spatial.nodes,
suffix=" coal CC",
bus0=spatial.coal.nodes,
bus1=spatial.nodes,
bus2="co2 atmosphere",
bus3="co2 stored",
marginal_cost=costs.at['coal', 'efficiency'] * costs.at['coal', 'VOM'], #NB: VOM is per MWel
capital_cost=costs.at['coal', 'efficiency'] * costs.at['coal', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at['coal', 'CO2 intensity'], #NB: fixed cost is per MWel
p_nom_extendable=True,
carrier="coal",
efficiency=costs.at['coal', 'efficiency'],
efficiency2=costs.at['coal', 'CO2 intensity'] * (1 - costs.at['biomass CHP capture','capture_rate']),
efficiency3=costs.at['coal', 'CO2 intensity'] * costs.at['biomass CHP capture','capture_rate'],
lifetime=costs.at['coal','lifetime']
)
if options['SMR']:
@ -1249,7 +1281,8 @@ def add_land_transport(n, costs):
nodes,
location=nodes,
suffix=" EV battery",
carrier="Li ion"
carrier="Li ion",
unit="MWh_el"
)
p_set = electric_share * (transport[nodes] + cycling_shift(transport[nodes], 1) + cycling_shift(transport[nodes], 2)) / 3
@ -1323,7 +1356,8 @@ def add_land_transport(n, costs):
n.madd("Bus",
spatial.oil.nodes,
location=spatial.oil.locations,
carrier="oil"
carrier="oil",
unit="MWh_LHV"
)
ice_efficiency = options['transport_internal_combustion_efficiency']
@ -1431,7 +1465,8 @@ def add_heat(n, costs):
n.madd("Bus",
nodes[name] + f" {name} heat",
location=nodes[name],
carrier=name + " heat"
carrier=name + " heat",
unit="MWh_th"
)
## Add heat load
@ -1488,7 +1523,8 @@ def add_heat(n, costs):
n.madd("Bus",
nodes[name] + f" {name} water tanks",
location=nodes[name],
carrier=name + " water tanks"
carrier=name + " water tanks",
unit="MWh_th"
)
n.madd("Link",
@ -1517,9 +1553,6 @@ def add_heat(n, costs):
"for 'decentral' and 'central' separately.")
tes_time_constant_days = options["tes_tau"] if name_type == "decentral" else 180.
# conversion from EUR/m^3 to EUR/MWh for 40 K diff and 1.17 kWh/m^3/K
capital_cost = costs.at[name_type + ' water tank storage', 'fixed'] / 0.00117 / 40
n.madd("Store",
nodes[name] + f" {name} water tanks",
bus=nodes[name] + f" {name} water tanks",
@ -1527,7 +1560,7 @@ def add_heat(n, costs):
e_nom_extendable=True,
carrier=name + " water tanks",
standing_loss=1 - np.exp(- 1 / 24 / tes_time_constant_days),
capital_cost=capital_cost,
capital_cost=costs.at[name_type + ' water tank storage', 'fixed'],
lifetime=costs.at[name_type + ' water tank storage', 'lifetime']
)
@ -1561,6 +1594,7 @@ def add_heat(n, costs):
lifetime=costs.at[key, 'lifetime']
)
if options["solar_thermal"]:
n.add("Carrier", name + " solar thermal")
@ -1793,13 +1827,15 @@ def add_biomass(n, costs):
n.madd("Bus",
spatial.gas.biogas,
location=spatial.gas.locations,
carrier="biogas"
carrier="biogas",
unit="MWh_LHV"
)
n.madd("Bus",
spatial.biomass.nodes,
location=spatial.biomass.locations,
carrier="solid biomass"
carrier="solid biomass",
unit="MWh_LHV"
)
n.madd("Store",
@ -1897,6 +1933,95 @@ def add_biomass(n, costs):
lifetime=costs.at[key, 'lifetime']
)
if options["biomass_boiler"]:
#TODO: Add surcharge for pellets
nodes_heat = create_nodes_for_heat_sector()[0]
for name in ["residential rural", "services rural",
"residential urban decentral", "services urban decentral"]:
n.madd("Link",
nodes_heat[name] + f" {name} biomass boiler",
p_nom_extendable=True,
bus0=spatial.biomass.df.loc[nodes_heat[name], "nodes"].values,
bus1=nodes_heat[name] + f" {name} heat",
carrier=name + " biomass boiler",
efficiency=costs.at['biomass boiler', 'efficiency'],
capital_cost=costs.at['biomass boiler', 'efficiency'] * costs.at['biomass boiler', 'fixed'],
lifetime=costs.at['biomass boiler', 'lifetime']
)
#Solid biomass to liquid fuel
if options["biomass_to_liquid"]:
n.madd("Link",
spatial.biomass.nodes,
suffix=" biomass to liquid",
bus0=spatial.biomass.nodes,
bus1=spatial.oil.nodes,
bus2="co2 atmosphere",
carrier="biomass to liquid",
lifetime=costs.at['BtL', 'lifetime'],
efficiency=costs.at['BtL', 'efficiency'],
efficiency2=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BtL', 'CO2 stored'],
p_nom_extendable=True,
capital_cost=costs.at['BtL', 'fixed'],
marginal_cost=costs.at['BtL', 'efficiency']*costs.loc["BtL", "VOM"]
)
#TODO: Update with energy penalty
n.madd("Link",
spatial.biomass.nodes,
suffix=" biomass to liquid CC",
bus0=spatial.biomass.nodes,
bus1=spatial.oil.nodes,
bus2="co2 atmosphere",
bus3=spatial.co2.nodes,
carrier="biomass to liquid",
lifetime=costs.at['BtL', 'lifetime'],
efficiency=costs.at['BtL', 'efficiency'],
efficiency2=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BtL', 'CO2 stored'] * (1 - costs.at['BtL', 'capture rate']),
efficiency3=costs.at['BtL', 'CO2 stored'] * costs.at['BtL', 'capture rate'],
p_nom_extendable=True,
capital_cost=costs.at['BtL', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at[
"BtL", "CO2 stored"],
marginal_cost=costs.at['BtL', 'efficiency'] * costs.loc["BtL", "VOM"])
#BioSNG from solid biomass
if options["biosng"]:
n.madd("Link",
spatial.biomass.nodes,
suffix=" solid biomass to gas",
bus0=spatial.biomass.nodes,
bus1=spatial.gas.nodes,
bus3="co2 atmosphere",
carrier="BioSNG",
lifetime=costs.at['BioSNG', 'lifetime'],
efficiency=costs.at['BioSNG', 'efficiency'],
efficiency3=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BioSNG', 'CO2 stored'],
p_nom_extendable=True,
capital_cost=costs.at['BioSNG', 'fixed'],
marginal_cost=costs.at['BioSNG', 'efficiency']*costs.loc["BioSNG", "VOM"]
)
#TODO: Update with energy penalty for CC
n.madd("Link",
spatial.biomass.nodes,
suffix=" solid biomass to gas CC",
bus0=spatial.biomass.nodes,
bus1=spatial.gas.nodes,
bus2=spatial.co2.nodes,
bus3="co2 atmosphere",
carrier="BioSNG",
lifetime=costs.at['BioSNG', 'lifetime'],
efficiency=costs.at['BioSNG', 'efficiency'],
efficiency2=costs.at['BioSNG', 'CO2 stored'] * costs.at['BioSNG', 'capture rate'],
efficiency3=-costs.at['solid biomass', 'CO2 intensity'] + costs.at['BioSNG', 'CO2 stored'] * (1 - costs.at['BioSNG', 'capture rate']),
p_nom_extendable=True,
capital_cost=costs.at['BioSNG', 'fixed'] + costs.at['biomass CHP capture', 'fixed'] * costs.at[
"BioSNG", "CO2 stored"],
marginal_cost=costs.at['BioSNG', 'efficiency']*costs.loc["BioSNG", "VOM"]
)
def add_industry(n, costs):
@ -1910,7 +2035,8 @@ def add_industry(n, costs):
n.madd("Bus",
spatial.biomass.industry,
location=spatial.biomass.locations,
carrier="solid biomass for industry"
carrier="solid biomass for industry",
unit="MWh_LHV"
)
if options["biomass_transport"]:
@ -1952,7 +2078,8 @@ def add_industry(n, costs):
n.madd("Bus",
spatial.gas.industry,
location=spatial.gas.locations,
carrier="gas for industry")
carrier="gas for industry",
unit="MWh_LHV")
gas_demand = industrial_demand.loc[nodes, "methane"] / 8760.
@ -2008,7 +2135,8 @@ def add_industry(n, costs):
nodes,
suffix=" H2 liquid",
carrier="H2 liquid",
location=nodes
location=nodes,
unit="MWh_LHV"
)
n.madd("Link",
@ -2066,7 +2194,8 @@ def add_industry(n, costs):
n.madd("Bus",
spatial.oil.nodes,
location=spatial.oil.locations,
carrier="oil"
carrier="oil",
unit="MWh_LHV"
)
if "oil" not in n.stores.carrier.unique():
@ -2180,7 +2309,8 @@ def add_industry(n, costs):
n.add("Bus",
"process emissions",
location="EU",
carrier="process emissions"
carrier="process emissions",
unit="t_co2"
)
# this should be process emissions fossil+feedstock
@ -2369,6 +2499,88 @@ def limit_individual_line_extension(n, maxext):
hvdc = n.links.index[n.links.carrier == 'DC']
n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext
def apply_time_segmentation(n, segments, solver_name="cbc",
overwrite_time_dependent=True):
"""Aggregating time series to segments with different lengths
Input:
n: pypsa Network
segments: (int) number of segments in which the typical period should be
subdivided
solver_name: (str) name of solver
overwrite_time_dependent: (bool) overwrite time dependent data of pypsa network
with typical time series created by tsam
"""
try:
import tsam.timeseriesaggregation as tsam
except ImportError:
raise ModuleNotFoundError("Optional dependency 'tsam' not found. "
"Install via 'pip install tsam'")
# get all time-dependent data
columns = pd.MultiIndex.from_tuples([],names=['component', 'key', 'asset'])
raw = pd.DataFrame(index=n.snapshots,columns=columns)
for c in n.iterate_components():
for attr, pnl in c.pnl.items():
# exclude e_min_pu which is used for SOC of EVs in the morning
if not pnl.empty and attr != 'e_min_pu':
df = pnl.copy()
df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
raw = pd.concat([raw, df], axis=1)
# normalise all time-dependent data
annual_max = raw.max().replace(0,1)
raw = raw.div(annual_max, level=0)
# get representative segments
agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
noTypicalPeriods=1, noSegments=int(segments),
segmentation=True, solver=solver_name)
segmented = agg.createTypicalPeriods()
weightings = segmented.index.get_level_values("Segment Duration")
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
timesteps = [raw.index[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
snapshots = pd.DatetimeIndex(timesteps)
sn_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64")
n.set_snapshots(sn_weightings.index)
n.snapshot_weightings = n.snapshot_weightings.mul(sn_weightings, axis=0)
# overwrite time-dependent data with timeseries created by tsam
if overwrite_time_dependent:
values_t = segmented.mul(annual_max).set_index(snapshots)
for component, key in values_t.columns.droplevel(2).unique():
n.pnl(component)[key] = values_t[component, key]
return n
def set_temporal_aggregation(n, opts, solver_name):
"""Aggregate network temporally."""
for o in opts:
# temporal averaging
m = re.match(r"^\d+h$", o, re.IGNORECASE)
if m is not None:
n = average_every_nhours(n, m.group(0))
break
# representive snapshots
m = re.match(r"(^\d+)sn$", o, re.IGNORECASE)
if m is not None:
sn = int(m[1])
logger.info(f"use every {sn} snapshot as representative")
n.set_snapshots(n.snapshots[::sn])
n.snapshot_weightings *= sn
break
# segments with package tsam
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
segments = int(m[1])
logger.info(f"use temporal segmentation with {segments} segments")
n = apply_time_segmentation(n, segments, solver_name=solver_name)
break
return n
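
As a quick illustration (not part of the diff), the three temporal-resolution wildcards are matched as follows; the example values are arbitrary.

import re

for o in ["365H", "25sn", "200seg"]:
    if re.match(r"^\d+h$", o, re.IGNORECASE):
        print(o, "-> average every n hours")
    elif re.match(r"(^\d+)sn$", o, re.IGNORECASE):
        print(o, "-> use every n-th snapshot as representative")
    elif re.match(r"^(\d+)seg$", o, re.IGNORECASE):
        print(o, "-> aggregate to n segments with tsam")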
#%%
if __name__ == "__main__":
if 'snakemake' not in globals():
@ -2378,13 +2590,15 @@ if __name__ == "__main__":
simpl='',
opts="",
clusters="37",
lv=1.0,
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1',
lv=1.5,
sector_opts='cb40ex0-365H-T-H-B-I-A-solar+p3-dist1',
planning_horizons="2020",
)
logging.basicConfig(level=snakemake.config['logging_level'])
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
options = snakemake.config["sector"]
opts = snakemake.wildcards.sector_opts.split('-')
@ -2471,11 +2685,8 @@ if __name__ == "__main__":
if options["co2_network"]:
add_co2_network(n, costs)
for o in opts:
m = re.match(r'^\d+h$', o, re.IGNORECASE)
if m is not None:
n = average_every_nhours(n, m.group(0))
break
solver_name = snakemake.config["solving"]["solver"]["name"]
n = set_temporal_aggregation(n, opts, solver_name)
limit_type = "config"
limit = get(snakemake.config["co2_budget"], investment_year)
@ -2484,9 +2695,11 @@ if __name__ == "__main__":
limit_type = "carbon budget"
fn = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/carbon_budget_distribution.csv'
if not os.path.exists(fn):
build_carbon_budget(o, fn)
emissions_scope = snakemake.config["energy"]["emissions"]
report_year = snakemake.config["energy"]["eurostat_report_year"]
build_carbon_budget(o, snakemake.input.eurostat, fn, emissions_scope, report_year)
co2_cap = pd.read_csv(fn, index_col=0).squeeze()
limit = co2_cap[investment_year]
limit = co2_cap.loc[investment_year]
break
for o in opts:
if not "Co2L" in o: continue
@ -2514,4 +2727,5 @@ if __name__ == "__main__":
if options['electricity_grid_connection']:
add_electricity_grid_connection(n, costs)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])


@ -11,7 +11,7 @@ from pypsa.linopf import network_lopf, ilopf
from vresutils.benchmark import memory_logger
from helper import override_component_attrs
from helper import override_component_attrs, update_config_with_sector_opts
import logging
logger = logging.getLogger(__name__)
@ -227,7 +227,7 @@ def add_co2_sequestration_limit(n, sns):
limit = n.config["sector"].get("co2_sequestration_potential", 200) * 1e6
for o in opts:
if not "seq" in o: continue
limit = float(o[o.find("seq")+3:])
limit = float(o[o.find("seq")+3:]) * 1e6
break
name = 'co2_sequestration_limit'
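
A small sketch (not part of the diff) of what the changed parsing yields; the value 400 is an arbitrary example.

# A "seq400" wildcard now sets the sequestration limit in MtCO2/a:
o = "seq400"
limit = float(o[o.find("seq") + 3:]) * 1e6  # 400 -> 4.0e8 tCO2 = 400 MtCO2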
@ -290,11 +290,13 @@ if __name__ == "__main__":
logging.basicConfig(filename=snakemake.log.python,
level=snakemake.config['logging_level'])
update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)
tmpdir = snakemake.config['solving'].get('tmpdir')
if tmpdir is not None:
from pathlib import Path
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.opts.split('-')
opts = snakemake.wildcards.sector_opts.split('-')
solve_opts = snakemake.config['solving']['options']
fn = getattr(snakemake.log, 'memory', None)
@ -313,6 +315,7 @@ if __name__ == "__main__":
n.line_volume_limit = n.global_constraints.at["lv_limit", "constant"]
n.line_volume_limit_dual = n.global_constraints.at["lv_limit", "mu"]
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])
logger.info("Maximum memory usage: {}".format(mem.mem_usage))


@ -262,6 +262,9 @@ sector:
biomass_transport: false # biomass transport between nodes
conventional_generation: # generator : carrier
OCGT: gas
biomass_boiler: false
biomass_to_liquid: false
biosng: false
industry:
@ -316,6 +319,7 @@ industry:
# Material Economics (2019): https://materialeconomics.com/latest-updates/industrial-transformation-2050
costs:
year: 2030
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07


@ -260,6 +260,9 @@ sector:
biomass_transport: false # biomass transport between nodes
conventional_generation: # generator : carrier
OCGT: gas
biomass_boiler: false
biomass_to_liquid: false
biosng: false
industry:
@ -314,6 +317,7 @@ industry:
# Material Economics (2019): https://materialeconomics.com/latest-updates/industrial-transformation-2050
costs:
year: 2030
lifetime: 25 #default lifetime
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07