Merge pull request #135 from PyPSA/retrofit-gas-pipelines

Retrofit gas pipelines

Co-authored-by: lisazeyen <lisa.zeyen@web.de>
This commit is contained in:
Fabian Neumann 2021-11-29 09:15:09 +01:00 committed by GitHub
commit a7fd4901c1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 1064 additions and 226 deletions

View File

@ -101,6 +101,61 @@ rule build_simplified_population_layouts:
script: "scripts/build_clustered_population_layouts.py"
if config["sector"]["gas_network"]:
datafiles = [
"IGGIELGN_LNGs.geojson",
"IGGIELGN_BorderPoints.geojson",
"IGGIELGN_Productions.geojson",
"IGGIELGN_PipeSegments.geojson",
]
rule retrieve_gas_infrastructure_data:
output: expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
script: 'scripts/retrieve_gas_infrastructure_data.py'
rule build_gas_network:
input:
gas_network="data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson"
output:
cleaned_gas_network="resources/gas_network.csv"
resources: mem_mb=4000
script: "scripts/build_gas_network.py"
rule build_gas_input_locations:
input:
lng="data/gas_network/scigrid-gas/data/IGGIELGN_LNGs.geojson",
entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson",
production="data/gas_network/scigrid-gas/data/IGGIELGN_Productions.geojson",
planned_lng="data/gas_network/planned_LNGs.csv",
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=pypsaeur('resources/regions_offshore_elec_s{simpl}_{clusters}.geojson')
output:
gas_input_nodes="resources/gas_input_locations_s{simpl}_{clusters}.geojson",
gas_input_nodes_simplified="resources/gas_input_locations_s{simpl}_{clusters}_simplified.csv"
resources: mem_mb=2000,
script: "scripts/build_gas_input_locations.py"
rule cluster_gas_network:
input:
cleaned_gas_network="resources/gas_network.csv",
regions_onshore=pypsaeur("resources/regions_onshore_elec_s{simpl}_{clusters}.geojson"),
regions_offshore=pypsaeur("resources/regions_offshore_elec_s{simpl}_{clusters}.geojson")
output:
clustered_gas_network="resources/gas_network_elec_s{simpl}_{clusters}.csv"
resources: mem_mb=4000
script: "scripts/cluster_gas_network.py"
gas_infrastructure = {**rules.cluster_gas_network.output, **rules.build_gas_input_locations.output}
else:
gas_infrastructure = {}
rule build_heat_demands:
input:
pop_layout_total="resources/pop_layout_total.nc",
@ -379,8 +434,8 @@ rule prepare_sector_network:
energy_totals_name='resources/energy_totals.csv',
co2_totals_name='resources/co2_totals.csv',
transport_name='resources/transport_data.csv',
traffic_data_KFZ = "data/emobility/KFZ__count",
traffic_data_Pkw = "data/emobility/Pkw__count",
traffic_data_KFZ="data/emobility/KFZ__count",
traffic_data_Pkw="data/emobility/Pkw__count",
biomass_potentials='resources/biomass_potentials_s{simpl}_{clusters}.csv',
heat_profile="data/heat_load_profile_BDEW.csv",
costs=CDIR + "costs_{planning_horizons}.csv",
@ -411,7 +466,8 @@ rule prepare_sector_network:
solar_thermal_urban="resources/solar_thermal_urban_elec_s{simpl}_{clusters}.nc",
solar_thermal_rural="resources/solar_thermal_rural_elec_s{simpl}_{clusters}.nc",
**build_retro_cost_output,
**build_biomass_transport_costs_output
**build_biomass_transport_costs_output,
**gas_infrastructure
output: RDIR + '/prenetworks/elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc'
threads: 1
resources: mem_mb=2000

View File

@ -249,6 +249,14 @@ sector:
electricity_distribution_grid: false
electricity_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv
electricity_grid_connection: true # only applies to onshore wind and utility PV
H2_network: true
gas_network: true
H2_retrofit: true # if set to True existing gas pipes can be retrofitted to H2 pipes
# according to hydrogen backbone strategy (April, 2020) p.15
# https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
# 60% of original natural gas capacity could be used in cost-optimal case as H2 capacity
H2_retrofit_capacity_per_CH4: 0.6 # ratio for H2 capacity per original CH4 capacity of retrofitted pipelines
gas_network_connectivity_upgrade: 3 # https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation.html#networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation
gas_distribution_grid: true
gas_distribution_grid_cost_factor: 1.0 #multiplies cost in data/costs.csv
biomass_transport: false # biomass transport between nodes
@ -449,6 +457,7 @@ plotting:
gas boilers: '#db6a25'
gas boiler marginal: '#db6a25'
gas: '#e05b09'
fossil gas: '#e05b09'
natural gas: '#e05b09'
CCGT: '#a85522'
CCGT marginal: '#a85522'
@ -457,6 +466,7 @@ plotting:
gas for industry: '#853403'
gas for industry CC: '#692e0a'
gas pipeline: '#ebbca0'
gas pipeline new: '#a87c62'
# oil
oil: '#c9c9c9'
oil boiler: '#adadad'
@ -546,6 +556,7 @@ plotting:
H2 storage: '#bf13a0'
land transport fuel cell: '#6b3161'
H2 pipeline: '#f081dc'
H2 pipeline retrofitted: '#ba99b5'
H2 Fuel Cell: '#c251ae'
H2 Electrolysis: '#ff29d9'
# syngas
@ -584,4 +595,4 @@ plotting:
power-to-liquid: '#25c49a'
gas-to-power/heat: '#ee8340'
waste: '#e3d37d'
other: '#000000'
other: '#000000'

View File

@ -0,0 +1,8 @@
name,geometry,max_cap_store2pipe_M_m3_per_d,source
Wilhelmshaven,"POINT(8.133 53.516)",27.4,https://www.gem.wiki/Wilhelmshaven_LNG_Terminal
Brunsbüttel,"POINT(8.976 53.914)",19.2,https://www.gem.wiki/Brunsb%C3%BCttel_LNG_Terminal
Stade,"POINT(9.510 53.652)",32.9,https://www.gem.wiki/Stade_LNG_Terminal
Alexandroupolis,"POINT(25.843 40.775)",16.7,https://www.gem.wiki/Alexandroupolis_LNG_Terminal
Shannon,"POINT(-9.442 52.581)",22.5,https://www.gem.wiki/Shannon_LNG_Terminal
Gothenburg,"POINT(11.948 57.702)",1.4,https://www.gem.wiki/Gothenburg_LNG_Terminal
Cork,"POINT(-8.323 51.831)",11.0,https://www.gem.wiki/Cork_LNG_Terminal
1 name geometry max_cap_store2pipe_M_m3_per_d source
2 Wilhelmshaven POINT(8.133 53.516) 27.4 https://www.gem.wiki/Wilhelmshaven_LNG_Terminal
3 Brunsbüttel POINT(8.976 53.914) 19.2 https://www.gem.wiki/Brunsb%C3%BCttel_LNG_Terminal
4 Stade POINT(9.510 53.652) 32.9 https://www.gem.wiki/Stade_LNG_Terminal
5 Alexandroupolis POINT(25.843 40.775) 16.7 https://www.gem.wiki/Alexandroupolis_LNG_Terminal
6 Shannon POINT(-9.442 52.581) 22.5 https://www.gem.wiki/Shannon_LNG_Terminal
7 Gothenburg POINT(11.948 57.702) 1.4 https://www.gem.wiki/Gothenburg_LNG_Terminal
8 Cork POINT(-8.323 51.831) 11.0 https://www.gem.wiki/Cork_LNG_Terminal

View File

@ -8,10 +8,56 @@ Future release
.. note::
This unreleased version currently may require the master branches of PyPSA, PyPSA-Eur, and the technology-data repository.
This release includes the addition of the European gas transmission network and
incorporates retrofitting options to hydrogen.
**Gas Transmission Network**
* New rule ``retrieve_gas_infrastructure_data`` that downloads and extracts the
SciGRID_gas `IGGIELGN <https://zenodo.org/record/4767098>`_ dataset from zenodo.
It includes data on the transmission routes, pipe diameters, capacities, pressure,
and whether the pipeline is bidirectional and carries H-Gas or L-Gas.
* New rule ``build_gas_network`` processes and cleans the pipeline data from SciGRID_gas.
Missing or uncertain pipeline capacities can be inferred by diameter.
* New rule ``build_gas_input_locations`` compiles the LNG import capacities
(including planned projects from gem.wiki), pipeline entry capacities and
local production capacities for each region of the model. These are the
regions where fossil gas can eventually enter the model.
* New rule ``cluster_gas_network`` that clusters the gas transmission network
data to the model resolution. Cross-regional pipeline capacities are aggregated
(while pressure and diameter compatibility is ignored), intra-regional pipelines
are dropped. Lengths are recalculated based on the regions' centroids.
* With the option ``sector: gas_network:``, the existing gas network is
added with a lossless transport model. A length-weighted `k-edge augmentation
algorithm
<https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation.html#networkx.algorithms.connectivity.edge_augmentation.k_edge_augmentation>`_
can be run to add new candidate gas pipelines such that all regions of the
model can be connected to the gas network. The number of candidates can be
controlled via the setting ``sector: gas_network_connectivity_upgrade:``. When
the gas network is activated, all the gas demands are regionally disaggregated
as well.
* New constraint allows retrofitting of gas pipelines to hydrogen pipelines.
This option is activated via the setting ``sector: H2_retrofit:``. For every
unit of gas pipeline capacity dismantled, ``sector:
H2_retrofit_capacity_per_CH4`` units are made available as hydrogen pipeline
capacity in the corresponding corridor. These repurposed hydrogen pipelines
have lower costs than new hydrogen pipelines. Both new and repurposed pipelines
can be built simultaneously.
* New hydrogen pipelines can now be built where there are already power or gas
transmission routes. Previously, only the electricity transmission routes were
considered.
* Option ``retrieve_sector_databundle`` to automatically retrieve and extract data bundle.
* Add regionalised hydrogen salt cavern storage potentials from `Technical Potential of Salt Caverns for Hydrogen Storage in Europe <https://doi.org/10.20944/preprints201910.0187.v1>`_.
PyPSA-Eur-Sec 0.6.0 (4 October 2021)
====================================

View File

@ -41,8 +41,10 @@ locations of industry from `HotMaps database <https://gitlab.com/hotmaps/industr
Hydrogen network: nodal.
Methane network: single node for Europe, since future demand is so
low and no bottlenecks are expected.
Methane network: single node for Europe, since future demand is so low and no
bottlenecks are expected. Optionally, if for example retrofitting from fossil
gas to hydrogen is to be considered, the methane grid can be nodally resolved
based on SciGRID_gas data.
Solid biomass: choice between single node for Europe and nodal where biomass
potential is regionally disaggregated (currently given per country,

View File

@ -151,7 +151,7 @@ Hydrogen demand
Stationary fuel cell CHP.
Transport applications.
Transport applications (heavy-duty road vehicles, liquid H2 in shipping).
Industry (ammonia, precursor to hydrocarbons for chemicals and iron/steel).

View File

@ -175,7 +175,7 @@ def convert_nuts2_to_regions(bio_nuts2, regions):
# calculate area of nuts2 regions
bio_nuts2["area_nuts2"] = area(bio_nuts2)
overlay = gpd.overlay(regions, bio_nuts2)
overlay = gpd.overlay(regions, bio_nuts2, keep_geom_type=True)
# calculate share of nuts2 area inside region
overlay["share"] = area(overlay) / overlay["area_nuts2"]

View File

@ -0,0 +1,105 @@
"""
Build import locations for fossil gas from entry-points, LNG terminals and production sites.
"""
import logging
logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd
from shapely import wkt
from cluster_gas_network import load_bus_regions
def read_scigrid_gas(fn):
    """Load a SciGRID_gas GeoJSON file and flatten its nested 'param' column.

    The bookkeeping columns 'param', 'uncertainty' and 'method' are dropped
    after the parameters have been expanded into top-level columns.
    """
    gdf = gpd.read_file(fn)
    params = gdf.param.apply(pd.Series)
    combined = pd.concat([gdf, params], axis=1)
    combined.drop(["param", "uncertainty", "method"], axis=1, inplace=True)
    return combined
def build_gas_input_locations(lng_fn, planned_lng_fn, entry_fn, prod_fn, countries):
    """Compile locations where fossil gas can enter the model.

    Combines LNG terminals (existing plus planned), pipeline entry points
    from outside the model scope, and domestic production sites.

    Parameters
    ----------
    lng_fn : str
        Path to SciGRID_gas LNG terminals (GeoJSON).
    planned_lng_fn : str
        Path to CSV of planned LNG terminals with a WKT 'geometry' column.
    entry_fn : str
        Path to SciGRID_gas border points (GeoJSON).
    prod_fn : str
        Path to SciGRID_gas production sites (GeoJSON).
    countries : iterable of str
        Two-letter codes of countries inside the model scope.

    Returns
    -------
    pandas.DataFrame
        Columns 'geometry', 'p_nom' (MWh/h) and 'type'
        ('lng', 'pipeline' or 'production').
    """

    # LNG terminals: existing SciGRID_gas terminals plus planned projects
    lng = read_scigrid_gas(lng_fn)
    planned_lng = pd.read_csv(planned_lng_fn)
    planned_lng.geometry = planned_lng.geometry.apply(wkt.loads)
    planned_lng = gpd.GeoDataFrame(planned_lng, crs=4326)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent
    lng = pd.concat([lng, planned_lng], ignore_index=True)

    # Entry points from outside the model scope
    entry = read_scigrid_gas(entry_fn)
    entry["from_country"] = entry.from_country.str.rstrip()
    # parentheses made explicit; identical to the original operator precedence
    entry = entry.loc[
        (
            ~(entry.from_country.isin(countries) & entry.to_country.isin(countries))  # only take non-EU entries
            & ~entry.name.str.contains("Tegelen")  # malformed datapoint
        )
        | (entry.from_country == "NO")  # entries from NO to GB
    ]

    # production sites inside the model scope
    # (rough bounding box; DE data excluded)
    prod = read_scigrid_gas(prod_fn)
    prod = prod.loc[
        (prod.geometry.y > 35) &
        (prod.geometry.x < 30) &
        (prod.country_code != "DE")
    ]

    conversion_factor = 437.5  # MCM/day to MWh/h
    lng["p_nom"] = lng["max_cap_store2pipe_M_m3_per_d"] * conversion_factor
    entry["p_nom"] = entry["max_cap_from_to_M_m3_per_d"] * conversion_factor
    prod["p_nom"] = prod["max_supply_M_m3_per_d"] * conversion_factor

    lng["type"] = "lng"
    entry["type"] = "pipeline"
    prod["type"] = "production"

    sel = ["geometry", "p_nom", "type"]

    return pd.concat([prod[sel], entry[sel], lng[sel]], ignore_index=True)
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'build_gas_import_locations',
simpl='',
clusters='37',
)
logging.basicConfig(level=snakemake.config['logging_level'])
regions = load_bus_regions(
snakemake.input.regions_onshore,
snakemake.input.regions_offshore
)
# add a buffer to eastern countries because some
# entry points are still in Russian or Ukrainian territory.
buffer = 9000 # meters
eastern_countries = ['FI', 'EE', 'LT', 'LV', 'PL', 'SK', 'HU', 'RO']
add_buffer_b = regions.index.str[:2].isin(eastern_countries)
regions.loc[add_buffer_b] = regions[add_buffer_b].to_crs(3035).buffer(buffer).to_crs(4326)
countries = regions.index.str[:2].unique().str.replace("GB", "UK")
gas_input_locations = build_gas_input_locations(
snakemake.input.lng,
snakemake.input.planned_lng,
snakemake.input.entry,
snakemake.input.production,
countries
)
gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how='left')
gas_input_nodes.rename(columns={"index_right": "bus"}, inplace=True)
gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver='GeoJSON')
gas_input_nodes_s = gas_input_nodes.groupby(["bus", "type"])["p_nom"].sum().unstack()
gas_input_nodes_s.columns.name = "p_nom"
gas_input_nodes_s.to_csv(snakemake.output.gas_input_nodes_simplified)

View File

@ -0,0 +1,135 @@
"""Preprocess gas network based on data from bthe SciGRID Gas project (https://www.gas.scigrid.de/)."""
import logging
logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from pypsa.geo import haversine_pts
def diameter_to_capacity(pipe_diameter_mm):
"""Calculate pipe capacity in MW based on diameter in mm.
20 inch (500 mm) 50 bar -> 1.5 GW CH4 pipe capacity (LHV)
24 inch (600 mm) 50 bar -> 5 GW CH4 pipe capacity (LHV)
36 inch (900 mm) 50 bar -> 11.25 GW CH4 pipe capacity (LHV)
48 inch (1200 mm) 80 bar -> 21.7 GW CH4 pipe capacity (LHV)
Based on p.15 of https://gasforclimate2050.eu/wp-content/uploads/2020/07/2020_European-Hydrogen-Backbone_Report.pdf
"""
# slopes definitions
m0 = (1500 - 0) / (500 - 0)
m1 = (5000 - 1500) / (600 - 500)
m2 = (11250 - 5000) / (900 - 600)
m3 = (21700 - 11250) / (1200 - 900)
# intercept
a0 = 0
a1 = -16000
a2 = -7500
a3 = -20100
if pipe_diameter_mm < 500:
return a0 + m0 * pipe_diameter_mm
elif pipe_diameter_mm < 600:
return a1 + m1 * pipe_diameter_mm
elif pipe_diameter_mm < 900:
return a2 + m2 * pipe_diameter_mm
else:
return a3 + m3 * pipe_diameter_mm
def load_dataset(fn):
    """Read SciGRID_gas pipe segments and flatten nested attribute columns.

    The nested 'param' dict becomes top-level columns; the estimation-method
    flags for diameter and capacity are kept with a '_method' suffix.
    Bookkeeping columns are dropped where present.
    """
    raw = gpd.read_file(fn)
    params = raw.param.apply(pd.Series)
    methods = raw.method.apply(pd.Series)[["diameter_mm", "max_cap_M_m3_per_d"]]
    methods = methods.add_suffix("_method")
    flat = pd.concat([raw, params, methods], axis=1)
    redundant = flat.columns.intersection(["param", "uncertainty", "method", "tags"])
    flat.drop(redundant, axis=1, inplace=True)
    return flat
def prepare_dataset(
    df,
    length_factor=1.5,
    correction_threshold_length=4,
    correction_threshold_p_nom=8,
    bidirectional_below=10
):
    """Clean and normalise the SciGRID_gas pipe-segment table.

    Extracts endpoints, converts capacities to MWh/h, replaces implausible
    capacities and lengths with diameter-based and haversine-based estimates,
    and renames columns to PyPSA conventions.

    Parameters
    ----------
    df : GeoDataFrame
        Output of ``load_dataset``.
    length_factor : float
        Multiplier applied to haversine distances to approximate route length.
    correction_threshold_length : float
        Length/haversine ratio beyond which the reported length is replaced.
    correction_threshold_p_nom : float
        Capacity/diameter-capacity ratio beyond which capacity is replaced.
    bidirectional_below : float
        Pipes shorter than this (km) are assumed bidirectional.

    Returns
    -------
    DataFrame
        Cleaned pipe table; note the input ``df`` is also mutated in place.
    """

    # extract start and end from LineString
    df["point0"] = df.geometry.apply(lambda x: Point(x.coords[0]))
    df["point1"] = df.geometry.apply(lambda x: Point(x.coords[-1]))

    conversion_factor = 437.5 # MCM/day to MWh/h
    df["p_nom"] = df.max_cap_M_m3_per_d * conversion_factor

    # for inferred diameters, assume 500 mm rather than 900 mm (more conservative)
    df.loc[df.diameter_mm_method != 'raw', "diameter_mm"] = 500.

    keep = ["name", "diameter_mm", "is_H_gas", "is_bothDirection",
            "length_km", "p_nom", "max_pressure_bar",
            "start_year", "point0", "point1", "geometry"]
    to_rename = {
        "is_bothDirection": "bidirectional",
        "is_H_gas": "H_gas",
        "start_year": "build_year",
        "length_km": "length",
    }
    df = df[keep].rename(columns=to_rename)

    df.bidirectional = df.bidirectional.astype(bool)
    df.H_gas = df.H_gas.astype(bool)

    # short lines below 10 km are assumed to be bidirectional
    short_lines = df["length"] < bidirectional_below
    df.loc[short_lines, "bidirectional"] = True

    # correct all capacities that deviate correction_threshold factor
    # to diameter-based capacities, unless they are NordStream pipelines
    # also all capacities below 0.5 GW are now diameter-based capacities
    # NOTE(review): NordStream is identified purely via max_pressure_bar >= 220
    df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
    ratio = df.p_nom / df.p_nom_diameter
    not_nordstream = df.max_pressure_bar < 220
    df.p_nom.update(df.p_nom_diameter.where(
        (df.p_nom <= 500) |
        ((ratio > correction_threshold_p_nom) & not_nordstream) |
        ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
    ))

    # lines which have way too discrepant line lengths
    # get assigned haversine length * length factor
    # (very short reported lengths below 20 km are also replaced)
    df["length_haversine"] = df.apply(
        lambda p: length_factor * haversine_pts(
            [p.point0.x, p.point0.y],
            [p.point1.x, p.point1.y]
        ), axis=1
    )
    ratio = df.eval("length / length_haversine")
    df["length"].update(df.length_haversine.where(
        (df["length"] < 20) |
        (ratio > correction_threshold_length) |
        (ratio < 1 / correction_threshold_length)
    ))

    return df
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('build_gas_network')
logging.basicConfig(level=snakemake.config['logging_level'])
gas_network = load_dataset(snakemake.input.gas_network)
gas_network = prepare_dataset(gas_network)
gas_network.to_csv(snakemake.output.cleaned_gas_network)

View File

@ -3,7 +3,11 @@
import uuid
import pandas as pd
import geopandas as gpd
from itertools import product
from distutils.version import StrictVersion
gpd_version = StrictVersion(gpd.__version__)
def locate_missing_industrial_sites(df):
@ -69,7 +73,8 @@ def prepare_hotmaps_database(regions):
gdf = gpd.GeoDataFrame(df, geometry='coordinates', crs="EPSG:4326")
gdf = gpd.sjoin(gdf, regions, how="inner", op='within')
kws = dict(op="within") if gpd_version < '0.10' else dict(predicate="within")
gdf = gpd.sjoin(gdf, regions, how="inner", **kws)
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]

124
scripts/cluster_gas_network.py Executable file
View File

@ -0,0 +1,124 @@
"""Cluster gas network."""
import logging
logger = logging.getLogger(__name__)
import pandas as pd
import geopandas as gpd
from shapely import wkt
from pypsa.geo import haversine_pts
from distutils.version import StrictVersion
gpd_version = StrictVersion(gpd.__version__)
def concat_gdf(gdf_list, crs='EPSG:4326'):
    """Stack several geopandas dataframes into one under a common coordinate reference system (crs)."""
    stacked = pd.concat(gdf_list)
    return gpd.GeoDataFrame(stacked, crs=crs)
def load_bus_regions(onshore_path, offshore_path):
    """Read PyPSA-Eur on- and offshore regions and merge them by bus name."""
    offshore = gpd.read_file(offshore_path)
    onshore = gpd.read_file(onshore_path)

    merged = concat_gdf([offshore, onshore])

    # one (multi-)polygon per bus, combining its on- and offshore shapes
    return merged.dissolve(by='name', aggfunc='sum')
def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
    """Map pipe endpoints to model regions and aggregate to model resolution.

    Parameters
    ----------
    df : DataFrame
        Cleaned gas network with 'point0'/'point1' endpoint geometries.
    bus_regions : GeoDataFrame
        Combined on-/offshore regions indexed by bus name.
    length_factor : float
        Multiplier on centroid-to-centroid haversine distance.

    Returns
    -------
    DataFrame
        Inter-regional pipes with 'bus0'/'bus1' assignments and recalculated
        lengths; intra-regional and out-of-scope pipes are dropped.
    """

    for i in [0,1]:

        gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")

        # geopandas 0.10 renamed sjoin's 'op' keyword to 'predicate'
        kws = dict(op="within") if gpd_version < '0.10' else dict(predicate="within")
        bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
        # a point on a region boundary can match several regions; keep the first
        bus_mapping = bus_mapping.groupby(bus_mapping.index).first()

        df[f"bus{i}"] = bus_mapping

        # snap endpoints to region centroids (computed in metric EPSG:3035)
        df[f"point{i}"] = df[f"bus{i}"].map(bus_regions.to_crs(3035).centroid.to_crs(4326))

    # drop pipes where not both buses are inside regions
    df = df.loc[~df.bus0.isna() & ~df.bus1.isna()]

    # drop pipes within the same region
    df = df.loc[df.bus1 != df.bus0]

    # recalculate lengths as center to center * length factor
    df["length"] = df.apply(
        lambda p: length_factor * haversine_pts(
            [p.point0.x, p.point0.y],
            [p.point1.x, p.point1.y]
        ), axis=1
    )

    # tidy and create new numbered index
    df.drop(["point0", "point1"], axis=1, inplace=True)
    df.reset_index(drop=True, inplace=True)

    return df
def reindex_pipes(df):
    """Relabel pipes in place as 'gas pipeline A <-> B' (bidirectional) or
    'gas pipeline A -> B', encode direction as p_min_pu (-1 or 0), drop the
    'bidirectional' column and sort columns alphabetically."""

    def _label(row):
        sep = " <-> " if row.bidirectional else " -> "
        return f"gas pipeline {row.bus0}{sep}{row.bus1}"

    df.index = df.apply(_label, axis=1)

    # -1 allows reverse flow, 0 forbids it
    df["p_min_pu"] = [-1 if flag else 0 for flag in df.bidirectional]
    df.drop("bidirectional", axis=1, inplace=True)

    df.sort_index(axis=1, inplace=True)
def aggregate_parallel_pipes(df):
    """Merge pipes that share an index label (parallel pipes on one corridor).

    Capacities are summed, physical characteristics averaged, names joined,
    and the most permissive flow direction (minimum p_min_pu) is kept.
    """
    # (column, aggregator) pairs in output column order
    aggregators = [
        ("bus0", "first"),
        ("bus1", "first"),
        ("p_nom", "sum"),
        ("p_nom_diameter", "sum"),
        ("max_pressure_bar", "mean"),
        ("build_year", "mean"),
        ("diameter_mm", "mean"),
        ("length", "mean"),
        ("name", " ".join),
        ("p_min_pu", "min"),
    ]
    return df.groupby(level=0).agg(dict(aggregators))
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake(
'cluster_gas_network',
simpl='',
clusters='37'
)
logging.basicConfig(level=snakemake.config['logging_level'])
fn = snakemake.input.cleaned_gas_network
df = pd.read_csv(fn, index_col=0)
for col in ["point0", "point1"]:
df[col] = df[col].apply(wkt.loads)
bus_regions = load_bus_regions(
snakemake.input.regions_onshore,
snakemake.input.regions_offshore
)
gas_network = build_clustered_gas_network(df, bus_regions)
reindex_pipes(gas_network)
gas_network = aggregate_parallel_pipes(gas_network)
gas_network.to_csv(snakemake.output.clustered_gas_network)

View File

@ -88,4 +88,16 @@ def mock_snakemake(rulename, **wildcards):
Path(path).parent.mkdir(parents=True, exist_ok=True)
os.chdir(script_dir)
return snakemake
return snakemake
# from pypsa-eur/_helpers.py
def progress_retrieve(url, file):
    """Download ``url`` to ``file``, showing a console progress bar.

    Parameters
    ----------
    url : str
        Source URL.
    file : str
        Destination file path.
    """
    # `import urllib` alone does not guarantee the `request` submodule is
    # loaded; import it explicitly so urllib.request.urlretrieve resolves
    import urllib.request
    from progressbar import ProgressBar

    pbar = ProgressBar(0, 100)

    def dl_progress(count, block_size, total_size):
        # reporthook receives (blocks transferred, block size, total size)
        pbar.update(int(count * block_size * 100 / total_size))

    urllib.request.urlretrieve(url, file, reporthook=dl_progress)

View File

@ -124,7 +124,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
if len(to_drop) != 0:
print("dropping non-buses", to_drop)
costs.drop(to_drop, level=0, inplace=True, axis=0)
costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")
# make sure they are removed from index
costs.index = pd.MultiIndex.from_tuples(costs.index.values)
@ -236,50 +236,76 @@ def plot_h2_map(network):
linewidth_factor = 1e4
# MW below which not drawn
line_lower_threshold = 1e3
bus_color = "m"
link_color = "c"
# Drop non-electric buses so they don't clutter the plot
n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)
elec = n.links.index[n.links.carrier == "H2 Electrolysis"]
elec = n.links[n.links.carrier.isin(["H2 Electrolysis", "H2 Fuel Cell"])].index
bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby(n.links.loc[elec, "bus0"]).sum() / bus_size_factor
bus_sizes = n.links.loc[elec,"p_nom_opt"].groupby([n.links["bus0"], n.links.carrier]).sum() / bus_size_factor
# make a fake MultiIndex so that area is correct for legend
bus_sizes.index = pd.MultiIndex.from_product(
[bus_sizes.index, ["electrolysis"]])
bus_sizes.rename(index=lambda x: x.replace(" H2", ""), level=0, inplace=True)
n.links.drop(n.links.index[n.links.carrier != "H2 pipeline"], inplace=True)
n.links.drop(n.links.index[~n.links.carrier.str.contains("H2 pipeline")], inplace=True)
link_widths = n.links.p_nom_opt / linewidth_factor
link_widths[n.links.p_nom_opt < line_lower_threshold] = 0.
h2_new = n.links.loc[n.links.carrier=="H2 pipeline", "p_nom_opt"]
h2_retro = n.links.loc[n.links.carrier=='H2 pipeline retrofitted']
positive_order = h2_retro.bus0 < h2_retro.bus1
h2_retro_p = h2_retro[positive_order]
swap_buses = {"bus0": "bus1", "bus1": "bus0"}
h2_retro_n = h2_retro[~positive_order].rename(columns=swap_buses)
h2_retro = pd.concat([h2_retro_p, h2_retro_n])
h2_retro.index = h2_retro.apply(
lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
axis=1
)
h2_retro = h2_retro["p_nom_opt"]
link_widths_total = (h2_new + h2_retro) / linewidth_factor
link_widths_total = link_widths_total.groupby(level=0).sum().reindex(n.links.index).fillna(0.)
link_widths_total[n.links.p_nom_opt < line_lower_threshold] = 0.
retro = n.links.p_nom_opt.where(n.links.carrier=='H2 pipeline retrofitted', other=0.)
link_widths_retro = retro / linewidth_factor
link_widths_retro[n.links.p_nom_opt < line_lower_threshold] = 0.
n.links.bus0 = n.links.bus0.str.replace(" H2", "")
n.links.bus1 = n.links.bus1.str.replace(" H2", "")
print(link_widths.sort_values())
print(n.links[["bus0", "bus1"]])
fig, ax = plt.subplots(
figsize=(7, 6),
subplot_kw={"projection": ccrs.PlateCarree()}
)
n.plot(
bus_sizes=bus_sizes,
bus_colors={"electrolysis": bus_color},
link_colors=link_color,
link_widths=link_widths,
bus_colors=snakemake.config['plotting']['tech_colors'],
link_colors='#a2f0f2',
link_widths=link_widths_total,
branch_components=["Link"],
ax=ax, **map_opts
ax=ax,
**map_opts
)
n.plot(
geomap=False,
bus_sizes=0,
link_colors='#72d3d6',
link_widths=link_widths_retro,
branch_components=["Link"],
ax=ax,
**map_opts
)
handles = make_legend_circles_for(
[50000, 10000],
scale=bus_size_factor,
facecolor=bus_color
facecolor='grey'
)
labels = ["{} GW".format(s) for s in (50, 10)]
@ -300,7 +326,7 @@ def plot_h2_map(network):
labels = []
for s in (50, 10):
handles.append(plt.Line2D([0], [0], color=link_color,
handles.append(plt.Line2D([0], [0], color="grey",
linewidth=s * 1e3 / linewidth_factor))
labels.append("{} GW".format(s))
@ -318,7 +344,148 @@ def plot_h2_map(network):
fig.savefig(
snakemake.output.map.replace("-costs-all","-h2_network"),
transparent=True,
bbox_inches="tight"
)
def plot_ch4_map(network):
    """Plot the clustered methane network on a map.

    Bus circles show annual gas supply (fossil gas extraction, methanation,
    biogas), split by source. Pipelines are drawn in three layers: original
    capacity (p_nom), remaining optimised capacity (p_nom_opt), and maximum
    utilisation over the year. Saves a '-ch4_network' figure next to the
    configured map output.

    Relies on module-level `snakemake`, `plt`, `ccrs`, `map_opts` and the
    legend helper functions.
    """

    n = network.copy()

    # nothing to plot if the model has no resolved gas network
    if "gas pipeline" not in n.links.carrier.unique():
        return

    assign_location(n)

    bus_size_factor = 8e7
    linewidth_factor = 1e4
    # MW below which not drawn
    line_lower_threshold = 500

    # Drop non-electric buses so they don't clutter the plot
    n.buses.drop(n.buses.index[n.buses.carrier != "AC"], inplace=True)

    # annual fossil gas supply per bus (snapshot-weighted generator output)
    fossil_gas_i = n.generators[n.generators.carrier=="gas"].index
    fossil_gas = n.generators_t.p.loc[:,fossil_gas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.generators.loc[fossil_gas_i,"bus"]).sum() / bus_size_factor
    fossil_gas.rename(index=lambda x: x.replace(" gas", ""), inplace=True)
    fossil_gas = fossil_gas.reindex(n.buses.index).fillna(0)
    # make a fake MultiIndex so that area is correct for legend
    fossil_gas.index = pd.MultiIndex.from_product([fossil_gas.index, ["fossil gas"]])

    # annual methanation output per bus (helmeth and Sabatier links, at bus1)
    methanation_i = n.links[n.links.carrier.isin(["helmeth", "Sabatier"])].index
    methanation = abs(n.links_t.p1.loc[:,methanation_i].mul(n.snapshot_weightings.generators, axis=0)).sum().groupby(n.links.loc[methanation_i,"bus1"]).sum() / bus_size_factor
    methanation = methanation.groupby(methanation.index).sum().rename(index=lambda x: x.replace(" gas", ""))
    # make a fake MultiIndex so that area is correct for legend
    methanation.index = pd.MultiIndex.from_product([methanation.index, ["methanation"]])

    # annual biogas supply per bus (store discharge)
    biogas_i = n.stores[n.stores.carrier=="biogas"].index
    biogas = n.stores_t.p.loc[:,biogas_i].mul(n.snapshot_weightings.generators, axis=0).sum().groupby(n.stores.loc[biogas_i,"bus"]).sum() / bus_size_factor
    biogas = biogas.groupby(biogas.index).sum().rename(index=lambda x: x.replace(" biogas", ""))
    # make a fake MultiIndex so that area is correct for legend
    biogas.index = pd.MultiIndex.from_product([biogas.index, ["biogas"]])

    bus_sizes = pd.concat([fossil_gas, methanation, biogas])
    bus_sizes.sort_index(inplace=True)

    # keep only gas pipeline links ("gas pipeline" and "gas pipeline new")
    to_remove = n.links.index[~n.links.carrier.str.contains("gas pipeline")]
    n.links.drop(to_remove, inplace=True)

    # remaining capacity after optimisation
    link_widths_rem = n.links.p_nom_opt / linewidth_factor
    link_widths_rem[n.links.p_nom_opt < line_lower_threshold] = 0.

    # original (pre-optimisation) capacity
    link_widths_orig = n.links.p_nom / linewidth_factor
    link_widths_orig[n.links.p_nom < line_lower_threshold] = 0.

    # peak utilisation over the year
    max_usage = n.links_t.p0.abs().max(axis=0)
    link_widths_used = max_usage / linewidth_factor
    link_widths_used[max_usage < line_lower_threshold] = 0.

    link_color_used = n.links.carrier.map({"gas pipeline": "#f08080",
                                           "gas pipeline new": "#c46868"})

    # strip the " gas" suffix so links attach to the plotted AC buses
    n.links.bus0 = n.links.bus0.str.replace(" gas", "")
    n.links.bus1 = n.links.bus1.str.replace(" gas", "")

    tech_colors = snakemake.config['plotting']['tech_colors']

    bus_colors = {
        "fossil gas": tech_colors["fossil gas"],
        "methanation": tech_colors["methanation"],
        "biogas": "seagreen"
    }

    fig, ax = plt.subplots(figsize=(7,6), subplot_kw={"projection": ccrs.PlateCarree()})

    # layer 1: original capacities (lightest)
    n.plot(
        bus_sizes=bus_sizes,
        bus_colors=bus_colors,
        link_colors='lightgrey',
        link_widths=link_widths_orig,
        branch_components=["Link"],
        ax=ax,
        **map_opts
    )

    # layer 2: remaining optimised capacities
    n.plot(
        geomap=False,
        ax=ax,
        bus_sizes=0.,
        link_colors='#e8d1d1',
        link_widths=link_widths_rem,
        branch_components=["Link"],
        **map_opts
    )

    # layer 3: peak utilisation (darkest, colored by carrier)
    n.plot(
        geomap=False,
        ax=ax,
        bus_sizes=0.,
        link_colors=link_color_used,
        link_widths=link_widths_used,
        branch_components=["Link"],
        **map_opts
    )

    # legend for bus sizes (annual gas supply in TWh)
    handles = make_legend_circles_for(
        [10e6, 100e6],
        scale=bus_size_factor,
        facecolor='grey'
    )

    labels = ["{} TWh".format(s) for s in (10, 100)]

    l2 = ax.legend(
        handles, labels,
        loc="upper left",
        bbox_to_anchor=(-0.03, 1.01),
        labelspacing=1.0,
        frameon=False,
        title='gas generation',
        handler_map=make_handler_map_to_scale_circles_as_in(ax)
    )

    ax.add_artist(l2)

    # legend for pipeline widths (GW)
    handles = []
    labels = []

    for s in (50, 10):
        handles.append(plt.Line2D([0], [0], color="grey", linewidth=s * 1e3 / linewidth_factor))
        labels.append("{} GW".format(s))

    l1_1 = ax.legend(
        handles, labels,
        loc="upper left",
        bbox_to_anchor=(0.28, 1.01),
        frameon=False,
        labelspacing=0.8,
        handletextpad=1.5,
        title='gas pipeline used capacity'
    )

    ax.add_artist(l1_1)

    fig.savefig(
        snakemake.output.map.replace("-costs-all","-ch4_network"),
        bbox_inches="tight"
    )
@ -344,7 +511,8 @@ def plot_map_without(network):
dc_color = "m"
# hack because impossible to drop buses...
n.buses.loc["EU gas", ["x", "y"]] = n.buses.loc["DE0 0", ["x", "y"]]
if "EU gas" in n.buses.index:
n.buses.loc["EU gas", ["x", "y"]] = n.buses.loc["DE0 0", ["x", "y"]]
to_drop = n.links.index[(n.links.carrier != "DC") & (n.links.carrier != "B2B")]
n.links.drop(to_drop, inplace=True)
@ -546,6 +714,7 @@ if __name__ == "__main__":
)
plot_h2_map(n)
plot_ch4_map(n)
plot_map_without(n)
#plot_series(n, carrier="AC", name=suffix)

View File

@ -321,7 +321,7 @@ def historical_emissions(cts):
e["domestic navigation"] = "1.A.3.d - Domestic Navigation"
e['international navigation'] = '1.D.1.b - International Navigation'
e["domestic aviation"] = '1.A.3.a - Domestic Aviation'
e["international aviation"] = '1.D.1.a - International Aviation'
e["international aviation"] = '1.D.1.a - International Aviation'
e['total energy'] = '1 - Energy'
e['industrial processes'] = '2 - Industrial Processes and Product Use'
e['agriculture'] = '3 - Agriculture'
@ -331,25 +331,25 @@ def historical_emissions(cts):
e['indirect'] = 'ind_CO2 - Indirect CO2'
e["total wL"] = "Total (with LULUCF)"
e["total woL"] = "Total (without LULUCF)"
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
pol = ["CO2"] # ["All greenhouse gases - (CO2 equivalent)"]
cts
if "GB" in cts:
cts.remove("GB")
cts.append("UK")
year = np.arange(1990,2018).tolist()
idx = pd.IndexSlice
co2_totals = df.loc[idx[year,e.values,cts,pol],"emissions"].unstack("Year").rename(index=pd.Series(e.index,e.values))
co2_totals = df.loc[idx[year,e.values,cts,pol],"emissions"].unstack("Year").rename(index=pd.Series(e.index,e.values))
co2_totals = (1/1e6)*co2_totals.groupby(level=0, axis=0).sum() #Gton CO2
co2_totals.loc['industrial non-elec'] = co2_totals.loc['total energy'] - co2_totals.loc[['electricity', 'services non-elec','residential non-elec', 'road non-elec',
'rail non-elec', 'domestic aviation', 'international aviation', 'domestic navigation',
'international navigation']].sum()
emissions = co2_totals.loc["electricity"]
emissions = co2_totals.loc["electricity"]
if "T" in opts:
emissions += co2_totals.loc[[i+ " non-elec" for i in ["rail","road"]]].sum()
if "H" in opts:
@ -357,7 +357,7 @@ def historical_emissions(cts):
if "I" in opts:
emissions += co2_totals.loc[["industrial non-elec","industrial processes",
"domestic aviation","international aviation",
"domestic navigation","international navigation"]].sum()
"domestic navigation","international navigation"]].sum()
return emissions
@ -365,8 +365,8 @@ def historical_emissions(cts):
def plot_carbon_budget_distribution():
"""
Plot historical carbon emissions in the EU and decarbonization path
"""
"""
import matplotlib.gridspec as gridspec
import seaborn as sns; sns.set()
sns.set_style('ticks')
@ -374,7 +374,7 @@ def plot_carbon_budget_distribution():
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.figure(figsize=(10, 7))
gs1 = gridspec.GridSpec(1, 1)
@ -382,55 +382,55 @@ def plot_carbon_budget_distribution():
ax1.set_ylabel('CO$_2$ emissions (Gt per year)',fontsize=22)
ax1.set_ylim([0,5])
ax1.set_xlim([1990,snakemake.config['scenario']['planning_horizons'][-1]+1])
path_cb = snakemake.config['results_dir'] + snakemake.config['run'] + '/csvs/'
countries=pd.read_csv(path_cb + 'countries.csv', index_col=1)
countries=pd.read_csv(path_cb + 'countries.csv', index_col=1)
cts=countries.index.to_list()
e_1990 = co2_emissions_year(cts, opts, year=1990)
CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv',
index_col=0)
ax1.plot(e_1990*CO2_CAP[o],linewidth=3,
e_1990 = co2_emissions_year(cts, opts, year=1990)
CO2_CAP=pd.read_csv(path_cb + 'carbon_budget_distribution.csv',
index_col=0)
ax1.plot(e_1990*CO2_CAP[o],linewidth=3,
color='dodgerblue', label=None)
emissions = historical_emissions(cts)
ax1.plot(emissions, color='black', linewidth=3, label=None)
#plot committed and under-discussion targets
ax1.plot(emissions, color='black', linewidth=3, label=None)
#plot committed and under-discussion targets
#(notice that historical emissions include all countries in the
# network, but targets refer to EU)
ax1.plot([2020],[0.8*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
markeredgecolor='black')
ax1.plot([2030],[0.45*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
markeredgecolor='black')
markeredgecolor='black')
ax1.plot([2030],[0.6*emissions[1990]],
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black')
ax1.plot([2050, 2050],[x*emissions[1990] for x in [0.2, 0.05]],
color='gray', linewidth=2, marker='_', alpha=0.5)
color='gray', linewidth=2, marker='_', alpha=0.5)
ax1.plot([2050],[0.01*emissions[1990]],
marker='*', markersize=12, markerfacecolor='white',
linewidth=0, markeredgecolor='black',
label='EU under-discussion target', zorder=10,
clip_on=False)
marker='*', markersize=12, markerfacecolor='white',
linewidth=0, markeredgecolor='black',
label='EU under-discussion target', zorder=10,
clip_on=False)
ax1.plot([2050],[0.125*emissions[1990]],'ro',
marker='*', markersize=12, markerfacecolor='black',
markeredgecolor='black', label='EU commited target')
ax1.legend(fancybox=True, fontsize=18, loc=(0.01,0.01),
facecolor='white', frameon=True)
path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/'
plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300)
ax1.legend(fancybox=True, fontsize=18, loc=(0.01,0.01),
facecolor='white', frameon=True)
path_cb_plot = snakemake.config['results_dir'] + snakemake.config['run'] + '/graphs/'
plt.savefig(path_cb_plot+'carbon_budget_plot.pdf', dpi=300)
if __name__ == "__main__":
@ -445,7 +445,7 @@ if __name__ == "__main__":
plot_energy()
plot_balances()
for sector_opts in snakemake.config['scenario']['sector_opts']:
opts=sector_opts.split('-')
for o in opts:

View File

@ -8,6 +8,7 @@ import pytz
import pandas as pd
import numpy as np
import xarray as xr
import networkx as nx
from itertools import product
from scipy.stats import beta
@ -16,6 +17,10 @@ from vresutils.costdata import annuity
from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals
from helper import override_component_attrs
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from networkx.algorithms import complement
from pypsa.geo import haversine_pts
import logging
logger = logging.getLogger(__name__)
@ -68,6 +73,29 @@ def define_spatial(nodes):
spatial.co2.vents = ["co2 vent"]
spatial.co2.df = pd.DataFrame(vars(spatial.co2), index=nodes)
# gas
spatial.gas = SimpleNamespace()
if options["gas_network"]:
spatial.gas.nodes = nodes + " gas"
spatial.gas.locations = nodes
spatial.gas.biogas = nodes + " biogas"
spatial.gas.industry = nodes + " gas for industry"
spatial.gas.industry_cc = nodes + " gas for industry CC"
else:
spatial.gas.nodes = ["EU gas"]
spatial.gas.locations = "EU"
spatial.gas.biogas = ["EU biogas"]
spatial.gas.industry = ["gas for industry"]
spatial.gas.industry_cc = ["gas for industry CC"]
spatial.gas.df = pd.DataFrame(vars(spatial.gas), index=nodes)
from types import SimpleNamespace
spatial = SimpleNamespace()
def emission_sectors_from_opts(opts):
@ -108,40 +136,6 @@ def get(item, investment_year=None):
return item
def create_network_topology(n, prefix, connector=" -> "):
"""
Create a network topology like the power transmission network.
Parameters
----------
n : pypsa.Network
prefix : str
connector : str
Returns
-------
pd.DataFrame with columns bus0, bus1 and length
"""
ln_attrs = ["bus0", "bus1", "length"]
lk_attrs = ["bus0", "bus1", "length", "underwater_fraction"]
candidates = pd.concat([
n.lines[ln_attrs],
n.links.loc[n.links.carrier == "DC", lk_attrs]
]).fillna(0)
positive_order = candidates.bus0 < candidates.bus1
candidates_p = candidates[positive_order]
swap_buses = {"bus0": "bus1", "bus1": "bus0"}
candidates_n = candidates[~positive_order].rename(columns=swap_buses)
candidates = pd.concat([candidates_p, candidates_n])
topo = candidates.groupby(["bus0", "bus1"], as_index=False).mean()
topo.index = topo.apply(lambda c: prefix + c.bus0 + connector + c.bus1, axis=1)
return topo
def co2_emissions_year(countries, opts, year):
"""
Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
@ -229,14 +223,21 @@ def add_lifetime_wind_solar(n, costs):
n.generators.loc[gen_i, "lifetime"] = costs.at[carrier, 'lifetime']
def create_network_topology(n, prefix, connector=" -> ", bidirectional=True):
def haversine(p):
    """Return the great-circle distance between the two endpoint buses of
    link/pipe row *p* (via ``pypsa.geo.haversine_pts``), scaled by 1.5.

    NOTE(review): the factor 1.5 presumably accounts for routing detours
    relative to the direct geodesic — confirm against cost assumptions.
    """
    # `n` is the enclosing network (free variable from the surrounding scope)
    coord0 = n.buses.loc[p.bus0, ['x', 'y']].values
    coord1 = n.buses.loc[p.bus1, ['x', 'y']].values
    return 1.5 * haversine_pts(coord0, coord1)
def create_network_topology(n, prefix, carriers=["DC"], connector=" -> ", bidirectional=True):
"""
Create a network topology like the power transmission network.
Create a network topology from transmission lines and link carrier selection.
Parameters
----------
n : pypsa.Network
prefix : str
carriers : list-like
connector : str
bidirectional : bool, default True
True: one link for each connection
@ -244,7 +245,7 @@ def create_network_topology(n, prefix, connector=" -> ", bidirectional=True):
Returns
-------
pd.DataFrame with columns bus0, bus1 and length
pd.DataFrame with columns bus0, bus1, length, underwater_fraction
"""
ln_attrs = ["bus0", "bus1", "length"]
@ -252,9 +253,13 @@ def create_network_topology(n, prefix, connector=" -> ", bidirectional=True):
candidates = pd.concat([
n.lines[ln_attrs],
n.links.loc[n.links.carrier == "DC", lk_attrs]
n.links.loc[n.links.carrier.isin(carriers), lk_attrs]
]).fillna(0)
# base network topology purely on location not carrier
candidates["bus0"] = candidates.bus0.map(n.buses.location)
candidates["bus1"] = candidates.bus1.map(n.buses.location)
positive_order = candidates.bus0 < candidates.bus1
candidates_p = candidates[positive_order]
swap_buses = {"bus0": "bus1", "bus1": "bus0"}
@ -339,39 +344,42 @@ def update_wind_solar_costs(n, costs):
n.generators.loc[n.generators.carrier==tech, 'capital_cost'] = capital_cost.rename(index=lambda node: node + ' ' + tech)
def add_carrier_buses(n, carriers):
def add_carrier_buses(n, carrier, nodes=None):
"""
Add buses to connect e.g. coal, nuclear and oil plants
"""
if isinstance(carriers, str):
carriers = [carriers]
for carrier in carriers:
if nodes is None:
nodes = ["EU " + carrier]
n.add("Carrier", carrier)
# skip if carrier already exists
if carrier in n.carriers.index:
return
n.add("Bus",
"EU " + carrier,
location="EU",
carrier=carrier
)
n.add("Carrier", carrier)
#capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M
n.add("Store",
"EU " + carrier + " Store",
bus="EU " + carrier,
e_nom_extendable=True,
e_cyclic=True,
carrier=carrier,
)
n.madd("Bus",
nodes,
location=nodes.str.replace(" " + carrier, ""),
carrier=carrier
)
n.add("Generator",
"EU " + carrier,
bus="EU " + carrier,
p_nom_extendable=True,
carrier=carrier,
marginal_cost=costs.at[carrier, 'fuel']
)
#capital cost could be corrected to e.g. 0.2 EUR/kWh * annuity and O&M
n.madd("Store",
nodes + " Store",
bus=nodes,
e_nom_extendable=True,
e_cyclic=True,
carrier=carrier,
)
n.madd("Generator",
nodes,
bus=nodes,
p_nom_extendable=True,
carrier=carrier,
marginal_cost=costs.at[carrier, 'fuel']
)
# TODO: PyPSA-Eur merge issue
@ -535,17 +543,6 @@ def average_every_nhours(n, offset):
logger.info(f'Resampling the network to {offset}')
m = n.copy(with_time=False)
# TODO is this still needed?
#fix copying of network attributes
#copied from pypsa/io.py, should be in pypsa/components.py#Network.copy()
allowed_types = (float, int, bool, str) + tuple(np.typeDict.values())
attrs = dict((attr, getattr(n, attr))
for attr in dir(n)
if (not attr.startswith("__") and
isinstance(getattr(n,attr), allowed_types)))
for k,v in attrs.items():
setattr(m,k,v)
snapshot_weightings = n.snapshot_weightings.resample(offset).sum()
m.set_snapshots(snapshot_weightings.index)
m.snapshot_weightings = snapshot_weightings
@ -803,13 +800,18 @@ def add_generation(n, costs):
fallback = {"OCGT": "gas"}
conventionals = options.get("conventional_generation", fallback)
add_carrier_buses(n, np.unique(list(conventionals.values())))
for generator, carrier in conventionals.items():
if carrier == 'gas' and options["gas_network"]:
carrier_nodes = spatial.gas.nodes
else:
carrier_nodes = ["EU " + carrier]
add_carrier_buses(n, carrier, carrier_nodes)
n.madd("Link",
nodes + " " + generator,
bus0="EU " + carrier,
bus0=carrier_nodes,
bus1=nodes,
bus2="co2 atmosphere",
marginal_cost=costs.at[generator, 'efficiency'] * costs.at[generator, 'VOM'], #NB: VOM is per MWel
@ -1002,11 +1004,8 @@ def add_electricity_grid_connection(n, costs):
n.generators.loc[gens, "capital_cost"] += costs.at['electricity grid connection', 'fixed']
def add_storage(n, costs):
# TODO pop_layout
# TODO options?
print("adding electricity storage")
def add_storage_and_grids(n, costs):
print("adding electricity and hydrogen storage as well as hydrogen and gas grids")
nodes = pop_layout.index
@ -1079,33 +1078,129 @@ def add_storage(n, costs):
capital_cost=h2_capital_cost
)
attrs = ["bus0", "bus1", "length"]
h2_links = pd.DataFrame(columns=attrs)
if options["gas_network"]:
candidates = pd.concat({"lines": n.lines[attrs],
"links": n.links.loc[n.links.carrier == "DC", attrs]})
logger.info("Add gas network")
for candidate in candidates.index:
buses = [candidates.at[candidate, "bus0"], candidates.at[candidate, "bus1"]]
buses.sort()
name = f"H2 pipeline {buses[0]} -> {buses[1]}"
if name not in h2_links.index:
h2_links.at[name, "bus0"] = buses[0]
h2_links.at[name, "bus1"] = buses[1]
h2_links.at[name, "length"] = candidates.at[candidate, "length"]
fn = snakemake.input.clustered_gas_network
gas_pipes = pd.read_csv(fn, index_col=0)
# TODO Add efficiency losses
n.madd("Link",
h2_links.index,
bus0=h2_links.bus0.values + " H2",
bus1=h2_links.bus1.values + " H2",
p_min_pu=-1,
p_nom_extendable=True,
length=h2_links.length.values,
capital_cost=costs.at['H2 (g) pipeline', 'fixed'] * h2_links.length.values,
carrier="H2 pipeline",
lifetime=costs.at['H2 (g) pipeline', 'lifetime']
)
if options["H2_retrofit"]:
gas_pipes["p_nom_max"] = gas_pipes.p_nom
gas_pipes["p_nom_min"] = 0.
gas_pipes["capital_cost"] = 0.
else:
gas_pipes["p_nom_max"] = np.inf
gas_pipes["p_nom_min"] = gas_pipes.p_nom
gas_pipes["capital_cost"] = gas_pipes.length * costs.at['CH4 (g) pipeline', 'fixed']
n.madd("Link",
gas_pipes.index,
bus0=gas_pipes.bus0 + " gas",
bus1=gas_pipes.bus1 + " gas",
p_min_pu=gas_pipes.p_min_pu,
p_nom=gas_pipes.p_nom,
p_nom_extendable=True,
p_nom_max=gas_pipes.p_nom_max,
p_nom_min=gas_pipes.p_nom_min,
length=gas_pipes.length,
capital_cost=gas_pipes.capital_cost,
tags=gas_pipes.name,
carrier="gas pipeline",
lifetime=costs.at['CH4 (g) pipeline', 'lifetime']
)
# remove fossil generators where there is neither
# production, LNG terminal, nor entry-point beyond system scope
fn = snakemake.input.gas_input_nodes_simplified
gas_input_nodes = pd.read_csv(fn, index_col=0)
unique = gas_input_nodes.index.unique()
gas_i = n.generators.carrier == 'gas'
internal_i = ~n.generators.bus.map(n.buses.location).isin(unique)
remove_i = n.generators[gas_i & internal_i].index
n.generators.drop(remove_i, inplace=True)
p_nom = gas_input_nodes.sum(axis=1).rename(lambda x: x + " gas")
n.generators.loc[gas_i, "p_nom_extendable"] = False
n.generators.loc[gas_i, "p_nom"] = p_nom
# add candidates for new gas pipelines to achieve full connectivity
G = nx.Graph()
gas_buses = n.buses.loc[n.buses.carrier=='gas', 'location']
G.add_nodes_from(np.unique(gas_buses.values))
sel = gas_pipes.p_nom > 1500
attrs = ["bus0", "bus1", "length"]
G.add_weighted_edges_from(gas_pipes.loc[sel, attrs].values)
# find all complement edges
complement_edges = pd.DataFrame(complement(G).edges, columns=["bus0", "bus1"])
complement_edges["length"] = complement_edges.apply(haversine, axis=1)
# apply k_edge_augmentation weighted by length of complement edges
k_edge = options.get("gas_network_connectivity_upgrade", 3)
augmentation = k_edge_augmentation(G, k_edge, avail=complement_edges.values)
new_gas_pipes = pd.DataFrame(augmentation, columns=["bus0", "bus1"])
new_gas_pipes["length"] = new_gas_pipes.apply(haversine, axis=1)
new_gas_pipes.index = new_gas_pipes.apply(
lambda x: f"gas pipeline new {x.bus0} <-> {x.bus1}", axis=1)
n.madd("Link",
new_gas_pipes.index,
bus0=new_gas_pipes.bus0 + " gas",
bus1=new_gas_pipes.bus1 + " gas",
p_min_pu=-1, # new gas pipes are bidirectional
p_nom_extendable=True,
length=new_gas_pipes.length,
capital_cost=new_gas_pipes.length * costs.at['CH4 (g) pipeline', 'fixed'],
carrier="gas pipeline new",
lifetime=costs.at['CH4 (g) pipeline', 'lifetime']
)
# retrofitting existing CH4 pipelines to H2 pipelines
if options["gas_network"] and options["H2_retrofit"]:
gas_pipe_i = n.links[n.links.carrier == "gas pipeline"].index
n.links.loc[gas_pipe_i, "p_nom_extendable"] = True
h2_pipes = gas_pipes.rename(index=lambda x:
x.replace("gas pipeline", "H2 pipeline retrofitted"))
n.madd("Link",
h2_pipes.index,
bus0=h2_pipes.bus0 + " H2",
bus1=h2_pipes.bus1 + " H2",
p_min_pu=-1., # allow that all H2 retrofit pipelines can be used in both directions
p_nom_max=h2_pipes.p_nom * options["H2_retrofit_capacity_per_CH4"],
p_nom_extendable=True,
length=h2_pipes.length,
capital_cost=costs.at['H2 (g) pipeline repurposed', 'fixed'] * h2_pipes.length,
tags=h2_pipes.name,
carrier="H2 pipeline retrofitted",
lifetime=costs.at['H2 (g) pipeline repurposed', 'lifetime']
)
if options.get("H2_network", True):
h2_pipes = create_network_topology(n, "H2 pipeline ", carriers=["DC", "gas pipeline"])
# TODO Add efficiency losses
n.madd("Link",
h2_pipes.index,
bus0=h2_pipes.bus0.values + " H2",
bus1=h2_pipes.bus1.values + " H2",
p_min_pu=-1,
p_nom_extendable=True,
length=h2_pipes.length.values,
capital_cost=costs.at['H2 (g) pipeline', 'fixed'] * h2_pipes.length.values,
carrier="H2 pipeline",
lifetime=costs.at['H2 (g) pipeline', 'lifetime']
)
n.add("Carrier", "battery")
@ -1153,7 +1248,7 @@ def add_storage(n, costs):
spatial.nodes,
suffix=" Sabatier",
bus0=nodes + " H2",
bus1="EU gas",
bus1=spatial.gas.nodes,
bus2=spatial.co2.nodes,
p_nom_extendable=True,
carrier="Sabatier",
@ -1169,7 +1264,7 @@ def add_storage(n, costs):
spatial.nodes,
suffix=" helmeth",
bus0=nodes,
bus1="EU gas",
bus1=spatial.gas.nodes,
bus2=spatial.co2.nodes,
carrier="helmeth",
p_nom_extendable=True,
@ -1185,7 +1280,7 @@ def add_storage(n, costs):
n.madd("Link",
spatial.nodes,
suffix=" SMR CC",
bus0="EU gas",
bus0=spatial.gas.nodes,
bus1=nodes + " H2",
bus2="co2 atmosphere",
bus3=spatial.co2.nodes,
@ -1200,7 +1295,7 @@ def add_storage(n, costs):
n.madd("Link",
nodes + " SMR",
bus0="EU gas",
bus0=spatial.gas.nodes,
bus1=nodes + " H2",
bus2="co2 atmosphere",
p_nom_extendable=True,
@ -1336,8 +1431,6 @@ def add_land_transport(n, costs):
def add_heat(n, costs):
# TODO options?
# TODO pop_layout?
print("adding heat")
@ -1493,7 +1586,7 @@ def add_heat(n, costs):
n.madd("Link",
nodes[name] + f" {name} gas boiler",
p_nom_extendable=True,
bus0="EU gas",
bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
bus1=nodes[name] + f" {name} heat",
bus2="co2 atmosphere",
carrier=name + " gas boiler",
@ -1523,7 +1616,7 @@ def add_heat(n, costs):
# add gas CHP; biomass CHP is added in biomass section
n.madd("Link",
nodes[name] + " urban central gas CHP",
bus0="EU gas",
bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
bus1=nodes[name],
bus2=nodes[name] + " urban central heat",
bus3="co2 atmosphere",
@ -1539,7 +1632,7 @@ def add_heat(n, costs):
n.madd("Link",
nodes[name] + " urban central gas CHP CC",
bus0="EU gas",
bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
bus1=nodes[name],
bus2=nodes[name] + " urban central heat",
bus3="co2 atmosphere",
@ -1560,7 +1653,7 @@ def add_heat(n, costs):
n.madd("Link",
nodes[name] + f" {name} micro gas CHP",
p_nom_extendable=True,
bus0="EU gas",
bus0=spatial.gas.df.loc[nodes[name], "nodes"].values,
bus1=nodes[name],
bus2=nodes[name] + f" {name} heat",
bus3="co2 atmosphere",
@ -1715,17 +1808,24 @@ def add_biomass(n, costs):
biomass_potentials = pd.read_csv(snakemake.input.biomass_potentials, index_col=0)
if options["biomass_transport"]:
biomass_potentials_spatial = biomass_potentials.rename(index=lambda x: x + " solid biomass")
# need to aggregate potentials if gas not nodally resolved
if options["gas_network"]:
biogas_potentials_spatial = biomass_potentials["biogas"].rename(index=lambda x: x + " biogas")
else:
biomass_potentials_spatial = biomass_potentials.sum()
biogas_potentials_spatial = biomass_potentials["biogas"].sum()
if options["biomass_transport"]:
solid_biomass_potentials_spatial = biomass_potentials["solid biomass"].rename(index=lambda x: x + " solid biomass")
else:
solid_biomass_potentials_spatial = biomass_potentials["solid biomass"].sum()
n.add("Carrier", "biogas")
n.add("Carrier", "solid biomass")
n.add("Bus",
"EU biogas",
location="EU",
n.madd("Bus",
spatial.gas.biogas,
location=spatial.gas.locations,
carrier="biogas"
)
@ -1735,28 +1835,28 @@ def add_biomass(n, costs):
carrier="solid biomass"
)
n.add("Store",
"EU biogas",
bus="EU biogas",
n.madd("Store",
spatial.gas.biogas,
bus=spatial.gas.biogas,
carrier="biogas",
e_nom=biomass_potentials["biogas"].sum(),
e_nom=biogas_potentials_spatial,
marginal_cost=costs.at['biogas', 'fuel'],
e_initial=biomass_potentials["biogas"].sum()
e_initial=biogas_potentials_spatial
)
n.madd("Store",
spatial.biomass.nodes,
bus=spatial.biomass.nodes,
carrier="solid biomass",
e_nom=biomass_potentials_spatial["solid biomass"],
e_nom=solid_biomass_potentials_spatial,
marginal_cost=costs.at['solid biomass', 'fuel'],
e_initial=biomass_potentials_spatial["solid biomass"]
e_initial=solid_biomass_potentials_spatial
)
n.add("Link",
"biogas to gas",
bus0="EU biogas",
bus1="EU gas",
n.madd("Link",
spatial.gas.locations + "biogas to gas",
bus0=spatial.gas.biogas,
bus1=spatial.gas.nodes,
bus2="co2 atmosphere",
carrier="biogas to gas",
capital_cost=costs.loc["biogas upgrading", "fixed"],
@ -1883,22 +1983,29 @@ def add_industry(n, costs):
lifetime=costs.at['cement capture', 'lifetime']
)
n.add("Bus",
"gas for industry",
location="EU",
n.madd("Bus",
spatial.gas.industry,
location=spatial.gas.locations,
carrier="gas for industry")
n.add("Load",
"gas for industry",
bus="gas for industry",
gas_demand = industrial_demand.loc[nodes, "methane"] / 8760.
if options["gas_network"]:
spatial_gas_demand = gas_demand.rename(index=lambda x: x + " gas for industry")
else:
spatial_gas_demand = gas_demand.sum()
n.madd("Load",
spatial.gas.industry,
bus=spatial.gas.industry,
carrier="gas for industry",
p_set=industrial_demand.loc[nodes, "methane"].sum() / 8760
p_set=spatial_gas_demand
)
n.add("Link",
"gas for industry",
bus0="EU gas",
bus1="gas for industry",
n.madd("Link",
spatial.gas.industry,
bus0=spatial.gas.nodes,
bus1=spatial.gas.industry,
bus2="co2 atmosphere",
carrier="gas for industry",
p_nom_extendable=True,
@ -1907,10 +2014,9 @@ def add_industry(n, costs):
)
n.madd("Link",
spatial.co2.locations,
suffix=" gas for industry CC",
bus0="EU gas",
bus1="gas for industry",
spatial.gas.industry_cc,
bus0=spatial.gas.nodes,
bus1=spatial.gas.industry,
bus2="co2 atmosphere",
bus3=spatial.co2.nodes,
carrier="gas for industry CC",
@ -1922,7 +2028,6 @@ def add_industry(n, costs):
lifetime=costs.at['cement capture', 'lifetime']
)
n.madd("Load",
nodes,
suffix=" H2 for industry",
@ -2328,13 +2433,14 @@ if __name__ == "__main__":
add_lifetime_wind_solar(n, costs)
conventional = snakemake.config['existing_capacities']['conventional_carriers']
add_carrier_buses(n, conventional)
for carrier in conventional:
add_carrier_buses(n, carrier)
add_co2_tracking(n, options)
add_generation(n, costs)
add_storage(n, costs)
add_storage_and_grids(n, costs)
# TODO merge with opts cost adjustment below
for o in opts:

View File

@ -0,0 +1,36 @@
"""
Retrieve gas infrastructure data from https://zenodo.org/record/4767098/files/IGGIELGN.zip
"""
import logging
from helper import progress_retrieve
import zipfile
from pathlib import Path
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if 'snakemake' not in globals():
from helper import mock_snakemake
snakemake = mock_snakemake('retrieve_gas_network_data')
rootpath = '..'
else:
rootpath = '.'
url = "https://zenodo.org/record/4767098/files/IGGIELGN.zip"
# Save locations
zip_fn = Path(f"{rootpath}/IGGIELGN.zip")
to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas")
logger.info(f"Downloading databundle from '{url}'.")
progress_retrieve(url, zip_fn)
logger.info(f"Extracting databundle.")
zipfile.ZipFile(zip_fn).extractall(to_fn)
zip_fn.unlink()
logger.info(f"Gas infrastructure data available in '{to_fn}'.")

View File

@ -186,6 +186,28 @@ def add_chp_constraints(n):
define_constraints(n, lhs, "<=", 0, 'chplink', 'backpressure')
def add_pipe_retrofit_constraint(n):
    """
    Constrain retrofitting of existing CH4 pipelines to H2 pipelines.

    For each gas pipeline, the CH4-equivalent capacity of its retrofitted
    H2 counterpart plus the remaining CH4 capacity must equal the original
    pipeline capacity.
    """
    links = n.links
    gas_i = links.index[links.carrier == "gas pipeline"]
    h2_i = links.index[links.carrier == "H2 pipeline retrofitted"]

    # nothing to constrain if either set of pipelines is absent
    if gas_i.empty or h2_i.empty:
        return

    p_nom = get_var(n, "Link", "p_nom")

    # MW of CH4 capacity consumed per MW of retrofitted H2 capacity
    ch4_per_h2 = 1 / n.config["sector"]["H2_retrofit_capacity_per_CH4"]

    # align retrofitted H2 variables with their CH4 counterparts by name
    h2_vars = p_nom.loc[h2_i].rename(
        index=lambda name: name.replace("H2 pipeline retrofitted", "gas pipeline")
    )

    lhs = linexpr((ch4_per_h2, h2_vars), (1, p_nom.loc[gas_i]))

    original_capacity = links.loc[gas_i, "p_nom"]
    define_constraints(n, lhs, "=", original_capacity, "Link", "pipe_retrofit")
def add_co2_sequestration_limit(n, sns):
co2_stores = n.stores.loc[n.stores.carrier=='co2 stored'].index
@ -205,6 +227,7 @@ def add_co2_sequestration_limit(n, sns):
def extra_functionality(n, snapshots):
add_battery_constraints(n)
add_pipe_retrofit_constraint(n)
add_co2_sequestration_limit(n, snapshots)