Merge branch 'glaes'

Jonas Hoersch 2019-02-06 12:06:32 +01:00
commit d57df6b0c8
25 changed files with 6132 additions and 5684 deletions

Snakefile

@@ -3,7 +3,7 @@ configfile: "config.yaml"
COSTS="data/costs.csv"
wildcard_constraints:
lv="[0-9\.]+|inf|all",
ll="(v|c)([0-9\.]+|opt|all)", # line limit, can be volume or cost
simpl="[a-zA-Z0-9]*|all",
clusters="[0-9]+m?|all",
sectors="[+a-zA-Z0-9]+",
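# Illustration (hypothetical values, not part of the commit): the new `ll`
# wildcard takes a 'v' or 'c' prefix followed by a number, 'opt' or 'all';
# since Snakemake anchors wildcard constraints, in plain Python terms:
#
#   import re
#   LL = r"(v|c)([0-9\.]+|opt|all)"
#   [bool(re.fullmatch(LL, v)) for v in ["v1.25", "copt", "vall", "lv1.25"]]
#   # -> [True, True, True, False]; the old-style "lv1.25" no longer matches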
@@ -11,12 +11,17 @@ wildcard_constraints:
rule cluster_all_elec_networks:
input:
expand("networks/elec_s{simpl}_{clusters}_lv{lv}_{opts}.nc",
expand("networks/elec_s{simpl}_{clusters}.nc",
**config['scenario'])
rule prepare_all_elec_networks:
input:
expand("networks/elec_s{simpl}_{clusters}_l{ll}_{opts}.nc",
**config['scenario'])
rule solve_all_elec_networks:
input:
expand("results/networks/elec_s{simpl}_{clusters}_lv{lv}_{opts}.nc",
expand("results/networks/elec_s{simpl}_{clusters}_l{ll}_{opts}.nc",
**config['scenario'])
if config['enable']['prepare_links_p_nom']:
@@ -89,38 +94,32 @@ rule build_bus_regions:
rule build_cutout:
output: "cutouts/{cutout}"
resources: mem=5000
resources: mem=config['atlite'].get('nprocesses', 4) * 1000
threads: config['atlite'].get('nprocesses', 4)
benchmark: "benchmarks/build_cutout_{cutout}"
# group: 'feedin_preparation'
script: "scripts/build_cutout.py"
def memory_build_renewable_potentials(wildcards):
corine_config = config["renewable"][wildcards.technology]["corine"]
return 12000 if corine_config.get("distance") is None else 24000
rule build_renewable_potentials:
input:
cutout=lambda wildcards: "cutouts/" + config["renewable"][wildcards.technology]['cutout'],
corine="data/bundle/corine/g250_clc06_V18_5.tif",
natura="data/bundle/natura/Natura2000_end2015.shp"
output: "resources/potentials_{technology}.nc"
resources: mem=memory_build_renewable_potentials
benchmark: "benchmarks/build_renewable_potentials_{technology}"
# group: 'feedin_preparation'
script: "scripts/build_renewable_potentials.py"
rule build_natura_raster:
input: "data/bundle/natura/Natura2000_end2015.shp"
output: "resources/natura.tiff"
script: "scripts/build_natura_raster.py"
rule build_renewable_profiles:
input:
base_network="networks/base.nc",
potentials="resources/potentials_{technology}.nc",
corine="data/bundle/corine/g250_clc06_V18_5.tif",
natura="resources/natura.tiff",
gebco="data/bundle/GEBCO_2014_2D.nc",
country_shapes='resources/country_shapes.geojson',
offshore_shapes='resources/offshore_shapes.geojson',
regions=lambda wildcards: ("resources/regions_onshore.geojson"
if wildcards.technology in ('onwind', 'solar')
else "resources/regions_offshore.geojson"),
cutout=lambda wildcards: "cutouts/" + config["renewable"][wildcards.technology]['cutout']
output:
profile="resources/profile_{technology}.nc",
resources: mem=5000
output: profile="resources/profile_{technology}.nc",
resources: mem=config['atlite'].get('nprocesses', 2) * 5000
threads: config['atlite'].get('nprocesses', 2)
benchmark: "benchmarks/build_renewable_profiles_{technology}"
# group: 'feedin_preparation'
script: "scripts/build_renewable_profiles.py"
@@ -142,6 +141,7 @@ rule add_electricity:
regions="resources/regions_onshore.geojson",
powerplants='resources/powerplants.csv',
hydro_capacities='data/bundle/hydro_capacities.csv',
geth_hydro_capacities='data/geth2015_hydro_capacities.csv',
opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
nuts3_shapes='resources/nuts3_shapes.geojson',
**{'profile_' + t: "resources/profile_" + t + ".nc"
@@ -156,6 +156,7 @@ rule add_electricity:
rule simplify_network:
input:
network='networks/{network}.nc',
tech_costs=COSTS,
regions_onshore="resources/regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson"
output:
@@ -198,10 +199,10 @@ rule cluster_network:
rule prepare_network:
input: 'networks/{network}_s{simpl}_{clusters}.nc', tech_costs=COSTS
output: 'networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc'
output: 'networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc'
threads: 1
resources: mem=1000
# benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}"
# benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}"
script: "scripts/prepare_network.py"
def memory(w):
@@ -214,34 +215,43 @@ def memory(w):
if w.clusters.endswith('m'):
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
else:
return int(factor * (10000 + 190 * int(w.clusters)))
return int(factor * (10000 + 195 * int(w.clusters)))
# return 4890+310 * int(w.clusters)
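# Rough illustration of the updated fit (hypothetical wildcard values; the
# `factor` term scaling with temporal resolution is defined above this hunk):
#   128 clusters  -> factor * (10000 + 195 * 128) = factor * 34960 MB
#   "128m"        -> factor * (18000 + 180 * 128) = factor * 41040 MB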
rule solve_network:
input: "networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc"
output: "results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc"
input: "networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc"
output: "results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc"
shadow: "shallow"
log:
solver="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_solver.log",
python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_python.log",
memory="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_memory.log"
benchmark: "benchmarks/solve_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}"
solver="logs/{network}_s{simpl}_{clusters}_l{ll}_{opts}_solver.log",
python="logs/{network}_s{simpl}_{clusters}_l{ll}_{opts}_python.log",
memory="logs/{network}_s{simpl}_{clusters}_l{ll}_{opts}_memory.log"
benchmark: "benchmarks/solve_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}"
threads: 4
resources: mem=memory
# group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain
script: "scripts/solve_network.py"
rule trace_solve_network:
input: "networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc"
output: "results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}_trace.nc"
shadow: "shallow"
log: python="logs/{network}_s{simpl}_{clusters}_l{ll}_{opts}_python_trace.log",
threads: 4
resources: mem=memory
script: "scripts/trace_solve_network.py"
rule solve_operations_network:
input:
unprepared="networks/{network}_s{simpl}_{clusters}.nc",
optimized="results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc"
output: "results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_op.nc"
optimized="results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc"
output: "results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}_op.nc"
shadow: "shallow"
log:
solver="logs/solve_operations_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_op_solver.log",
python="logs/solve_operations_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_op_python.log",
memory="logs/solve_operations_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_op_memory.log"
benchmark: "benchmarks/solve_operations_network/{network}_s{simpl}_{clusters}_lv{lv}_{opts}"
solver="logs/solve_operations_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}_op_solver.log",
python="logs/solve_operations_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}_op_python.log",
memory="logs/solve_operations_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}_op_memory.log"
benchmark: "benchmarks/solve_operations_network/{network}_s{simpl}_{clusters}_l{ll}_{opts}"
threads: 4
resources: mem=(lambda w: 5000 + 372 * int(w.clusters))
# group: "solve_operations"
@@ -249,31 +259,40 @@ rule solve_operations_network:
rule plot_network:
input:
network="results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc",
network="results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc",
tech_costs=COSTS
output:
only_map="results/plots/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{attr}.{ext}",
ext="results/plots/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_{attr}_ext.{ext}"
only_map="results/plots/{network}_s{simpl}_{clusters}_l{ll}_{opts}_{attr}.{ext}",
ext="results/plots/{network}_s{simpl}_{clusters}_l{ll}_{opts}_{attr}_ext.{ext}"
script: "scripts/plot_network.py"
def summary_networks(w):
def input_make_summary(w):
# It's mildly hacky to include the separate costs input as first entry
return ([COSTS] +
expand("results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc",
expand("results/networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc",
network=w.network,
**{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
for k in ["simpl", "clusters", "lv", "opts"]}))
for k in ["simpl", "clusters", "l", "opts"]}))
rule make_summary:
input: summary_networks
output: directory("results/summaries/{network}_s{simpl}_{clusters}_lv{lv}_{opts}")
input: input_make_summary
output: directory("results/summaries/{network}_s{simpl}_{clusters}_l{ll}_{opts}_{country}")
script: "scripts/make_summary.py"
rule plot_summary:
input: directory("results/summaries/{network}_s{simpl}_{clusters}_lv{lv}_{opts}")
output: "results/plots/summary_{summary}_{network}_s{simpl}_{clusters}_lv{lv}_{opts}.{ext}"
input: "results/summaries/{network}_s{simpl}_{clusters}_l{ll}_{opts}_{country}"
output: "results/plots/summary_{summary}_{network}_s{simpl}_{clusters}_l{ll}_{opts}_{country}.{ext}"
script: "scripts/plot_summary.py"
def input_plot_p_nom_max(wildcards):
return [('networks/{network}_s{simpl}{maybe_cluster}.nc'
.format(maybe_cluster=('' if c == 'full' else ('_' + c)), **wildcards))
for c in wildcards.clusters.split(",")]
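# Illustration (hypothetical wildcards): network="elec", simpl="" and
# clusters="full,37" resolve to ['networks/elec_s.nc', 'networks/elec_s_37.nc'],
# i.e. "full" selects the simplified but unclustered network.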
rule plot_p_nom_max:
input: input_plot_p_nom_max
output: "results/plots/{network}_s{simpl}_cum_p_nom_max_{clusters}_{technology}_{country}.{ext}"
script: "scripts/plot_p_nom_max.py"
# Local Variables:
# mode: python
# End:

cluster.yaml

@@ -5,7 +5,10 @@ feedin_preparation:
walltime: "12:00:00"
solve_network:
walltime: "02:00:00:00"
walltime: "05:00:00:00"
trace_solve_network:
walltime: "05:00:00:00"
solve:
walltime: "05:00:00:00"

config.yaml

@@ -6,7 +6,9 @@ summary_dir: results
scenario:
sectors: [E] # ,E+EV,E+BEV,E+BEV+V2G] # [ E+EV, E+BEV, E+BEV+V2G ]
simpl: ['']
lv: [1.0, 1.125, 1.25, 1.5, 2.0, 3.0, 'inf']
#ll: ['v1.0', 'v1.09', 'v1.125', 'v1.18', 'v1.25', 'v1.35', 'v1.5', 'v1.7', 'v2.0', 'vopt'] # line limit; a 'v' prefix means volume, a 'c' prefix means cost
ll: ['vopt', 'copt'] #['v1.0', 'v1.125', 'v1.25', 'v1.5', 'v2.0', 'vopt'] # line limit; a 'v' prefix means volume, a 'c' prefix means cost
#ll: ['c1.0', 'v1.125', 'v1.25', 'v1.5', 'v2.0', 'vopt'] # line limit; a 'v' prefix means volume, a 'c' prefix means cost
clusters: [37, 45, 64, 90, 128, 181, 256, 362, 512] # (2**np.r_[5.5:9:.5]).astype(int)
opts: [Co2L-3H] #, LC-FL, LC-T, Ep-T, Co2L-T]
@@ -34,6 +36,11 @@ electricity:
battery: 6
H2: 168
# estimate_renewable_capacities_from_capacity_stats:
# # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur
# Wind: [onwind, offwind-ac, offwind-dc]
# Solar: [solar]
conventional_carriers: [] # nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
atlite:
@@ -58,7 +65,7 @@ renewable:
method: wind
turbine: Vestas_V112_3MW
# ScholzPhd Tab 4.3.1: 10MW/km^2
capacity_per_sqm: 3
capacity_per_sqkm: 3
# correction_factor: 0.93
corine:
#The selection of CORINE Land Cover [1] types that are allowed for wind and solar are based on [2] p.42 / p.28
@@ -70,19 +77,36 @@ renewable:
24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
offwind:
natura: true
potential: simple # or conservative
clip_p_max_pu: 1.e-2
offwind-ac:
cutout: europe-2013-era5
resource:
method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore
capacity_per_sqkm: 3
# correction_factor: 0.93
corine: [44, 255]
natura: true
max_depth: 50
max_shore_distance: 80000
potential: simple # or conservative
clip_p_max_pu: 1.e-2
offwind-dc:
cutout: europe-2013-era5
resource:
method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore
# ScholzPhd Tab 4.3.1: 10MW/km^2
capacity_per_sqm: 3
height_cutoff: 50
capacity_per_sqkm: 3
# correction_factor: 0.93
corine:
grid_codes: [44, 255]
natura: true
corine: [44, 255]
natura: true
max_depth: 50
min_shore_distance: 80000
potential: simple # or conservative
clip_p_max_pu: 1.e-2
solar:
cutout: europe-2013-sarah
resource:
@@ -92,18 +116,20 @@ renewable:
slope: 35.
azimuth: 180.
# ScholzPhd Tab 4.3.1: 170 MW/km^2
capacity_per_sqm: 1.7
capacity_per_sqkm: 1.7
correction_factor: 0.877
corine:
grid_codes: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 26, 31, 32]
natura: true
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true
potential: simple # or conservative
clip_p_max_pu: 1.e-2
hydro:
cutout: europe-2013-era5
carriers: [ror, PHS, hydro]
PHS_max_hours: 6
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country,
# estimate_by_large_installations or a float
clip_min_inflow: 1.0
lines:
types:
220.: "Al/St 240/40 2-bundle 220.0"
@@ -149,13 +175,13 @@ costs:
solving:
options:
formulation: kirchhoff
clip_p_max_pu: 1.e-2
load_shedding: true
#load_shedding: true
noisy_costs: true
min_iterations: 4
# max_iterations: 6
max_iterations: 3
#nhours: 10
clip_p_max_pu: 0.01
solver:
name: gurobi
threads: 4
@@ -188,7 +214,7 @@ plotting:
energy_threshold: 50.
vre_techs: ["onwind", "offwind", "solar", "ror"]
vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"]
conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"]
storage_techs: ["hydro+PHS", "battery", "H2"]
# store_techs: ["Li ion", "water tanks"]
@@ -203,7 +229,11 @@ plotting:
"onwind" : "b"
"onshore wind" : "b"
'offwind' : "c"
'offwind-ac' : "c"
'offwind-dc' : "aquamarine"
'offshore wind' : "c"
'offshore wind ac' : "c"
'offshore wind dc' : "aquamarine"
"hydro" : "#3B5323"
"hydro+PHS" : "#3B5323"
"hydro reservoir" : "#3B5323"

data/costs.csv

@@ -1,7 +1,7 @@
technology,year,parameter,value,unit,source
solar-rooftop,2030,discount rate,0.04,per unit,standard for decentral
onwind,2030,lifetime,25,years,IEA2010
offwind,2030,lifetime,25,years,IEA2010
onwind,2030,lifetime,30,years,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,lifetime,30,years,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,lifetime,25,years,IEA2010
solar-rooftop,2030,lifetime,25,years,IEA2010
solar-utility,2030,lifetime,25,years,IEA2010
@@ -16,8 +16,15 @@ lignite,2030,lifetime,40,years,IEA2010
geothermal,2030,lifetime,40,years,IEA2010
biomass,2030,lifetime,30,years,ECF2010 in DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,lifetime,30,years,ECF2010 in DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,investment,1182,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
offwind,2030,investment,2506,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,investment,1110,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
onwind-landcosts,2030,investment,200,EUR/kWel,Land costs and compensation payments conservatively estimated based on DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,investment,1640,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-station,2030,investment,250,EUR/kWel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-connection-submarine,2030,investment,2685,EUR/MW/km,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-ac-connection-underground,2030,investment,1342,EUR/MW/km,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind-dc-station,2030,investment,400,EUR/kWel,Haertel 2017; assuming one onshore and one offshore node + 13% learning reduction
offwind-dc-connection-submarine,2030,investment,2000,EUR/MW/km,DTU report based on Fig 34 of https://ec.europa.eu/energy/sites/ener/files/documents/2014_nsog_report.pdf
offwind-dc-connection-underground,2030,investment,1000,EUR/MW/km,Haertel 2017; average + 13% learning reduction
solar,2030,investment,600,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
biomass,2030,investment,2209,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
geothermal,2030,investment,3392,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
@@ -32,8 +39,8 @@ OCGT,2030,investment,400,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
nuclear,2030,investment,6000,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,investment,800,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
oil,2030,investment,400,EUR/kWel,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,FOM,2.961083,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
offwind,2030,FOM,3.192338,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,FOM,2.450549,%/year,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,FOM,2.304878,%/year,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,FOM,4.166667,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
solar-rooftop,2030,FOM,2,%/year,ETIP PV
solar-utility,2030,FOM,3,%/year,ETIP PV
@@ -47,8 +54,8 @@ hydro,2030,FOM,1,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
ror,2030,FOM,2,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
CCGT,2030,FOM,2.5,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
OCGT,2030,FOM,3.75,%/year,DIW DataDoc http://hdl.handle.net/10419/80348
onwind,2030,VOM,0.015,EUR/MWhel,RES costs made up to fix curtailment order
offwind,2030,VOM,0.02,EUR/MWhel,RES costs made up to fix curtailment order
onwind,2030,VOM,2.3,EUR/MWhel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
offwind,2030,VOM,2.7,EUR/MWhel,DEA https://ens.dk/en/our-services/projections-and-models/technology-data
solar,2030,VOM,0.01,EUR/MWhel,RES costs made up to fix curtailment order
coal,2030,VOM,6,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348 PC (Advanced/SuperC)
lignite,2030,VOM,7,EUR/MWhel,DIW DataDoc http://hdl.handle.net/10419/80348
@@ -175,7 +182,7 @@ HVAC overhead,2030,FOM,2,%/year,Hagspiel
HVDC overhead,2030,investment,400,EUR/MW/km,Hagspiel
HVDC overhead,2030,lifetime,40,years,Hagspiel
HVDC overhead,2030,FOM,2,%/year,Hagspiel
HVDC submarine,2030,investment,2000,EUR/MW/km,Own analysis of European submarine HVDC projects since 2000
HVDC submarine,2030,investment,2000,EUR/MW/km,DTU report based on Fig 34 of https://ec.europa.eu/energy/sites/ener/files/documents/2014_nsog_report.pdf
HVDC submarine,2030,lifetime,40,years,Hagspiel
HVDC submarine,2030,FOM,2,%/year,Hagspiel
HVDC inverter pair,2030,investment,150000,EUR/MW,Hagspiel


data/geth2015_hydro_capacities.csv

@@ -0,0 +1,32 @@
# Table 25 from F. Geth et al., An overview of large-scale stationary electricity storage plants in Europe (2015) 1212–1227
country,n,p_nom_discharge,p_nom_charge,e_stor
AT,19,4.051,3.246,132.41
BE,3,1.301,1.196,5.71
BG,3,1.399,0.93,11.13
HR,3,0.281,0.246,2.34
CY,0,-,-,-
CZ,3,1.119,1.145,5.72
DK,0,-,-,-
EE,0,-,-,-
FI,0,-,-,-
FR,10,5.512,4.317,83.37
DE,34,6.805,6.417,39.12
GR,2,0.735,-,4.97
HU,0,-,-,-
IE,1,0.292,-,1.8
IT,25,7.833,7.64,68.27
LV,0,-,-,-
LT,1,0.9,0.88,10.8
LU,1,1.296,1.05,4.92
MT,0,-,-,-
NL,0,-,-,-
PL,6,1.757,1.647,7.96
PT,7,1.279,-,40.77
RO,5,0.285,0.2,10.2
SK,4,1.016,0.79,3.63
SI,1,0.185,0.18,0.5
ES,26,6.358,5.859,70
SE,2,0.091,-,72.12
GB,4,2.788,2.65,26.7
NO,8,1.273,0.892,399.39
CH,20,2.291,1.512,311.48

environment.yaml

@@ -14,11 +14,19 @@ dependencies:
- seaborn
- memory_profiler
- networkx>=1.10
- netcdf4
- xarray
- xlrd
- scikit-learn
- pytables
- pycountry
# Second order dependencies which should really be deps of atlite
- xarray
- netcdf4
- bottleneck
- cyordereddict
- toolz
- dask
- progressbar2
# Include ipython so that one does not inadvertently drop out of the conda
# environment by calling ipython
@@ -36,9 +44,11 @@ dependencies:
# The FRESNA/KIT stuff is not packaged for conda yet
- pip:
- pypsa>=0.13
- pypsa>=0.13.2
- vresutils>=0.2.5
- git+https://github.com/FRESNA/atlite.git
#- git+https://github.com/FRESNA/powerplantmatching.git
- git+https://github.com/FRESNA/atlite.git#egg=atlite
- git+https://github.com/PyPSA/glaes.git#egg=glaes
- git+https://github.com/PyPSA/geokit.git#egg=geokit
#- git+https://github.com/FRESNA/powerplantmatching.git#egg=powerplantmatching
#- https://software.ecmwf.int/wiki/download/attachments/56664858/ecmwf-api-client-python.tgz
- countrycode

File diff suppressed because it is too large

scripts/add_electricity.py

@@ -18,6 +18,13 @@ from vresutils import transfer as vtransfer
import pypsa
try:
import powerplantmatching as ppm
from build_powerplants import country_alpha_2
has_ppm = True
except ImportError:
has_ppm = False
def normed(s): return s/s.sum()
@@ -111,6 +118,9 @@ def attach_load(n):
opsd_load = timeseries_opsd(slice(*n.snapshots[[0,-1]].year.astype(str)),
snakemake.input.opsd_load)
# Convert to naive UTC (has to be explicit since pandas 0.24)
opsd_load.index = opsd_load.index.tz_localize(None)
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
def normed(x): return x.divide(x.sum())
@@ -161,6 +171,18 @@ def attach_wind_and_solar(n, costs):
n.add("Carrier", name=tech)
with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds:
suptech = tech.split('-', 2)[0]
if suptech == 'offwind':
underwater_fraction = ds['underwater_fraction'].to_pandas()
connection_cost = (snakemake.config['lines']['length_factor'] * ds['average_distance'].to_pandas() *
(underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
(1. - underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost']))
capital_cost = costs.at['offwind', 'capital_cost'] + costs.at[tech + '-station', 'capital_cost'] + connection_cost
logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format(connection_cost.min(), connection_cost.max(), tech))
elif suptech == 'onwind':
capital_cost = costs.at['onwind', 'capital_cost'] + costs.at['onwind-landcosts', 'capital_cost']
else:
capital_cost = costs.at[tech, 'capital_cost']
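# Illustration with made-up numbers: a bus whose offwind-ac cells lie on
# average 60 km from the substation with 70% of the path under water pays
#   length_factor * 60 * (0.7 * costs.at['offwind-ac-connection-submarine', 'capital_cost']
#                         + 0.3 * costs.at['offwind-ac-connection-underground', 'capital_cost'])
# on top of the turbine ('offwind') and station cost.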
n.madd("Generator", ds.indexes['bus'], ' ' + tech,
bus=ds.indexes['bus'],
@@ -168,9 +190,9 @@ def attach_wind_and_solar(n, costs):
p_nom_extendable=True,
p_nom_max=ds['p_nom_max'].to_pandas(),
weight=ds['weight'].to_pandas(),
marginal_cost=costs.at[tech, 'marginal_cost'],
capital_cost=costs.at[tech, 'capital_cost'],
efficiency=costs.at[tech, 'efficiency'],
marginal_cost=costs.at[suptech, 'marginal_cost'],
capital_cost=capital_cost,
efficiency=costs.at[suptech, 'efficiency'],
p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())
@@ -219,13 +241,13 @@ def attach_hydro(n, costs, ppl):
has_pump=ppl.technology.str.contains('Pumped Storage')
)
country = ppl.loc[ppl.has_inflow, 'bus'].map(n.buses.country)
country = ppl['bus'].map(n.buses.country)
# distribute by p_nom in each country
dist_key = ppl.loc[ppl.has_inflow, 'p_nom'].groupby(country).transform(normed)
with xr.open_dataarray(snakemake.input.profile_hydro) as inflow:
inflow_t = (
inflow.sel(countries=country.values)
inflow.sel(countries=country.loc[ppl.has_inflow].values)
.rename({'countries': 'name'})
.assign_coords(name=ppl.index[ppl.has_inflow])
.transpose('time', 'name')
@@ -260,13 +282,24 @@ def attach_hydro(n, costs, ppl):
inflow=inflow_t.loc[:, phs.index[phs.has_inflow]])
if 'hydro' in carriers:
hydro = ppl.loc[ppl.has_store & ~ ppl.has_pump & ppl.has_inflow]
hydro = ppl.loc[ppl.has_store & ~ ppl.has_pump & ppl.has_inflow].join(country.rename('country'))
hydro_max_hours = c.get('hydro_max_hours')
if hydro_max_hours is None:
if hydro_max_hours == 'energy_capacity_totals_by_country':
hydro_e_country = pd.read_csv(snakemake.input.hydro_capacities, index_col=0)["E_store[TWh]"].clip(lower=0.2)*1e6
hydro_max_hours_country = hydro_e_country / hydro.p_nom.groupby(country).sum()
hydro_max_hours = country.loc[hydro.index].map(hydro_max_hours_country)
hydro_max_hours_country = hydro_e_country / hydro.groupby('country').p_nom.sum()
hydro_max_hours = hydro.country.map(hydro_e_country / hydro.groupby('country').p_nom.sum())
elif hydro_max_hours == 'estimate_by_large_installations':
hydro_capacities = pd.read_csv(snakemake.input.hydro_capacities, comment="#", na_values='-', index_col=0)
estim_hydro_max_hours = hydro_capacities.e_stor / hydro_capacities.p_nom_discharge
missing_countries = (pd.Index(hydro['country'].unique())
.difference(estim_hydro_max_hours.dropna().index))
if not missing_countries.empty:
logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}"
.format(", ".join(missing_countries)))
hydro_max_hours = hydro['country'].map(estim_hydro_max_hours).fillna(6)
n.madd('StorageUnit', hydro.index, carrier='hydro',
bus=hydro['bus'],
@@ -286,7 +319,7 @@ def attach_hydro(n, costs, ppl):
def attach_extendable_generators(n, costs, ppl):
elec_opts = snakemake.config['electricity']
carriers = list(elec_opts['extendable_carriers']['Generator'])
assert carriers == ['OCGT'], "Only OCGT plants as extendable generators allowed for now"
assert set(carriers).issubset(['OCGT']), "Only OCGT plants as extendable generators allowed for now"
_add_missing_carriers_from_costs(n, costs, carriers)
@@ -373,6 +406,29 @@ def attach_storage(n, costs):
# marginal_cost=options['marginal_cost_storage'],
# p_nom_extendable=True)
def estimate_renewable_capacities(n, tech_map=None):
if tech_map is None:
tech_map = snakemake.config['electricity'].get('estimate_renewable_capacities_from_capacity_stats', {})
if len(tech_map) == 0: return
assert has_ppm, "The estimation of renewable capacities needs the powerplantmatching package"
capacities = ppm.data.Capacity_stats()
capacities['alpha_2'] = capacities['Country'].map(country_alpha_2)
capacities = capacities.loc[capacities.Energy_Source_Level_2].set_index(['Fueltype', 'alpha_2']).sort_index()
countries = n.buses.country.unique()
for ppm_fueltype, techs in tech_map.items():
tech_capacities = capacities.loc[ppm_fueltype, 'Capacity'].reindex(countries, fill_value=0.)
tech_b = n.generators.carrier.isin(techs)
n.generators.loc[tech_b, 'p_nom'] = (
(n.generators_t.p_max_pu.mean().loc[tech_b] * n.generators.loc[tech_b, 'p_nom_max']) # maximal yearly generation
.groupby(n.generators.bus.map(n.buses.country)) # for each country
.transform(lambda s: normed(s) * tech_capacities.at[s.name])
.where(lambda s: s>0.1, 0.) # only capacities above 100kW
)
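# In words: the country total from ppm's Capacity_stats is distributed over
# that country's generators of the mapped carriers in proportion to their
# expected yearly generation (mean p_max_pu * p_nom_max); allocations below
# 0.1 MW are dropped.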
def add_co2limit(n, Nyears=1.):
n.add("GlobalConstraint", "CO2Limit",
@@ -395,11 +451,12 @@ if __name__ == "__main__":
snakemake = MockSnakemake(output=['networks/elec.nc'])
snakemake.input = snakemake.expand(
Dict(base_network='networks/base.nc',
tech_costs='data/costs/costs.csv',
tech_costs='data/costs.csv',
regions="resources/regions_onshore.geojson",
powerplants="resources/powerplants.csv",
hydro_capacities='data/hydro_capacities.csv',
opsd_load='data/time_series_60min_singleindex_filtered.csv',
hydro_capacities='data/bundle/hydro_capacities.csv',
opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
nuts3_shapes='resources/nuts3_shapes.geojson',
**{'profile_' + t: "resources/profile_" + t + ".nc"
for t in snakemake.config['renewable']})
)
@@ -422,4 +479,6 @@ if __name__ == "__main__":
attach_extendable_generators(n, costs, ppl)
attach_storage(n, costs)
estimate_renewable_capacities(n)
n.export_to_netcdf(snakemake.output[0])

scripts/base_network.py

@@ -107,6 +107,18 @@ def _load_links_from_eg(buses):
def _add_links_from_tyndp(buses, links):
links_tyndp = pd.read_csv(snakemake.input.links_tyndp)
# remove all links from list which lie outside all of the desired countries
europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry']
europe_shape_prepped = shapely.prepared.prep(europe_shape)
x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b
if not is_within_covered_countries_b.all():
logger.info("TYNDP links outside of the covered area (skipping): " +
", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"]))
links_tyndp = links_tyndp.loc[is_within_covered_countries_b]
has_replaces_b = links_tyndp.replaces.notnull()
oids = dict(Bus=_get_oid(buses), Link=_get_oid(links))
keep_b = dict(Bus=pd.Series(True, index=buses.index),
@@ -123,7 +135,7 @@ def _add_links_from_tyndp(buses, links):
# Corresponds approximately to 60km tolerances
if links_tyndp["j"].notnull().any():
logger.info("The following TYNDP links were already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()]
tree = sp.spatial.KDTree(buses[['x', 'y']])
@@ -277,8 +289,8 @@ def _set_countries_and_substations(n):
)
countries = snakemake.config['countries']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('id')['geometry']
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('id')['geometry']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry']
substation_b = buses['symbol'].str.contains('substation|converter station', case=False)
def prefer_voltage(x, which):
@@ -387,7 +399,7 @@ def _replace_b2b_converter_at_country_border_by_link(n):
.format(i, b0, line, linkcntry.at[i], buscntry.at[b1]))
def _set_links_underwater_fraction(n):
offshore_shape = gpd.read_file(snakemake.input.offshore_shapes).set_index('id').unary_union
offshore_shape = gpd.read_file(snakemake.input.offshore_shapes).unary_union
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length
@@ -408,6 +420,12 @@ def _adjust_capacities_of_under_construction_branches(n):
elif links_mode != 'keep':
logger.warn("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.")
if lines_mode == 'remove' or links_mode == 'remove':
# We might need to remove further unconnected components
n = _remove_unconnected_components(n)
return n
def base_network():
buses = _load_buses_from_eg()
@@ -448,7 +466,7 @@ def base_network():
_replace_b2b_converter_at_country_border_by_link(n)
_adjust_capacities_of_under_construction_branches(n)
n = _adjust_capacities_of_under_construction_branches(n)
return n

scripts/build_bus_regions.py

@@ -12,8 +12,8 @@ countries = snakemake.config['countries']
n = pypsa.Network(snakemake.input.base_network)
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('id')['geometry']
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('id')['geometry']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry']
onshore_regions = []
offshore_regions = []
@@ -24,6 +24,8 @@ for country in countries:
onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({
'x': onshore_locs['x'],
'y': onshore_locs['y'],
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape),
'country': country
}, index=onshore_locs.index))
@@ -32,6 +34,8 @@ for country in countries:
offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({
'x': offshore_locs['x'],
'y': offshore_locs['y'],
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape),
'country': country
}, index=offshore_locs.index)
@@ -41,7 +45,9 @@ for country in countries:
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
s.reset_index().to_file(fn, driver='GeoJSON')
df = s.reset_index()
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema)
save_to_geojson(pd.concat(onshore_regions), snakemake.output.regions_onshore)

scripts/build_hydro_profile.py

@@ -9,11 +9,12 @@ import logging
logger = logging.getLogger(__name__)
logger.setLevel(level=snakemake.config['logging_level'])
cutout = atlite.Cutout(snakemake.config['renewable']['hydro']['cutout'],
config = snakemake.config['renewable']['hydro']
cutout = atlite.Cutout(config['cutout'],
cutout_dir=os.path.dirname(snakemake.input.cutout))
countries = snakemake.config['countries']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('id')['geometry'].reindex(countries)
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'].reindex(countries)
country_shapes.index.name = 'countries'
eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries)
@@ -22,4 +23,7 @@ inflow = cutout.runoff(shapes=country_shapes,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats)
if 'clip_min_inflow' in config:
inflow.values[inflow.values < config['clip_min_inflow']] = 0.
inflow.to_netcdf(snakemake.output[0])

scripts/build_natura_raster.py

@@ -0,0 +1,19 @@
import numpy as np
import atlite
from osgeo import gdal
import geokit as gk
def determine_cutout_xXyY(cutout_name):
cutout = atlite.Cutout(cutout_name, cutout_dir="cutouts")
x, X, y, Y = cutout.extent
dx = (X - x) / (cutout.shape[1] - 1)
dy = (Y - y) / (cutout.shape[0] - 1)
return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.]
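# Note: cutout.extent holds cell centres, so padding by half a cell on each
# side yields the outer cell edges, e.g. centres 0.0..10.0 with dx = 0.25
# become an extent of -0.125..10.125.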
cutout_names = np.unique([res['cutout'] for res in snakemake.config['renewable'].values()])
xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutout_names))
xXyY = min(xs), max(Xs), min(ys), max(Ys)
natura = gk.vector.loadVector(snakemake.input[0])
extent = gk.Extent.from_xXyY(xXyY).castTo(3035).fit(100)
extent.rasterize(natura, pixelWidth=100, pixelHeight=100, output=snakemake.output[0])

scripts/build_powerplants.py

@@ -1,40 +1,68 @@
# coding: utf-8
import logging
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree as KDTree
import pycountry as pyc
import pypsa
import powerplantmatching as ppm
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake, Dict
def country_alpha_2(name):
try:
cntry = pyc.countries.get(name=name)
except KeyError:
cntry = None
if cntry is None:
cntry = pyc.countries.get(official_name=name)
return cntry.alpha_2
snakemake = MockSnakemake(
input=Dict(base_network='networks/base.nc'),
output=['resources/powerplants.csv']
)
if __name__ == "__main__":
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake, Dict
logging.basicConfig(level=snakemake.config['logging_level'])
snakemake = MockSnakemake(
input=Dict(base_network='networks/base.nc'),
output=['resources/powerplants.csv']
)
n = pypsa.Network(snakemake.input.base_network)
logging.basicConfig(level=snakemake.config['logging_level'])
ppl = (ppm.collection.matched_data()
[lambda df : ~df.Fueltype.isin(('Solar', 'Wind'))]
.pipe(ppm.cleaning.clean_technology)
.assign(Fueltype=lambda df: (
df.Fueltype.where(df.Fueltype != 'Natural Gas',
df.Technology.replace('Steam Turbine', 'OCGT').fillna('OCGT'))))
.pipe(ppm.utils.fill_geoposition, parse=True, only_saved_locs=True)
.pipe(ppm.heuristics.fill_missing_duration))
n = pypsa.Network(snakemake.input.base_network)
# ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('CCGT'), 'Fueltype'] = 'CCGT'
# ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('Steam Turbine'), 'Fueltype'] = 'CCGT'
ppl = (ppm.collection.matched_data()
[lambda df : ~df.Fueltype.isin(('Solar', 'Wind'))]
.pipe(ppm.cleaning.clean_technology)
.assign(Fueltype=lambda df: (
df.Fueltype.where(df.Fueltype != 'Natural Gas',
df.Technology.replace('Steam Turbine', 'OCGT').fillna('OCGT'))))
.pipe(ppm.utils.fill_geoposition))
ppl = ppl.loc[ppl.lon.notnull() & ppl.lat.notnull()]
# ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('CCGT'), 'Fueltype'] = 'CCGT'
# ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('Steam Turbine'), 'Fueltype'] = 'CCGT'
substation_lv_i = n.buses.index[n.buses['substation_lv']]
kdtree = KDTree(n.buses.loc[substation_lv_i, ['x','y']].values)
ppl = ppl.assign(bus=substation_lv_i[kdtree.query(ppl[['lon','lat']].values)[1]])
ppl = ppl.loc[ppl.lon.notnull() & ppl.lat.notnull()]
ppl.to_csv(snakemake.output[0])
ppl_country = ppl.Country.map(country_alpha_2)
countries = n.buses.country.unique()
cntries_without_ppl = []
for cntry in countries:
substation_lv_i = n.buses.index[n.buses['substation_lv'] & (n.buses.country == cntry)]
ppl_b = ppl_country == cntry
if not ppl_b.any():
cntries_without_ppl.append(cntry)
continue
kdtree = KDTree(n.buses.loc[substation_lv_i, ['x','y']].values)
ppl.loc[ppl_b, 'bus'] = substation_lv_i[kdtree.query(ppl.loc[ppl_b, ['lon','lat']].values)[1]]
if cntries_without_ppl:
logging.warning("No powerplants known in: {}".format(", ".join(cntries_without_ppl)))
bus_null_b = ppl["bus"].isnull()
if bus_null_b.any():
logging.warning("Couldn't find close bus for {} powerplants".format(bus_null_b.sum()))
ppl.to_csv(snakemake.output[0])

scripts/build_renewable_potentials.py

@@ -1,22 +0,0 @@
import os
import atlite
import xarray as xr
import pandas as pd
from vresutils import landuse as vlanduse
config = snakemake.config['renewable'][snakemake.wildcards.technology]
cutout = atlite.Cutout(config['cutout'],
cutout_dir=os.path.dirname(snakemake.input.cutout))
total_capacity = config['capacity_per_sqm'] * vlanduse._cutout_cell_areas(cutout)
potentials = xr.DataArray(total_capacity *
vlanduse.corine_for_cutout(cutout, fn=snakemake.input.corine,
natura_fn=snakemake.input.natura, **config['corine']),
[cutout.meta.indexes['y'], cutout.meta.indexes['x']])
if 'height_cutoff' in config:
potentials.values[(cutout.meta['height'] < - config['height_cutoff']).transpose(*potentials.dims)] = 0.
potentials.to_netcdf(snakemake.output[0])

scripts/build_renewable_profiles.py

@@ -5,51 +5,185 @@ import atlite
import numpy as np
import xarray as xr
import pandas as pd
import geopandas as gpd
from multiprocessing import Pool
import glaes as gl
import geokit as gk
from osgeo import gdal
from scipy.sparse import csr_matrix, vstack
from pypsa.geo import haversine
from vresutils import landuse as vlanduse
from vresutils.array import spdiag
import progressbar as pgb
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=snakemake.config['logging_level'])
config = snakemake.config['renewable'][snakemake.wildcards.technology]
bounds = dx = dy = gebco = clc = natura = None
def init_globals(n_bounds, n_dx, n_dy):
# global in each process of the multiprocessing.Pool
global bounds, dx, dy, gebco, clc, natura
time = pd.date_range(freq='m', **snakemake.config['snapshots'])
params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]]))
bounds = n_bounds
dx = n_dx
dy = n_dy
regions = gpd.read_file(snakemake.input.regions).set_index('name')
regions.index.name = 'bus'
gebco = gk.raster.loadRaster(snakemake.input.gebco)
gebco.SetProjection(gk.srs.loadSRS(4326).ExportToWkt())
cutout = atlite.Cutout(config['cutout'],
cutout_dir=os.path.dirname(snakemake.input.cutout),
**params)
clc = gk.raster.loadRaster(snakemake.input.corine)
clc.SetProjection(gk.srs.loadSRS(3035).ExportToWkt())
# Potentials
potentials = xr.open_dataarray(snakemake.input.potentials)
natura = gk.raster.loadRaster(snakemake.input.natura)
# Indicatormatrix
indicatormatrix = cutout.indicatormatrix(regions.geometry)
def downsample_to_coarse_grid(bounds, dx, dy, mask, data):
# The GDAL warp function with the 'average' resample algorithm needs a band of zero values of at least
# the size of one coarse cell around the original raster or it produces erroneous results
orig = mask.createRaster(data=data)
padded_extent = mask.extent.castTo(bounds.srs).pad(max(dx, dy)).castTo(mask.srs)
padded = padded_extent.fit((mask.pixelWidth, mask.pixelHeight)).warp(orig, mask.pixelWidth, mask.pixelHeight)
orig = None # free original raster
average = bounds.createRaster(dx, dy, dtype=gdal.GDT_Float32)
assert gdal.Warp(average, padded, resampleAlg='average') == 1, "gdal warp failed: %s" % gdal.GetLastErrorMsg()
return average
resource = config['resource']
func = getattr(cutout, resource.pop('method'))
correction_factor = config.get('correction_factor', 1.)
if correction_factor != 1.:
logger.warning('correction_factor is set as {}'.format(correction_factor))
capacity_factor = correction_factor * func(capacity_factor=True, **resource)
layout = capacity_factor * potentials
def calculate_potential(gid):
feature = gk.vector.extractFeature(snakemake.input.regions, where=gid)
ec = gl.ExclusionCalculator(feature.geom)
profile, capacities = func(matrix=indicatormatrix, index=regions.index,
layout=layout, per_unit=True, return_capacity=True,
**resource)
corine = config.get("corine", {})
if isinstance(corine, list):
corine = {'grid_codes': corine}
if "grid_codes" in corine:
ec.excludeRasterType(clc, value=corine["grid_codes"], invert=True)
if corine.get("distance", 0.) > 0.:
ec.excludeRasterType(clc, value=corine["distance_grid_codes"], buffer=corine["distance"])
relativepotentials = (potentials / layout).stack(spatial=('y', 'x')).values
p_nom_max = xr.DataArray([np.nanmin(relativepotentials[row.nonzero()[1]])
if row.getnnz() > 0 else 0
for row in indicatormatrix.tocsr()],
[capacities.coords['bus']]) * capacities
if config.get("natura", False):
ec.excludeRasterType(natura, value=1)
if "max_depth" in config:
ec.excludeRasterType(gebco, (None, -config["max_depth"]))
ds = xr.merge([(correction_factor * profile).rename('profile'),
capacities.rename('weight'),
p_nom_max.rename('p_nom_max'),
layout.rename('potential')])
(ds.sel(bus=ds['profile'].mean('time') > config.get('min_p_max_pu', 0.))
.to_netcdf(snakemake.output.profile))
# TODO compute a distance field as a raster beforehand
if 'max_shore_distance' in config:
ec.excludeVectorType(snakemake.input.country_shapes, buffer=config['max_shore_distance'], invert=True)
if 'min_shore_distance' in config:
ec.excludeVectorType(snakemake.input.country_shapes, buffer=config['min_shore_distance'])
availability = downsample_to_coarse_grid(bounds, dx, dy, ec.region, np.where(ec.region.mask, ec._availability, 0))
return csr_matrix(gk.raster.extractMatrix(availability).flatten() / 100.)
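# Each worker returns one sparse row of per-weather-cell availabilities in
# [0, 1]; vstack-ing these rows below gives the (bus region x weather cell)
# availability matrix that scales the technical potentials.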
if __name__ == '__main__':
pgb.streams.wrap_stderr()
logging.basicConfig(level=snakemake.config['logging_level'])
config = snakemake.config['renewable'][snakemake.wildcards.technology]
time = pd.date_range(freq='m', **snakemake.config['snapshots'])
params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]]))
cutout = atlite.Cutout(config['cutout'],
cutout_dir=os.path.dirname(snakemake.input.cutout),
**params)
minx, maxx, miny, maxy = cutout.extent
dx = (maxx - minx) / (cutout.shape[1] - 1)
dy = (maxy - miny) / (cutout.shape[0] - 1)
bounds = gk.Extent.from_xXyY((minx - dx/2., maxx + dx/2.,
miny - dy/2., maxy + dy/2.))
# Use GLAES to compute available potentials and the transition matrix
with Pool(initializer=init_globals, initargs=(bounds, dx, dy),
maxtasksperchild=20, processes=snakemake.config['atlite'].get('nprocesses', 2)) as pool:
regions = gk.vector.extractFeatures(snakemake.input.regions, onlyAttr=True)
buses = pd.Index(regions['name'], name="bus")
widgets = [
pgb.widgets.Percentage(),
' ', pgb.widgets.SimpleProgress(format='(%s)' % pgb.widgets.SimpleProgress.DEFAULT_FORMAT),
' ', pgb.widgets.Bar(),
' ', pgb.widgets.Timer(),
' ', pgb.widgets.ETA()
]
progressbar = pgb.ProgressBar(prefix='Compute GIS potentials: ', widgets=widgets, max_value=len(regions))
matrix = vstack(list(progressbar(pool.imap(calculate_potential, regions.index))))
potentials = config['capacity_per_sqkm'] * vlanduse._cutout_cell_areas(cutout)
potmatrix = matrix * spdiag(potentials.ravel())
potmatrix.data[potmatrix.data < 1.] = 0 # ignore weather cells where only less than 1 MW can be installed
potmatrix.eliminate_zeros()
resource = config['resource']
func = getattr(cutout, resource.pop('method'))
correction_factor = config.get('correction_factor', 1.)
if correction_factor != 1.:
logger.warning('correction_factor is set as {}'.format(correction_factor))
capacity_factor = correction_factor * func(capacity_factor=True, show_progress='Compute capacity factors: ', **resource).stack(spatial=('y', 'x')).values
layoutmatrix = potmatrix * spdiag(capacity_factor)
profile, capacities = func(matrix=layoutmatrix, index=buses, per_unit=True,
return_capacity=True, show_progress='Compute profiles: ',
**resource)
p_nom_max_meth = config.get('potential', 'conservative')
if p_nom_max_meth == 'simple':
p_nom_max = xr.DataArray(np.asarray(potmatrix.sum(axis=1)).squeeze(), [buses])
elif p_nom_max_meth == 'conservative':
# p_nom_max has to be calculated for each bus and is the minimal ratio
# (min over all weather grid cells of the bus region) between the available
# potential (potmatrix) and the used normalised layout (layoutmatrix /
# capacities), so we would like to calculate potmatrix / (layoutmatrix /
# capacities). Since layoutmatrix = potmatrix * capacity_factor, this
# corresponds to capacities / max(capacity factor in the voronoi cell)
p_nom_max = xr.DataArray([1./np.max(capacity_factor[inds]) if len(inds) else 0.
for inds in np.split(potmatrix.indices, potmatrix.indptr[1:-1])], [buses]) * capacities
else:
raise AssertionError('Config key `potential` should be one of "simple" (default) or "conservative",'
' not "{}"'.format(p_nom_max_meth))
layout = xr.DataArray(np.asarray(potmatrix.sum(axis=0)).reshape(cutout.shape),
[cutout.meta.indexes[ax] for ax in ['y', 'x']])
# Determine weighted average distance from substation
cell_coords = cutout.grid_coordinates()
average_distance = []
for i in regions.index:
row = layoutmatrix[i]
distances = haversine(regions.loc[i, ['x', 'y']], cell_coords[row.indices])[0]
average_distance.append((distances * (row.data / row.data.sum())).sum())
average_distance = xr.DataArray(average_distance, [buses])
ds = xr.merge([(correction_factor * profile).rename('profile'),
capacities.rename('weight'),
p_nom_max.rename('p_nom_max'),
layout.rename('potential'),
average_distance.rename('average_distance')])
if snakemake.wildcards.technology.startswith("offwind"):
import geopandas as gpd
from shapely.geometry import LineString
offshore_shape = gpd.read_file(snakemake.input.offshore_shapes).unary_union
underwater_fraction = []
for i in regions.index:
row = layoutmatrix[i]
centre_of_mass = (cell_coords[row.indices] * (row.data / row.data.sum())[:,np.newaxis]).sum(axis=0)
line = LineString([centre_of_mass, regions.loc[i, ['x', 'y']]])
underwater_fraction.append(line.intersection(offshore_shape).length / line.length)
ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses])
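# The straight line from the layout's centre of mass to the substation acts
# as a proxy cable route; the share of it crossing the offshore shapes is
# what attach_wind_and_solar uses to split submarine vs. underground
# connection costs.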
# select only buses with some capacity and minimal capacity factor
ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) &
(ds['p_nom_max'] > config.get('min_p_nom_max', 0.))))
if 'clip_p_max_pu' in config:
ds['profile'].values[ds['profile'].values < config['clip_p_max_pu']] = 0.
ds.to_netcdf(snakemake.output.profile)

scripts/build_shapes.py

@@ -9,10 +9,14 @@ import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import cascaded_union
try:
from countrycode.countrycode import countrycode
except ImportError:
from countrycode import countrycode
import pycountry as pyc
def _get_country(target, **keys):
assert len(keys) == 1
try:
return getattr(pyc.countries.get(**keys), target)
except KeyError:
return np.nan
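# e.g. _get_country('alpha_3', alpha_2='DE') -> 'DEU'; unknown codes -> NaN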
def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
if isinstance(polys, MultiPolygon):
@@ -44,13 +48,13 @@ def countries():
return s
def eez(country_shapes):
cntries = snakemake.config['countries']
cntries3 = frozenset(countrycode(cntries, origin='iso2c', target='iso3c'))
df = gpd.read_file(snakemake.input.eez)
df = df.loc[df['ISO_3digit'].isin(cntries3)]
df['name'] = countrycode(df['ISO_3digit'], origin='iso3c', target='iso2c')
df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])]
df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c))
s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False))
return gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3})
s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3})
s.index.name = "name"
return s
def country_cover(country_shapes, eez_shapes=None):
shapes = list(country_shapes)
@@ -119,12 +123,14 @@ def nuts3(country_shapes):
return df
def save_to_geojson(s, fn):
def save_to_geojson(df, fn):
if os.path.exists(fn):
os.unlink(fn)
if isinstance(s, gpd.GeoDataFrame):
s = s.reset_index()
s.to_file(fn, driver='GeoJSON')
if not isinstance(df, gpd.GeoDataFrame):
df = gpd.GeoDataFrame(dict(geometry=df))
df = df.reset_index()
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema)
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing

scripts/cluster_network.py

@@ -60,34 +60,7 @@ def plot_weighting(n, country, country_shape=None):
# # Determining the number of clusters per country
def distribute_clusters(n, n_clusters):
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
loadc = load.groupby([n.buses.country, n.buses.sub_network]).sum()
n_cluster_per_country = n_clusters * normed(loadc)
one_cluster_b = n_cluster_per_country < 0.5
n_one_cluster, n_one_cluster_prev = one_cluster_b.sum(), 0
while n_one_cluster > n_one_cluster_prev:
n_clusters_rem = n_clusters - one_cluster_b.sum()
assert n_clusters_rem > 0
n_cluster_per_country[~one_cluster_b] = n_clusters_rem * normed(loadc[~one_cluster_b])
one_cluster_b = n_cluster_per_country < 0.5
n_one_cluster, n_one_cluster_prev = one_cluster_b.sum(), n_one_cluster
n_cluster_per_country[one_cluster_b] = 1.1
n_cluster_per_country[~one_cluster_b] = n_cluster_per_country[~one_cluster_b] + 0.5
return n_cluster_per_country.astype(int)
def distribute_clusters_exactly(n, n_clusters):
for d in [0, 1, -1, 2, -2]:
n_cluster_per_country = distribute_clusters(n, n_clusters + d)
if n_cluster_per_country.sum() == n_clusters:
return n_cluster_per_country
else:
return distribute_clusters(n, n_clusters)
def distribute_clusters_optim(n, n_clusters, solver_name=None):
def distribute_clusters(n, n_clusters, solver_name=None):
if solver_name is None:
solver_name = snakemake.config['solving']['solver']['name']
@@ -96,8 +69,12 @@ def distribute_clusters_optim(n, n_clusters, solver_name=None):
.groupby([n.buses.country, n.buses.sub_network]).sum()
.pipe(normed))
N = n.buses.groupby(['country', 'sub_network']).size()
m = po.ConcreteModel()
m.n = po.Var(list(L.index), bounds=(1, None), domain=po.Integers)
def n_bounds(model, *n_id):
return (1, N[n_id])
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
m.objective = po.Objective(expr=po.sum((m.n[i] - L.loc[i]*n_clusters)**2
for i in L.index),
@@ -111,7 +88,12 @@ def distribute_clusters_optim(n, n_clusters, solver_name=None):
return pd.Series(m.n.get_values(), index=L.index).astype(int)
def busmap_for_n_clusters(n, n_clusters,algo_cluster):
def busmap_for_n_clusters(n, n_clusters, algorithm="kmeans", **algorithm_kwds):
if algorithm == "kmeans":
algorithm_kwds.setdefault('n_init', 1000)
algorithm_kwds.setdefault('max_iter', 30000)
algorithm_kwds.setdefault('tol', 1e-6)
n.determine_network_topology()
if 'snakemake' in globals():
@@ -119,26 +101,31 @@
else:
solver_name = "gurobi"
n_clusters = distribute_clusters_optim(n, n_clusters, solver_name=solver_name)
n_clusters = distribute_clusters(n, n_clusters, solver_name=solver_name)
def busmap_for_country(x,algo_cluster):
def reduce_network(n, buses):
nr = pypsa.Network()
nr.import_components_from_dataframe(buses, "Bus")
nr.import_components_from_dataframe(n.lines.loc[n.lines.bus0.isin(buses.index) & n.lines.bus1.isin(buses.index)], "Line")
return nr
def busmap_for_country(x):
prefix = x.name[0] + x.name[1] + ' '
logger.debug("Determining busmap for country {}".format(prefix[:-1]))
if len(x) == 1:
return pd.Series(prefix + '0', index=x.index)
weight = weighting_for_country(n, x)
if algo_cluster=="kmeans":
return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, n_init=1000, max_iter=30000, tol=1e-6)
elif algo_cluster=="spectral":
n2=pypsa.Network()
n2.buses=x
n2.lines=n.lines.loc[n.lines.bus0.isin(x.index)].loc[n.lines.bus1.isin(x.index)]
return prefix + busmap_by_spectral_clustering(n2, n_clusters[x.name])
elif algo_cluster=="louvain":
n2=pypsa.Network()
n2.buses=x
n2.lines=n.lines.loc[n.lines.bus0.isin(x.index)].loc[n.lines.bus1.isin(x.index)]
return prefix + busmap_by_louvain(n2, n_clusters[x.name])
return n.buses.groupby(['country', 'sub_network'], group_keys=False).apply(busmap_for_country, algo_cluster)
if algorithm == "kmeans":
return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds)
elif algorithm == "spectral":
return prefix + busmap_by_spectral_clustering(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds)
elif algorithm == "louvain":
return prefix + busmap_by_louvain(reduce_network(n, x), n_clusters[x.name], **algorithm_kwds)
else:
raise ValueError("`algorithm` must be one of 'kmeans', 'spectral' or 'louvain'")
return n.buses.groupby(['country', 'sub_network'], group_keys=False).apply(busmap_for_country)
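# Hypothetical usage of the unified interface (values purely illustrative):
#   busmap_for_n_clusters(n, 100)                          # kmeans defaults
#   busmap_for_n_clusters(n, 100, algorithm="kmeans", n_init=50)
#   busmap_for_n_clusters(n, 100, algorithm="louvain")
# The result maps each bus to a label such as 'DE0 3', i.e. the
# country/sub_network prefix plus the cluster id within that group.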
def plot_busmap_for_n_clusters(n, n_clusters=50):
busmap = busmap_for_n_clusters(n, n_clusters)
@ -147,17 +134,25 @@ def plot_busmap_for_n_clusters(n, n_clusters=50):
n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
del cs, cr
def clustering_for_n_clusters(n, n_clusters, aggregate_renewables=True, line_length_factor=1.25,algo_cluster="kmeans"):
aggregate_generators_carriers = (None if aggregate_renewables
else (pd.Index(n.generators.carrier.unique())
.difference(['onwind', 'offwind', 'solar'])))
def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
line_length_factor=1.25, potential_mode='simple', algorithm="kmeans"):
if potential_mode == 'simple':
p_nom_max_strategy = np.sum
elif potential_mode == 'conservative':
p_nom_max_strategy = np.min
else:
raise AttributeError("potential_mode should be one of 'simple' or 'conservative', "
"but is '{}'".format(potential_mode))
clustering = get_clustering_from_busmap(
n, busmap_for_n_clusters(n, n_clusters,algo_cluster),
n, busmap_for_n_clusters(n, n_clusters, algorithm),
bus_strategies=dict(country=_make_consense("Bus", "country")),
aggregate_generators_weighted=True,
aggregate_generators_carriers=aggregate_generators_carriers,
aggregate_generators_carriers=aggregate_carriers,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=line_length_factor
line_length_factor=line_length_factor,
generator_strategies={'p_nom_max': p_nom_max_strategy}
)
return clustering
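# Worked example of the p_nom_max strategies (illustrative numbers): merging
# buses with p_nom_max of 100, 200 and 300 MW gives
#   potential_mode='simple'       -> np.sum -> 600 MW
#   potential_mode='conservative' -> np.min -> 100 MW
# The conservative choice avoids overstating the joint potential when the
# merged sites have differing availability profiles.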
@ -165,7 +160,9 @@ def clustering_for_n_clusters(n, n_clusters, aggregate_renewables=True, line_len
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
s.reset_index().to_file(fn, driver='GeoJSON')
df = s.reset_index()
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema)
def cluster_regions(busmaps, input=None, output=None):
if input is None: input = snakemake.input
@ -202,12 +199,16 @@ if __name__ == "__main__":
n = pypsa.Network(snakemake.input.network)
renewable_carriers = pd.Index([tech
for tech in n.generators.carrier.unique()
if tech.split('-', 2)[0] in snakemake.config['renewable']])
if snakemake.wildcards.clusters.endswith('m'):
n_clusters = int(snakemake.wildcards.clusters[:-1])
aggregate_renewables = False
aggregate_carriers = pd.Index(n.generators.carrier.unique()).difference(renewable_carriers)
else:
n_clusters = int(snakemake.wildcards.clusters)
aggregate_renewables = True
aggregate_carriers = None # All
if n_clusters == len(n.buses):
# Fast-path if no clustering is necessary
@ -216,7 +217,18 @@ if __name__ == "__main__":
clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O'))
else:
line_length_factor = snakemake.config['lines']['length_factor']
clustering = clustering_for_n_clusters(n, n_clusters, aggregate_renewables, line_length_factor=line_length_factor)
def consense(x):
v = x.iat[0]
assert ((x == v).all() or x.isnull().all()), (
"The `potential` configuration option must agree for all renewable carriers, for now!"
)
return v
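# e.g. consense(pd.Series(['simple', 'simple'])) returns 'simple', while
# mixed settings such as ['simple', 'conservative'] trip the assertion:
# clustering cannot yet combine carriers with different potential modes.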
potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential']
for tech in renewable_carriers]))
clustering = clustering_for_n_clusters(n, n_clusters, aggregate_carriers,
line_length_factor=line_length_factor,
potential_mode=potential_mode)
clustering.network.export_to_netcdf(snakemake.output.network)
with pd.HDFStore(snakemake.output.clustermaps, mode='w') as store:

View File

@ -18,9 +18,14 @@ def assign_carriers(n):
for carrier in ["transport","heat","urban heat"]:
n.loads.loc[n.loads.index.str.contains(carrier),"carrier"] = carrier
n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True)
if "carrier" not in n.lines:
n.lines["carrier"] = "AC"
n.lines["carrier"].replace({"AC": "lines"}, inplace=True)
n.links["carrier"].replace({"DC": "lines"}, inplace=True)
if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "":
n.stores.loc["EU gas Store","carrier"] = "gas Store"
@ -56,8 +61,6 @@ def calculate_costs(n,label,costs):
return costs
def calculate_curtailment(n,label,curtailment):
avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
@ -76,12 +79,31 @@ def calculate_energy(n,label,energy):
else:
c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings,axis=0).sum()).groupby(c.df.carrier).sum()
energy = energy.reindex(energy.index|pd.MultiIndex.from_product([[c.list_name],c_energies.index]))
energy.loc[idx[c.list_name,list(c_energies.index)],label] = c_energies.values
energy = include_in_summary(energy, [c.list_name], label, c_energies)
return energy
def include_in_summary(summary, multiindexprefix, label, item):
summary = summary.reindex(summary.index | pd.MultiIndex.from_product([[p] for p in multiindexprefix] + [item.index]))
summary.loc[idx[tuple(multiindexprefix + [list(item.index)])], label] = item.values
return summary
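# Sketch: include_in_summary(capacity, ['generators'], label, s) grows the
# MultiIndex by rows ('generators', carrier) for every entry of s and writes
# s.values into column `label`, so each calculate_* helper appends its
# per-carrier results without rebuilding the frame.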
def calculate_capacity(n,label,capacity):
for c in n.iterate_components(n.one_port_components):
if 'p_nom_opt' in c.df.columns:
c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.passive_branch_components):
c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.controllable_branch_components):
c_capacities = c.df.p_nom_opt.groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
return capacity
def calculate_supply(n,label,supply):
"""calculate the max dispatch of each component at the buses where the loads are attached"""
@ -319,6 +341,7 @@ def calculate_weighted_prices(n,label,weighted_prices):
outputs = ["costs",
"curtailment",
"energy",
"capacity",
"supply",
"supply_energy",
"prices",
@ -328,7 +351,7 @@ outputs = ["costs",
"metrics",
]
def make_summaries(networks_dict):
def make_summaries(networks_dict, country='all'):
columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","lv","opts"])
@ -343,14 +366,21 @@ def make_summaries(networks_dict):
print("does not exist!!")
continue
n = pypsa.Network(filename)
try:
n = pypsa.Network(filename)
except OSError:
logger.warning("Skipping {filename}".format(filename=filename))
continue
assign_carriers(n)
if country != 'all':
n = n[n.buses.country == country]
Nyears = n.snapshot_weightings.sum()/8760.
costs = load_costs(Nyears, snakemake.input[0],
snakemake.config['costs'], snakemake.config['electricity'])
update_transmission_costs(n, costs)
update_transmission_costs(n, costs, simple_hvdc_costs=False)
assign_carriers(n)
for output in outputs:
dfs[output] = globals()["calculate_" + output](n, label, dfs[output])
@ -371,8 +401,9 @@ if __name__ == "__main__":
w = getattr(snakemake.wildcards, key)
return snakemake.config["scenario"][key] if w == "all" else [w]
networks_dict = {(simpl,clusters,lv,opts) : ('results/networks/elec_s{simpl}_{clusters}_lv{lv}_{opts}.nc'
.format(simpl=simpl,
networks_dict = {(simpl,clusters,lv,opts) : ('results/networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc'
.format(network=snakemake.wildcards.network,
simpl=simpl,
clusters=clusters,
opts=opts,
lv=lv))
@ -383,6 +414,6 @@ if __name__ == "__main__":
print(networks_dict)
dfs = make_summaries(networks_dict)
dfs = make_summaries(networks_dict, country=snakemake.wildcards.country)
to_csv(dfs)

View File

@ -187,7 +187,7 @@ fig.savefig(snakemake.output.only_map, dpi=150,
ax1 = ax = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
ax.set_title('Energy per technology', fontdict=dict(fontsize="medium"))
e_primary = aggregate_p(n).drop('load').loc[lambda s: s>0]
e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0]
patches, texts, autotexts = ax.pie(e_primary,
startangle=90,
@ -214,8 +214,8 @@ def split_costs(n):
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(dict(a=costs.drop('load')),
index=['AC-AC', 'AC line', 'onwind', 'offwind', 'solar', 'OCGT', 'battery', 'H2']).dropna()
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')),
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac', 'offwind-dc', 'solar', 'OCGT', 'battery', 'H2']).dropna()
bottom = np.array([0., 0.])
texts = []
@ -249,8 +249,13 @@ ax.grid(True, axis="y", color='k', linestyle='dotted')
#fig.tight_layout()
fig.suptitle('Expansion to {lv} x today\'s line volume at {clusters} clusters'
.format(lv=snakemake.wildcards.lv, clusters=snakemake.wildcards.clusters))
ll = snakemake.wildcards.ll
ll_type = ll[0]
ll_factor = ll[1:]
lbl = dict(c='line cost', v='line volume')[ll_type]
amnt = '{} x today\'s'.format(ll_factor) if ll_factor != 'opt' else 'optimal'
fig.suptitle('Expansion to {amount} {label} at {clusters} clusters'
.format(amount=amnt, label=lbl, clusters=snakemake.wildcards.clusters))
fig.savefig(snakemake.output.ext, transparent=True,
bbox_inches='tight', bbox_extra_artists=[l1, l2, l3, ax1, ax2])

70
scripts/plot_p_nom_max.py Normal file
View File

@ -0,0 +1,70 @@
import logging
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
def cum_p_nom_max(net, tech, country=None):
carrier_b = net.generators.carrier == tech
generators = \
pd.DataFrame(dict(
p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'],
p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(),
country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country)
)).sort_values("p_max_pu", ascending=False)
if country is not None:
generators = generators.loc[generators.country == country]
generators["cum_p_nom_max"] = generators["p_nom_max"].cumsum() / 1e6
return generators
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake, Dict
snakemake = MockSnakemake(
path='..',
wildcards={'clusters': '45,90,181,full',
'country': 'all'},
params=dict(techs=['onwind', 'offwind-ac', 'offwind-dc', 'solar']),
input=Dict(
**{
'full': 'networks/elec_s.nc',
'45': 'networks/elec_s_45.nc',
'90': 'networks/elec_s_90.nc',
'181': 'networks/elec_s_181.nc',
}
),
output=['results/plots/cum_p_nom_max_{clusters}_{country}.pdf']
)
logging.basicConfig(level=snakemake.config['logging_level'])
plot_kwds = dict(drawstyle="steps-post")
clusters = snakemake.wildcards.clusters.split(',')
techs = snakemake.params.techs
country = snakemake.wildcards.country
if country == 'all':
country = None
else:
plot_kwds['marker'] = 'x'
fig, axes = plt.subplots(1, len(techs))
for cluster in clusters:
net = pypsa.Network(getattr(snakemake.input, cluster))
for i, tech in enumerate(techs):
cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds)
for i, tech in enumerate(techs):
ax = axes[i]
ax.set_xlabel(f"Capacity factor of {tech}")
ax.set_ylabel("Cumulative installable capacity / TW")
plt.legend(title="Cluster level")
fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight')

View File

@ -31,6 +31,10 @@ def rename_techs(label):
label = "methanation"
elif label == "offwind":
label = "offshore wind"
elif label == "offwind-ac":
label = "offshore wind ac"
elif label == "offwind-dc":
label = "offshore wind dc"
elif label == "onwind":
label = "onshore wind"
elif label == "ror":
@ -47,7 +51,7 @@ def rename_techs(label):
return label
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind","solar PV","solar thermal","building retrofitting","ground heat pump","air heat pump","resistive heater","CHP","OCGT","gas boiler","gas","natural gas","methanation","hydrogen storage","battery storage","hot water storage"])
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","building retrofitting","ground heat pump","air heat pump","resistive heater","CHP","OCGT","gas boiler","gas","natural gas","methanation","hydrogen storage","battery storage","hot water storage"])
def plot_costs(infn, fn=None):

View File

@ -37,10 +37,56 @@ def set_line_s_max_pu(n):
s_max_pu = np.clip(0.5 + 0.2 * (n_clusters - 37) / (200 - 37), 0.5, 0.7)
n.lines['s_max_pu'] = s_max_pu
def set_line_cost_limit(n, lc, Nyears=1.):
links_dc_b = n.links.carrier == 'DC'
lines_s_nom = n.lines.s_nom.where(
n.lines.type == '',
np.sqrt(3) * n.lines.num_parallel *
n.lines.type.map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom)
)
n.lines['capital_cost_lc'] = n.lines['capital_cost']
n.links['capital_cost_lc'] = n.links['capital_cost']
total_line_cost = ((lines_s_nom * n.lines['capital_cost_lc']).sum() +
n.links.loc[links_dc_b].eval('p_nom * capital_cost_lc').sum())
if lc == 'opt':
costs = load_costs(Nyears, snakemake.input.tech_costs,
snakemake.config['costs'], snakemake.config['electricity'])
update_transmission_costs(n, costs, simple_hvdc_costs=False)
else:
# Either line_volume cap or cost
n.lines['capital_cost'] = 0.
n.links.loc[links_dc_b, 'capital_cost'] = 0.
if lc == 'opt' or float(lc) > 1.0:
n.lines['s_nom_min'] = lines_s_nom
n.lines['s_nom_extendable'] = True
n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom']
n.links.loc[links_dc_b, 'p_nom_extendable'] = True
if lc != 'opt':
n.line_cost_limit = float(lc) * total_line_cost
return n
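# Hypothetical usage (the factor arrives from the {ll} wildcard as a string):
#   n = set_line_cost_limit(n, '1.25')  # cap expansion at 1.25 x today's
#                                       # annualised transmission cost
#   n = set_line_cost_limit(n, 'opt')   # no cap, costs stay in the objective
# The cap is enforced later in solve_network by add_lc_constraint, using the
# original capital costs preserved in the capital_cost_lc columns.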
def set_line_volume_limit(n, lv, Nyears=1.):
links_dc_b = n.links.carrier == 'DC'
if np.isinf(lv):
lines_s_nom = n.lines.s_nom.where(
n.lines.type == '',
np.sqrt(3) * n.lines.num_parallel *
n.lines.type.map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom)
)
total_line_volume = ((lines_s_nom * n.lines['length']).sum() +
n.links.loc[links_dc_b].eval('p_nom * length').sum())
if lv == 'opt':
costs = load_costs(Nyears, snakemake.input.tech_costs,
snakemake.config['costs'], snakemake.config['electricity'])
update_transmission_costs(n, costs, simple_hvdc_costs=True)
@ -49,22 +95,15 @@ def set_line_volume_limit(n, lv, Nyears=1.):
n.lines['capital_cost'] = 0.
n.links.loc[links_dc_b, 'capital_cost'] = 0.
if lv > 1.0:
lines_s_nom = n.lines.s_nom.where(
n.lines.type == '',
np.sqrt(3) * n.lines.num_parallel *
n.lines.type.map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom)
)
if lv == 'opt' or float(lv) > 1.0:
n.lines['s_nom_min'] = lines_s_nom
n.lines['s_nom_extendable'] = True
n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom']
n.links.loc[links_dc_b, 'p_nom_extendable'] = True
n.line_volume_limit = lv * ((lines_s_nom * n.lines['length']).sum() +
n.links.loc[links_dc_b].eval('p_nom * length').sum())
if lv != 'opt':
n.line_volume_limit = float(lv) * total_line_volume
return n
@ -90,9 +129,9 @@ if __name__ == "__main__":
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake
snakemake = MockSnakemake(
wildcards=dict(network='elec', simpl='', clusters='37', lv='2', opts='Co2L-3H'),
wildcards=dict(network='elec', simpl='', clusters='37', ll='v2', opts='Co2L-3H'),
input=['networks/{network}_s{simpl}_{clusters}.nc'],
output=['networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc']
output=['networks/{network}_s{simpl}_{clusters}_l{ll}_{opts}.nc']
)
logging.basicConfig(level=snakemake.config['logging_level'])
@ -119,6 +158,10 @@ if __name__ == "__main__":
# if 'Ep' in opts:
# add_emission_prices(n)
set_line_volume_limit(n, float(snakemake.wildcards.lv), Nyears)
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
if ll_type == 'v':
set_line_volume_limit(n, factor, Nyears)
elif ll_type == 'c':
set_line_cost_limit(n, factor, Nyears)
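# Examples of the combined wildcard (sketch): ll='v1.25' caps line volume at
# 1.25 x today's, ll='copt' co-optimises line costs without a cap, and
# ll='vopt' does the same on the volume side.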
n.export_to_netcdf(snakemake.output[0])

View File

@ -10,7 +10,7 @@ import os
import re
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import connected_components
from scipy.sparse.csgraph import connected_components, dijkstra
import xarray as xr
import geopandas as gpd
import shapely
@ -26,6 +26,7 @@ from pypsa.networkclustering import (busmap_by_stubs, busmap_by_kmeans,
aggregategenerators, aggregateoneport)
from cluster_network import clustering_for_n_clusters, cluster_regions
from add_electricity import load_costs
def simplify_network_to_380(n):
## All goes to v_nom == 380
@ -62,7 +63,51 @@ def simplify_network_to_380(n):
return n, trafo_map
def _aggregate_and_move_components(n, busmap, aggregate_one_ports={"Load", "StorageUnit"}):
def _prepare_connection_costs_per_link(n):
costs = load_costs(n.snapshot_weightings.sum() / 8760, snakemake.input.tech_costs,
snakemake.config['costs'], snakemake.config['electricity'])
connection_costs_per_link = {}
for tech in snakemake.config['renewable']:
if tech.startswith('offwind'):
connection_costs_per_link[tech] = (
n.links.length * snakemake.config['lines']['length_factor'] *
(n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
(1. - n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])
)
return connection_costs_per_link
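# Worked example (illustrative numbers): a 60 km offwind-ac link with
# underwater_fraction 0.8, a submarine cost of 2000 and an underground cost
# of 1000 Eur/MW/km/a yields, with length_factor 1.25,
#   60 * 1.25 * (0.8 * 2000 + 0.2 * 1000) = 135000 Eur/MW/a.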
def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, buses=None):
if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n)
if buses is None:
buses = busmap.index[busmap.index != busmap.values]
connection_costs_to_bus = pd.DataFrame(index=buses)
for tech in connection_costs_per_link:
adj = n.adjacency_matrix(weights=pd.concat(dict(Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0., n.lines.index))))
costs_between_buses = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses))
connection_costs_to_bus[tech] = costs_between_buses[np.arange(len(buses)),
n.buses.index.get_indexer(busmap.loc[buses])]
return connection_costs_to_bus
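# Sketch of the idea: edges are weighted with the per-tech connection cost on
# links and zero on AC lines, then Dijkstra runs from every bus the busmap
# displaces; the picked entry is the cheapest path cost to the bus it merges
# into, which is subsequently added to the capital cost of the generators
# sitting at the displaced bus.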
def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
for tech in connection_costs_to_bus:
tech_b = n.generators.carrier == tech
costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0]
if not costs.empty:
n.generators.loc[costs.index, "capital_cost"] += costs
logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
.format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}):
def replace_components(n, c, df, pnl):
n.mremove(c, n.df(c).index)
@ -71,6 +116,8 @@ def _aggregate_and_move_components(n, busmap, aggregate_one_ports={"Load", "Stor
if not df.empty:
import_series_from_dataframe(n, df, c, attr)
_adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus)
generators, generators_pnl = aggregategenerators(n, busmap)
replace_components(n, "Generator", generators, generators_pnl)
@ -128,6 +175,9 @@ def simplify_links(n):
busmap = n.buses.index.to_series()
connection_costs_per_link = _prepare_connection_costs_per_link(n)
connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link))
for lbl in labels.value_counts().loc[lambda s: s > 2].index:
for b, buses, links in split_links(labels.index[labels == lbl]):
@ -139,6 +189,8 @@ def simplify_links(n):
m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']],
n.buses.loc[buses[1:-1], ['x', 'y']])
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link, buses)
all_links = [i for _, i in sum(links, [])]
p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
@ -168,17 +220,38 @@ def simplify_links(n):
logger.debug("Collecting all components using the busmap")
_aggregate_and_move_components(n, busmap)
_aggregate_and_move_components(n, busmap, connection_costs_to_bus)
return n, busmap
def remove_stubs(n):
logger.info("Removing stubs")
busmap = busmap_by_stubs(n, ['country'])
_aggregate_and_move_components(n, busmap)
busmap = busmap_by_stubs(n) # ['country'])
connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap)
_aggregate_and_move_components(n, busmap, connection_costs_to_bus)
return n, busmap
def cluster(n, n_clusters):
logger.info("Clustering to {} buses".format(n_clusters))
renewable_carriers = pd.Index([tech
for tech in n.generators.carrier.unique()
if tech.split('-', 2)[0] in snakemake.config['renewable']])
def consense(x):
v = x.iat[0]
assert ((x == v).all() or x.isnull().all()), (
"The `potential` configuration option must agree for all renewable carriers, for now!"
)
return v
potential_mode = (consense(pd.Series([snakemake.config['renewable'][tech]['potential']
for tech in renewable_carriers]))
if len(renewable_carriers) > 0 else 'conservative')
clustering = clustering_for_n_clusters(n, n_clusters, potential_mode=potential_mode)
return clustering.network, clustering.busmap
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
@ -186,21 +259,22 @@ if __name__ == "__main__":
from vresutils.snakemake import MockSnakemake, Dict
snakemake = MockSnakemake(
path='..',
wildcards=Dict(simpl=''),
wildcards=Dict(simpl='1024', network='elec'),
input=Dict(
network='networks/elec.nc',
regions_onshore='resources/regions_onshore.geojson',
regions_offshore='resources/regions_offshore.geojson'
network='networks/{network}.nc',
tech_costs="data/costs.csv",
regions_onshore="resources/regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson"
),
output=Dict(
network='networks/elec_s{simpl}.nc',
regions_onshore='resources/regions_onshore_s{simpl}.geojson',
regions_offshore='resources/regions_offshore_s{simpl}.geojson'
network='networks/{network}_s{simpl}.nc',
regions_onshore="resources/regions_onshore_{network}_s{simpl}.geojson",
regions_offshore="resources/regions_offshore_{network}_s{simpl}.geojson",
clustermaps='resources/clustermaps_{network}_s{simpl}.h5'
)
)
logger = logging.getLogger()
logger.setLevel(snakemake.config['logging_level'])
logging.basicConfig(level=snakemake.config['logging_level'])
n = pypsa.Network(snakemake.input.network)
@ -213,11 +287,8 @@ if __name__ == "__main__":
busmaps = [trafo_map, simplify_links_map, stub_map]
if snakemake.wildcards.simpl:
n_clusters = int(snakemake.wildcards.simpl)
clustering = clustering_for_n_clusters(n, n_clusters)
n = clustering.network
busmaps.append(clustering.busmap)
n, cluster_map = cluster(n, int(snakemake.wildcards.simpl))
busmaps.append(cluster_map)
n.export_to_netcdf(snakemake.output.network)

View File

@ -43,7 +43,7 @@ def prepare_network(n, solve_opts=None):
)
if solve_opts.get('noisy_costs'):
for t in n.iterate_components():
for t in n.iterate_components(n.one_port_components):
#if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if 'marginal_cost' in t.df:
@ -89,6 +89,18 @@ def add_lv_constraint(n):
<= line_volume)
)
def add_lc_constraint(n):
line_cost = getattr(n, 'line_cost_limit', None)
if line_cost is not None and not np.isinf(line_cost):
n.model.line_cost_constraint = pypsa.opt.Constraint(
expr=((sum(n.model.passive_branch_s_nom["Line",line]*n.lines.at[line,"capital_cost_lc"]
for line in n.lines.index[n.lines.s_nom_extendable]) +
sum(n.model.link_p_nom[link]*n.links.at[link,"capital_cost_lc"]
for link in n.links.index[(n.links.carrier=='DC') &
n.links.p_nom_extendable]))
<= line_cost)
)
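# The constraint mirrors set_line_cost_limit (sketch):
#   sum_l s_nom_l * c_l + sum_k p_nom_k * c_k  <=  line_cost_limit
# over extendable lines l and extendable DC links k, with the coefficients
# taken from capital_cost_lc, i.e. the original capital costs saved before
# they were zeroed out of the objective.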
def add_eps_storage_constraint(n):
if not hasattr(n, 'epsilon'):
n.epsilon = 1e-5
@ -108,7 +120,7 @@ def fix_branches(n, lines_s_nom=None, links_p_nom=None):
if isinstance(n.opt, pypsa.opf.PersistentSolver):
n.opt.update_var(n.model.link_p_nom)
def solve_network(n, config=None, solver_log=None, opts=None):
def solve_network(n, config=None, solver_log=None, opts=None, callback=None):
if config is None:
config = snakemake.config['solving']
solve_opts = config['options']
@ -118,23 +130,27 @@ def solve_network(n, config=None, solver_log=None, opts=None):
solver_log = snakemake.log.solver
solver_name = solver_options.pop('name')
def run_lopf(n, allow_warning_status=False, fix_zero_lines=False, fix_ext_lines=False):
def extra_postprocessing(n, snapshots, duals):
if hasattr(n, 'line_volume_limit') and hasattr(n.model, 'line_volume_constraint'):
cdata = pd.Series(list(n.model.line_volume_constraint.values()),
index=list(n.model.line_volume_constraint.keys()))
n.line_volume_limit_dual = -cdata.map(duals).sum()
if hasattr(n, 'line_cost_limit') and hasattr(n.model, 'line_cost_constraint'):
cdata = pd.Series(list(n.model.line_cost_constraint.values()),
index=list(n.model.line_cost_constraint.keys()))
n.line_cost_limit_dual = -cdata.map(duals).sum()
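# The summed dual is the shadow price of the volume/cost cap: roughly how
# much the objective would improve per unit of additional allowed line
# volume or line cost, attached to the network for downstream use.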
def run_lopf(n, allow_warning_status=False, fix_ext_lines=False):
free_output_series_dataframes(n)
if not hasattr(n, 'opt') or not isinstance(n.opt, pypsa.opf.PersistentSolver):
pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation'])
add_opts_constraints(n, opts)
pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation'])
add_opts_constraints(n, opts)
if not fix_ext_lines:
add_lv_constraint(n)
# add_eps_storage_constraint(n)
add_lc_constraint(n)
pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name)
if fix_zero_lines:
fix_lines_b = (n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable
fix_links_b = (n.links.p_nom_opt == 0.) & n.links.p_nom_extendable
fix_branches(n,
lines_s_nom=pd.Series(0., n.lines.index[fix_lines_b]),
links_p_nom=pd.Series(0., n.links.index[fix_links_b]))
pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name)
if fix_ext_lines:
fix_branches(n,
@ -149,6 +165,7 @@ def solve_network(n, config=None, solver_log=None, opts=None):
solver_logfile=solver_log,
solver_options=solver_options,
formulation=solve_opts['formulation'],
extra_postprocessing=extra_postprocessing
#free_memory={'pypsa'}
)
@ -159,6 +176,7 @@ def solve_network(n, config=None, solver_log=None, opts=None):
return status, termination_condition
iteration = 0
lines_ext_b = n.lines.s_nom_extendable
if lines_ext_b.any():
# puh: ok, we need to iterate, since there is a relation
@ -174,7 +192,7 @@ def solve_network(n, config=None, solver_log=None, opts=None):
lines_ext_typed_b = (n.lines.type != '') & lines_ext_b
lines_ext_untyped_b = (n.lines.type == '') & lines_ext_b
def update_line_parameters(n, zero_lines_below=10, fix_zero_lines=False):
def update_line_parameters(n, zero_lines_below=10):
if zero_lines_below > 0:
n.lines.loc[n.lines.s_nom_opt < zero_lines_below, 's_nom_opt'] = 0.
n.links.loc[n.links.p_nom_opt < zero_lines_below, 'p_nom_opt'] = 0.
@ -191,25 +209,10 @@ def solve_network(n, config=None, solver_log=None, opts=None):
)
logger.debug("lines.num_parallel={}".format(n.lines.loc[lines_ext_typed_b, 'num_parallel']))
if isinstance(n.opt, pypsa.opf.PersistentSolver):
n.calculate_dependent_values()
assert solve_opts['formulation'] == 'kirchhoff', \
"Updating persistent solvers has only been implemented for the kirchhoff formulation for now"
n.opt.remove_constraint(n.model.cycle_constraints)
del n.model.cycle_constraints_index
del n.model.cycle_constraints_index_0
del n.model.cycle_constraints_index_1
del n.model.cycle_constraints
pypsa.opf.define_passive_branch_flows_with_kirchhoff(n, n.snapshots, skip_vars=True)
n.opt.add_constraint(n.model.cycle_constraints)
iteration = 1
iteration += 1
lines['s_nom_opt'] = lines['s_nom'] * n.lines['num_parallel'].where(n.lines.type != '', 1.)
status, termination_condition = run_lopf(n, allow_warning_status=True)
if callback is not None: callback(n, iteration, status)
def msq_diff(n):
lines_err = np.sqrt(((n.lines['s_nom_opt'] - lines['s_nom_opt'])**2).mean())/lines['s_nom_opt'].mean()
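# msq_diff is the root-mean-square change of s_nom_opt between consecutive
# iterations, normalised by the mean optimised capacity; iterating stops once
# it drops below the configured threshold (or max_iterations is reached), at
# which point line impedances and capacities are mutually consistent.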
@ -228,25 +231,16 @@ def solve_network(n, config=None, solver_log=None, opts=None):
iteration += 1
status, termination_condition = run_lopf(n, allow_warning_status=True)
if callback is not None: callback(n, iteration, status)
update_line_parameters(n, zero_lines_below=100)
logger.info("Starting last run with fixed extendable lines")
# Not really needed, could also be taken out
# if 'snakemake' in globals():
# fn = os.path.basename(snakemake.output[0])
# n.export_to_netcdf('/home/vres/data/jonas/playground/pypsa-eur/' + fn)
iteration += 1
status, termination_condition = run_lopf(n, fix_ext_lines=True)
# Drop zero lines from network
# zero_lines_i = n.lines.index[(n.lines.s_nom_opt == 0.) & n.lines.s_nom_extendable]
# if len(zero_lines_i):
# n.mremove("Line", zero_lines_i)
# zero_links_i = n.links.index[(n.links.p_nom_opt == 0.) & n.links.p_nom_extendable]
# if len(zero_links_i):
# n.mremove("Link", zero_links_i)
if callback is not None: callback(n, iteration, status)
return n

View File

@ -0,0 +1,47 @@
import numpy as np
import pandas as pd
import logging
logger = logging.getLogger(__name__)
from solve_network import patch_pyomo_tmpdir, prepare_network, solve_network
import pypsa
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils.snakemake import MockSnakemake, Dict
snakemake = MockSnakemake(
wildcards=dict(network='elec', simpl='', clusters='45', lv='1.25', opts='Co2L-3H'),
input=["networks/{network}_s{simpl}_{clusters}_lv{lv}_{opts}.nc"],
output=["results/networks/s{simpl}_{clusters}_lv{lv}_{opts}_trace.nc"],
log=dict(python="logs/{network}_s{simpl}_{clusters}_lv{lv}_{opts}_python_trace.log")
)
tmpdir = snakemake.config['solving'].get('tmpdir')
if tmpdir is not None:
patch_pyomo_tmpdir(tmpdir)
logging.basicConfig(filename=snakemake.log.python,
level=snakemake.config['logging_level'])
n = pypsa.Network(snakemake.input[0])
solver_log = 'solver.log'
config = snakemake.config['solving']
opts = snakemake.wildcards.opts.split('-')
def save_optimal_capacities(net, iteration, status):
net.lines[f"s_nom_opt_{iteration}"] = net.lines["s_nom_opt"]
net.links[f"p_nom_opt_{iteration}"] = net.links["p_nom_opt"]
setattr(net, f"status_{iteration}", status)
setattr(net, f"objective_{iteration}", net.objective)
net.iteration = iteration
net.export_to_netcdf(snakemake.output[0])
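# Sketch of the hand-off: solve_network calls callback(n, iteration, status)
# after every LOPF pass, so columns s_nom_opt_1, s_nom_opt_2, ... accumulate
# the capacity trajectory across the impedance-update iterations and every
# intermediate state is flushed to the output file.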
config['options']['max_iterations'] = 12
n = prepare_network(n, config['options'])
n = solve_network(n, config, solver_log, opts, save_optimal_capacities)
n.export_to_netcdf(snakemake.output[0])