some refactoring and code cleaning

Fabian Neumann 2020-09-11 12:40:53 +02:00
parent fee8ef92d4
commit f6dd2e12f3
38 changed files with 331 additions and 736 deletions

.gitignore (vendored) | 1

@@ -7,6 +7,7 @@
 __pycache__
 *dconf
 gurobi.log
+.vscode
 /bak
 /resources

Snakefile | 129

@@ -11,33 +11,31 @@ if not exists("config.yaml"):
 configfile: "config.yaml"

 COSTS="data/costs.csv"
+ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4)

 wildcard_constraints:
-    ll="(v|c)([0-9\.]+|opt|all)|all", # line limit, can be volume or cost
     simpl="[a-zA-Z0-9]*|all",
     clusters="[0-9]+m?|all",
-    sectors="[+a-zA-Z0-9]+",
+    ll="(v|c)([0-9\.]+|opt|all)|all",
     opts="[-+a-zA-Z0-9\.]*"

 rule cluster_all_elec_networks:
-    input:
-        expand("networks/elec_s{simpl}_{clusters}.nc",
-               **config['scenario'])
+    input: expand("networks/elec_s{simpl}_{clusters}.nc", **config['scenario'])

 rule extra_components_all_elec_networks:
-    input:
-        expand("networks/elec_s{simpl}_{clusters}_ec.nc",
-               **config['scenario'])
+    input: expand("networks/elec_s{simpl}_{clusters}_ec.nc", **config['scenario'])

 rule prepare_all_elec_networks:
-    input:
-        expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-               **config['scenario'])
+    input: expand("networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])

 rule solve_all_elec_networks:
-    input:
-        expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-               **config['scenario'])
+    input: expand("results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])
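The four collector rules above are condensed to one-line inputs. For context, a minimal standalone sketch of what such an ``expand()`` call evaluates to, assuming a hypothetical ``config['scenario']`` trimmed to the keys the pattern uses::

    from snakemake.io import expand

    scenario = {'simpl': [''], 'clusters': [37, 128]}  # hypothetical values
    print(expand("networks/elec_s{simpl}_{clusters}.nc", **scenario))
    # ['networks/elec_s_37.nc', 'networks/elec_s_128.nc']

``expand()`` takes the product of all supplied wildcard value lists, so every scenario combination becomes one target file.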
 if config['enable'].get('prepare_links_p_nom', False):
     rule prepare_links_p_nom:
@@ -45,7 +43,6 @@ if config['enable'].get('prepare_links_p_nom', False):
         log: 'logs/prepare_links_p_nom.log'
         threads: 1
         resources: mem=500
-        # group: 'nonfeedin_preparation'
         script: 'scripts/prepare_links_p_nom.py'

@@ -56,15 +53,18 @@ datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls',
              'nama_10r_3gdp.tsv.gz', 'time_series_60min_singleindex_filtered.csv',
              'corine/g250_clc06_V18_5.tif']

 if not config.get('tutorial', False):
     datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"])

 if config['enable'].get('retrieve_databundle', True):
     rule retrieve_databundle:
         output: expand('data/bundle/{file}', file=datafiles)
         log: "logs/retrieve_databundle.log"
         script: 'scripts/retrieve_databundle.py'

 rule build_powerplants:
     input:
         base_network="networks/base.nc",
@@ -73,9 +73,9 @@ rule build_powerplants:
     log: "logs/build_powerplants.log"
     threads: 1
     resources: mem=500
-    # group: 'nonfeedin_preparation'
     script: "scripts/build_powerplants.py"

 rule base_network:
     input:
         eg_buses='data/entsoegridkit/buses.csv',
@@ -94,9 +94,9 @@ rule base_network:
     benchmark: "benchmarks/base_network"
     threads: 1
     resources: mem=500
-    # group: 'nonfeedin_preparation'
     script: "scripts/base_network.py"

 rule build_shapes:
     input:
         naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp',
@@ -114,9 +114,9 @@ rule build_shapes:
     log: "logs/build_shapes.log"
     threads: 1
     resources: mem=500
-    # group: 'nonfeedin_preparation'
     script: "scripts/build_shapes.py"

 rule build_bus_regions:
     input:
         country_shapes='resources/country_shapes.geojson',
@@ -126,20 +126,21 @@ rule build_bus_regions:
         regions_onshore="resources/regions_onshore.geojson",
         regions_offshore="resources/regions_offshore.geojson"
     log: "logs/build_bus_regions.log"
+    threads: 1
     resources: mem=1000
-    # group: 'nonfeedin_preparation'
     script: "scripts/build_bus_regions.py"

 if config['enable'].get('build_cutout', False):
     rule build_cutout:
         output: directory("cutouts/{cutout}")
         log: "logs/build_cutout/{cutout}.log"
-        resources: mem=config['atlite'].get('nprocesses', 4) * 1000
-        threads: config['atlite'].get('nprocesses', 4)
         benchmark: "benchmarks/build_cutout_{cutout}"
-        # group: 'feedin_preparation'
+        threads: ATLITE_NPROCESSES
+        resources: mem=ATLITE_NPROCESSES * 1000
         script: "scripts/build_cutout.py"
 if config['enable'].get('retrieve_cutout', True):
     rule retrieve_cutout:
         output: directory(expand("cutouts/{cutouts}", **config['atlite'])),
@@ -156,34 +157,37 @@ if config['enable'].get('build_natura_raster', False):
         log: "logs/build_natura_raster.log"
         script: "scripts/build_natura_raster.py"

 if config['enable'].get('retrieve_natura_raster', True):
     rule retrieve_natura_raster:
         output: "resources/natura.tiff"
         log: "logs/retrieve_natura_raster.log"
         script: 'scripts/retrieve_natura_raster.py'

 rule build_renewable_profiles:
     input:
         base_network="networks/base.nc",
         corine="data/bundle/corine/g250_clc06_V18_5.tif",
         natura="resources/natura.tiff",
-        gebco=lambda wildcards: ("data/bundle/GEBCO_2014_2D.nc"
-                                 if "max_depth" in config["renewable"][wildcards.technology].keys()
-                                 else []),
+        gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc"
+                         if "max_depth" in config["renewable"][w.technology].keys()
+                         else []),
         country_shapes='resources/country_shapes.geojson',
         offshore_shapes='resources/offshore_shapes.geojson',
-        regions=lambda wildcards: ("resources/regions_onshore.geojson"
-                                   if wildcards.technology in ('onwind', 'solar')
-                                   else "resources/regions_offshore.geojson"),
-        cutout=lambda wildcards: "cutouts/" + config["renewable"][wildcards.technology]['cutout']
-    output: profile="resources/profile_{technology}.nc",
+        regions=lambda w: ("resources/regions_onshore.geojson"
+                           if w.technology in ('onwind', 'solar')
+                           else "resources/regions_offshore.geojson"),
+        cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout']
+    output:
+        profile="resources/profile_{technology}.nc",
     log: "logs/build_renewable_profile_{technology}.log"
-    resources: mem=config['atlite'].get('nprocesses', 2) * 5000
-    threads: config['atlite'].get('nprocesses', 2)
     benchmark: "benchmarks/build_renewable_profiles_{technology}"
-    # group: 'feedin_preparation'
+    threads: ATLITE_NPROCESSES
+    resources: mem=ATLITE_NPROCESSES * 5000
     script: "scripts/build_renewable_profiles.py"

 if 'hydro' in config['renewable'].keys():
     rule build_hydro_profile:
         input:
@@ -193,9 +197,9 @@ if 'hydro' in config['renewable'].keys():
         output: 'resources/profile_hydro.nc'
         log: "logs/build_hydro_profile.log"
         resources: mem=5000
-        # group: 'feedin_preparation'
         script: 'scripts/build_hydro_profile.py'

 rule add_electricity:
     input:
         base_network='networks/base.nc',
@@ -206,16 +210,16 @@ rule add_electricity:
         geth_hydro_capacities='data/geth2015_hydro_capacities.csv',
         opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
         nuts3_shapes='resources/nuts3_shapes.geojson',
-        **{'profile_' + t: "resources/profile_" + t + ".nc"
-           for t in config['renewable']}
+        **{'profile_' + tech: "resources/profile_" + tech + ".nc"
+           for tech in config['renewable']}
     output: "networks/elec.nc"
     log: "logs/add_electricity.log"
     benchmark: "benchmarks/add_electricity"
     threads: 1
     resources: mem=3000
-    # group: 'build_pypsa_networks'
     script: "scripts/add_electricity.py"

 rule simplify_network:
     input:
         network='networks/{network}.nc',
@@ -231,9 +235,9 @@ rule simplify_network:
     benchmark: "benchmarks/simplify_network/{network}_s{simpl}"
     threads: 1
     resources: mem=4000
-    # group: 'build_pypsa_networks'
     script: "scripts/simplify_network.py"

 rule cluster_network:
     input:
         network='networks/{network}_s{simpl}.nc',
@@ -250,7 +254,6 @@ rule cluster_network:
     benchmark: "benchmarks/cluster_network/{network}_s{simpl}_{clusters}"
     threads: 1
     resources: mem=3000
-    # group: 'build_pypsa_networks'
     script: "scripts/cluster_network.py"

@@ -263,7 +266,6 @@ rule add_extra_components:
     benchmark: "benchmarks/add_extra_components/{network}_s{simpl}_{clusters}_ec"
     threads: 1
     resources: mem=3000
-    # group: 'build_pypsa_networks'
     script: "scripts/add_extra_components.py"

@@ -271,11 +273,12 @@ rule prepare_network:
     input: 'networks/{network}_s{simpl}_{clusters}_ec.nc', tech_costs=COSTS
     output: 'networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc'
     log: "logs/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"
+    benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
     threads: 1
     resources: mem=1000
-    # benchmark: "benchmarks/prepare_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
     script: "scripts/prepare_network.py"

 def memory(w):
     factor = 3.
     for o in w.opts.split('-'):
@@ -287,12 +290,11 @@ def memory(w):
         return int(factor * (18000 + 180 * int(w.clusters[:-1])))
     else:
         return int(factor * (10000 + 195 * int(w.clusters)))
-    # return 4890+310 * int(w.clusters)
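A worked example of the branch of ``memory(w)`` shown in this hunk, assuming the default ``factor`` of 3.0 (i.e. no memory-relevant tokens in ``{opts}``)::

    factor = 3.0
    # plain cluster number, e.g. clusters="128":
    print(int(factor * (10000 + 195 * 128)))   # 104880 (MB)
    # cluster wildcard ending in 'm', e.g. clusters="128m":
    print(int(factor * (18000 + 180 * 128)))   # 123120 (MB)

The stale commented-out alternative estimate below the ``if``/``else`` is deleted.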
 rule solve_network:
     input: "networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
     output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
-    shadow: "shallow"
     log:
         solver=normpath("logs/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"),
         python="logs/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
@@ -300,15 +302,15 @@ rule solve_network:
     benchmark: "benchmarks/solve_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
     threads: 4
     resources: mem=memory
-    # group: "solve" # with group, threads is ignored https://bitbucket.org/snakemake/snakemake/issues/971/group-job-description-does-not-contain
+    shadow: "shallow"
     script: "scripts/solve_network.py"

 rule solve_operations_network:
     input:
         unprepared="networks/{network}_s{simpl}_{clusters}_ec.nc",
         optimized="results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
     output: "results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc"
-    shadow: "shallow"
     log:
         solver=normpath("logs/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"),
         python="logs/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
@@ -316,9 +318,10 @@ rule solve_operations_network:
     benchmark: "benchmarks/solve_operations_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
     threads: 4
     resources: mem=(lambda w: 5000 + 372 * int(w.clusters))
-    # group: "solve_operations"
+    shadow: "shallow"
     script: "scripts/solve_operations_network.py"

 rule plot_network:
     input:
         network="results/networks/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
@@ -329,6 +332,7 @@ rule plot_network:
     log: "logs/plot_network/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log"
     script: "scripts/plot_network.py"

 def input_make_summary(w):
     # It's mildly hacky to include the separate costs input as first entry
     if w.ll.endswith("all"):
@@ -344,42 +348,48 @@ def input_make_summary(w):
                   **{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
                      for k in ["simpl", "clusters", "opts"]}))

 rule make_summary:
     input: input_make_summary
     output: directory("results/summaries/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}")
     log: "logs/make_summary/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
     script: "scripts/make_summary.py"

 rule plot_summary:
     input: "results/summaries/{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
     output: "results/plots/summary_{summary}_{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}"
     log: "logs/plot_summary/{summary}_{network}_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log"
     script: "scripts/plot_summary.py"

-def input_plot_p_nom_max(wildcards):
-    return [('networks/{network}_s{simpl}{maybe_cluster}.nc'
-             .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **wildcards))
-            for c in wildcards.clusts.split(",")]
+def input_plot_p_nom_max(w):
+    return [("networks/{network}_s{simpl}{maybe_cluster}.nc"
+             .format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w))
+            for c in w.clusts.split(",")]

 rule plot_p_nom_max:
     input: input_plot_p_nom_max
     output: "results/plots/{network}_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}"
     log: "logs/plot_p_nom_max/{network}_s{simpl}_{clusts}_{techs}_{country}_{ext}.log"
     script: "scripts/plot_p_nom_max.py"
 rule build_country_flh:
     input:
         base_network="networks/base.nc",
         corine="data/bundle/corine/g250_clc06_V18_5.tif",
         natura="resources/natura.tiff",
-        gebco=lambda wildcards: ("data/bundle/GEBCO_2014_2D.nc"
-                                 if "max_depth" in config["renewable"][wildcards.technology].keys()
-                                 else []),
+        gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc"
+                         if "max_depth" in config["renewable"][w.technology].keys()
+                         else []),
         country_shapes='resources/country_shapes.geojson',
         offshore_shapes='resources/offshore_shapes.geojson',
         pietzker="data/pietzker2014.xlsx",
         regions=lambda w: ("resources/country_shapes.geojson"
                            if w.technology in ('onwind', 'solar')
                            else "resources/offshore_shapes.geojson"),
         cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout']
     output:
         area="resources/country_flh_area_{technology}.csv",
@@ -390,9 +400,4 @@ rule build_country_flh:
     log: "logs/build_country_flh_{technology}.log"
     resources: mem=10000
     benchmark: "benchmarks/build_country_flh_{technology}"
-    # group: 'feedin_preparation'
     script: "scripts/build_country_flh.py"
-
-# Local Variables:
-# mode: python
-# End:

cluster.yaml (deleted)

@@ -1,22 +0,0 @@
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-__default__:
-    log: "logs/cluster/{{name}}.log"
-
-feedin_preparation:
-    walltime: "12:00:00"
-
-solve_network:
-    walltime: "05:00:00:00"
-
-trace_solve_network:
-    walltime: "05:00:00:00"
-
-solve:
-    walltime: "05:00:00:00"
-    threads: 4 # Group threads are not aggregated
-
-solve_operations:
-    walltime: "01:00:00:00"

config.default.yaml

@@ -12,7 +12,6 @@ logging:
 summary_dir: results

 scenario:
-  sectors: [E]
   simpl: ['']
   ll: ['copt']
   clusters: [37, 128, 256, 512, 1024]
@@ -257,67 +256,18 @@ plotting:
     'waste' : '#68896b'
     'geothermal' : '#ba91b1'
     "OCGT" : "#d35050"
-    "OCGT marginal" : "#d35050"
-    "OCGT-heat" : "#d35050"
-    "gas boiler" : "#d35050"
-    "gas boilers" : "#d35050"
-    "gas boiler marginal" : "#d35050"
-    "gas-to-power/heat" : "#d35050"
     "gas" : "#d35050"
     "natural gas" : "#d35050"
     "CCGT" : "#b20101"
-    "CCGT marginal" : "#b20101"
-    "Nuclear" : "#ff9000"
-    "Nuclear marginal" : "#ff9000"
     "nuclear" : "#ff9000"
     "coal" : "#707070"
-    "Coal" : "#707070"
-    "Coal marginal" : "#707070"
     "lignite" : "#9e5a01"
-    "Lignite" : "#9e5a01"
-    "Lignite marginal" : "#9e5a01"
-    "Oil" : "#262626"
     "oil" : "#262626"
     "H2" : "#ea048a"
     "hydrogen storage" : "#ea048a"
-    "Sabatier" : "#a31597"
-    "methanation" : "#a31597"
-    "helmeth" : "#a31597"
-    "DAC" : "#d284ff"
-    "co2 stored" : "#e5e5e5"
-    "CO2 sequestration" : "#e5e5e5"
     "battery" : "#b8ea04"
-    "battery storage" : "#b8ea04"
-    "Li ion" : "#b8ea04"
-    "BEV charger" : "#e2ff7c"
-    "V2G" : "#7a9618"
-    "transport fuel cell" : "#e884be"
-    "retrofitting" : "#e0d6a8"
-    "building retrofitting" : "#e0d6a8"
-    "heat pumps" : "#ff9768"
-    "heat pump" : "#ff9768"
-    "air heat pump" : "#ffbea0"
-    "ground heat pump" : "#ff7a3d"
-    "power-to-heat" : "#a59e7c"
-    "power-to-gas" : "#db8585"
-    "power-to-liquid" : "#a9acd1"
-    "Fischer-Tropsch" : "#a9acd1"
-    "resistive heater" : "#aa4925"
-    "water tanks" : "#401f75"
-    "hot water storage" : "#401f75"
-    "hot water charging" : "#351c5e"
-    "hot water discharging" : "#683ab2"
-    "CHP" : "#d80a56"
-    "CHP heat" : "#d80a56"
-    "CHP electric" : "#d80a56"
-    "district heating" : "#93864b"
-    "Ambient" : "#262626"
     "Electric load" : "#f9d002"
     "electricity" : "#f9d002"
-    "Heat load" : "#d35050"
-    "heat" : "#d35050"
-    "Transport load" : "#235ebc"
-    "transport" : "#235ebc"
     "lines" : "#70af1d"
     "transmission lines" : "#70af1d"
     "AC-AC" : "#70af1d"
@@ -337,18 +287,5 @@ plotting:
     hydro: "Reservoir & Dam"
     battery: "Battery Storage"
     H2: "Hydrogen Storage"
-    lines: "Transmission lines"
-    ror: "Run of river"
+    lines: "Transmission Lines"
+    ror: "Run of River"
-  nice_names_n:
-    OCGT: "Open-Cycle\nGas"
-    CCGT: "Combined-Cycle\nGas"
-    offwind-ac: "Offshore\nWind (AC)"
-    offwind-dc: "Offshore\nWind (DC)"
-    onwind: "Onshore\nWind"
-    battery: "Battery\nStorage"
-    H2: "Hydrogen\nStorage"
-    lines: "Transmission\nlines"
-    ror: "Run of\nriver"
-    PHS: "Pumped Hydro\nStorage"
-    hydro: "Reservoir\n& Dam"

config.tutorial.yaml

@@ -11,7 +11,6 @@ logging:
 summary_dir: results

 scenario:
-  sectors: [E]
   simpl: ['']
   ll: ['copt']
   clusters: [5]
@@ -169,164 +168,5 @@ solving:
     clip_p_max_pu: 0.01
     skip_iterations: false
     track_iterations: false
-    #nhours: 10
   solver:
     name: cbc
-  # solver:
-  #   name: gurobi
-  #   threads: 4
-  #   method: 2 # barrier
-  #   crossover: 0
-  #   BarConvTol: 1.e-5
-  #   FeasibilityTol: 1.e-6
-  #   AggFill: 0
-  #   PreDual: 0
-  #   GURO_PAR_BARDENSETHRESH: 200
-  # solver:
-  #   name: cplex
-  #   threads: 4
-  #   lpmethod: 4 # barrier
-  #   solutiontype: 2 # non basic solution, ie no crossover
-  #   barrier_convergetol: 1.e-5
-  #   feasopt_tolerance: 1.e-6
-
-plotting:
-  map:
-    figsize: [7, 7]
-    boundaries: [-10.2, 29, 35, 72]
-    p_nom:
-      bus_size_factor: 5.e+4
-      linewidth_factor: 3.e+3
-  costs_max: 800
-  costs_threshold: 1
-  energy_max: 15000.
-  energy_min: -10000.
-  energy_threshold: 50.
-  vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"]
-  conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"]
-  storage_techs: ["hydro+PHS", "battery", "H2"]
-  load_carriers: ["AC load"]
-  AC_carriers: ["AC line", "AC transformer"]
-  link_carriers: ["DC line", "Converter AC-DC"]
-  tech_colors:
-    "onwind" : "#235ebc"
-    "onshore wind" : "#235ebc"
-    'offwind' : "#6895dd"
-    'offwind-ac' : "#6895dd"
-    'offshore wind' : "#6895dd"
-    'offshore wind ac' : "#6895dd"
-    'offwind-dc' : "#74c6f2"
-    'offshore wind dc' : "#74c6f2"
-    "hydro" : "#08ad97"
-    "hydro+PHS" : "#08ad97"
-    "PHS" : "#08ad97"
-    "hydro reservoir" : "#08ad97"
-    'hydroelectricity' : '#08ad97'
-    "ror" : "#4adbc8"
-    "run of river" : "#4adbc8"
-    'solar' : "#f9d002"
-    'solar PV' : "#f9d002"
-    'solar thermal' : '#ffef60'
-    'biomass' : '#0c6013'
-    'solid biomass' : '#06540d'
-    'biogas' : '#23932d'
-    'waste' : '#68896b'
-    'geothermal' : '#ba91b1'
-    "OCGT" : "#d35050"
-    "OCGT marginal" : "#d35050"
-    "OCGT-heat" : "#d35050"
-    "gas boiler" : "#d35050"
-    "gas boilers" : "#d35050"
-    "gas boiler marginal" : "#d35050"
-    "gas-to-power/heat" : "#d35050"
-    "gas" : "#d35050"
-    "natural gas" : "#d35050"
-    "CCGT" : "#b20101"
-    "CCGT marginal" : "#b20101"
-    "Nuclear" : "#ff9000"
-    "Nuclear marginal" : "#ff9000"
-    "nuclear" : "#ff9000"
-    "coal" : "#707070"
-    "Coal" : "#707070"
-    "Coal marginal" : "#707070"
-    "lignite" : "#9e5a01"
-    "Lignite" : "#9e5a01"
-    "Lignite marginal" : "#9e5a01"
-    "Oil" : "#262626"
-    "oil" : "#262626"
-    "H2" : "#ea048a"
-    "hydrogen storage" : "#ea048a"
-    "Sabatier" : "#a31597"
-    "methanation" : "#a31597"
-    "helmeth" : "#a31597"
-    "DAC" : "#d284ff"
-    "co2 stored" : "#e5e5e5"
-    "CO2 sequestration" : "#e5e5e5"
-    "battery" : "#b8ea04"
-    "battery storage" : "#b8ea04"
-    "Li ion" : "#b8ea04"
-    "BEV charger" : "#e2ff7c"
-    "V2G" : "#7a9618"
-    "transport fuel cell" : "#e884be"
-    "retrofitting" : "#e0d6a8"
-    "building retrofitting" : "#e0d6a8"
-    "heat pumps" : "#ff9768"
-    "heat pump" : "#ff9768"
-    "air heat pump" : "#ffbea0"
-    "ground heat pump" : "#ff7a3d"
-    "power-to-heat" : "#a59e7c"
-    "power-to-gas" : "#db8585"
-    "power-to-liquid" : "#a9acd1"
-    "Fischer-Tropsch" : "#a9acd1"
-    "resistive heater" : "#aa4925"
-    "water tanks" : "#401f75"
-    "hot water storage" : "#401f75"
-    "hot water charging" : "#351c5e"
-    "hot water discharging" : "#683ab2"
-    "CHP" : "#d80a56"
-    "CHP heat" : "#d80a56"
-    "CHP electric" : "#d80a56"
-    "district heating" : "#93864b"
-    "Ambient" : "#262626"
-    "Electric load" : "#f9d002"
-    "electricity" : "#f9d002"
-    "Heat load" : "#d35050"
-    "heat" : "#d35050"
-    "Transport load" : "#235ebc"
-    "transport" : "#235ebc"
-    "lines" : "#70af1d"
-    "transmission lines" : "#70af1d"
-    "AC-AC" : "#70af1d"
-    "AC line" : "#70af1d"
-    "links" : "#8a1caf"
-    "HVDC links" : "#8a1caf"
-    "DC-DC" : "#8a1caf"
-    "DC link" : "#8a1caf"
-  nice_names:
-    OCGT: "Open-Cycle Gas"
-    CCGT: "Combined-Cycle Gas"
-    offwind-ac: "Offshore Wind (AC)"
-    offwind-dc: "Offshore Wind (DC)"
-    onwind: "Onshore Wind"
-    solar: "Solar"
-    PHS: "Pumped Hydro Storage"
-    hydro: "Reservoir & Dam"
-    battery: "Battery Storage"
-    H2: "Hydrogen Storage"
-    lines: "Transmission lines"
-    ror: "Run of river"
-  nice_names_n:
-    OCGT: "Open-Cycle\nGas"
-    CCGT: "Combined-Cycle\nGas"
-    offwind-ac: "Offshore\nWind (AC)"
-    offwind-dc: "Offshore\nWind (DC)"
-    onwind: "Onshore\nWind"
-    battery: "Battery\nStorage"
-    H2: "Hydrogen\nStorage"
-    lines: "Transmission\nlines"
-    ror: "Run of\nriver"
-    PHS: "Pumped Hydro\nStorage"
-    hydro: "Reservoir\n& Dam"

doc/configtables/plotting.csv

@@ -11,5 +11,4 @@ energy_max,TWh,float,"Upper y-axis limit in energy bar plots."
 energy_min,TWh,float,"Lower y-axis limit in energy bar plots."
 energy_threshold,TWh,float,"Threshold below which technologies will not be shown in energy bar plots."
 tech_colors,--,"carrier -> HEX colour code","Mapping from network ``carrier`` to a colour (`HEX colour code <https://en.wikipedia.org/wiki/Web_colors#Hex_triplet>`_)."
 nice_names,--,"str -> str","Mapping from network ``carrier`` to a more readable name."
-nice_names_n,--,"str -> str","Same as nice_names, but with linebreaks."

doc/configtables/scenario.csv

@@ -1,6 +1,5 @@
 ,Unit,Values,Description
-sectors,--,"Must be 'elec'","Placeholder for integration of other energy sectors."
 simpl,--,cf. :ref:`simpl`,"List of ``{simpl}`` wildcards to run."
-ll,--,cf. :ref:`ll`,"List of ``{ll}`` wildcards to run."
 clusters,--,cf. :ref:`clusters`,"List of ``{clusters}`` wildcards to run."
+ll,--,cf. :ref:`ll`,"List of ``{ll}`` wildcards to run."
 opts,--,cf. :ref:`opts`,"List of ``{opts}`` wildcards to run."
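The wildcard lists documented in this table live under ``config['scenario']`` and feed directly into the Snakefile's ``expand()`` calls. A hypothetical parsed value matching the new row order::

    scenario = {
        'simpl': [''],          # {simpl} wildcard values
        'clusters': [5],        # {clusters} wildcard values
        'll': ['copt'],         # {ll} wildcard values
        'opts': ['Co2L-24H'],   # {opts} wildcard values
    }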

README.rst

@@ -194,10 +194,10 @@ Licence
 PyPSA-Eur work is released under multiple licenses:

-* All original source code is licensed as free software under `GPL-3.0-or-later <LICENSES/GPL-3.0-or-later.txt>`_.
-* The documentation is licensed under `CC-BY-4.0 <LICENSES/CC-BY-4.0.txt>`_.
-* Configuration files are mostly licensed under `CC0-1.0 <LICENSES/CC0-1.0.txt>`_.
-* Data files are licensed under `CC-BY-4.0 <LICENSES/CC-BY-4.0.txt>`_.
+* All original source code is licensed as free software under `GPL-3.0-or-later <.licenses/GPL-3.0-or-later.txt>`_.
+* The documentation is licensed under `CC-BY-4.0 <.licenses/CC-BY-4.0.txt>`_.
+* Configuration files are mostly licensed under `CC0-1.0 <.licenses/CC0-1.0.txt>`_.
+* Data files are licensed under `CC-BY-4.0 <.licenses/CC-BY-4.0.txt>`_.

 See the individual files and the `dep5 <.reuse/dep5>`_ file for license details.

doc/wildcards.rst

@@ -18,16 +18,6 @@ what data to retrieve and what files to produce.
 Detailed explanations of how wildcards work in ``snakemake`` can be found in the
 `relevant section of the documentation <https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#wildcards>`_.

-.. _network:
-
-The ``{network}`` wildcard
-==========================
-
-The ``{network}`` wildcard specifies the considered energy sector(s)
-and, as currently only ``elec`` (for electricity) is included,
-it currently represents rather a placeholder wildcard to facilitate
-future extensions including multiple energy sectors at once.
-
 .. _simpl:

 The ``{simpl}`` wildcard

matplotlibrc (deleted)

@@ -1 +0,0 @@
-backend : Agg

scripts/_helpers.py

@@ -44,6 +44,7 @@ def configure_logging(snakemake, skip_handlers=False):
     })
     logging.basicConfig(**kwargs)

+
 def load_network(import_name=None, custom_components=None):
     """
     Helper for importing a pypsa.Network with additional custom components.
@@ -70,7 +71,6 @@ def load_network(import_name=None, custom_components=None):
     -------
     pypsa.Network
     """
-
     import pypsa
     from pypsa.descriptors import Dict
@@ -90,10 +90,12 @@ def load_network(import_name=None, custom_components=None):
                          override_components=override_components,
                          override_component_attrs=override_component_attrs)

+
 def pdbcast(v, h):
     return pd.DataFrame(v.values.reshape((-1, 1)) * h.values,
                         index=v.index, columns=h.index)

+
 def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
     import pypsa
     from add_electricity import update_transmission_costs, load_costs
@@ -113,11 +115,11 @@ def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
     if combine_hydro_ps:
         n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS'

-    # #if the carrier was not set on the heat storage units
+    # if the carrier was not set on the heat storage units
     # bus_carrier = n.storage_units.bus.map(n.buses.carrier)
     # n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"

-    Nyears = n.snapshot_weightings.sum()/8760.
+    Nyears = n.snapshot_weightings.sum() / 8760.
     costs = load_costs(Nyears, tech_costs, config['costs'], config['electricity'])
     update_transmission_costs(n, costs)
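The ``Nyears`` expression reformatted above converts the network's temporal span into years, since snapshot weightings sum to the number of hours represented. A sketch with hypothetical weightings::

    import pandas as pd

    snapshot_weightings = pd.Series(3., index=range(2920))  # 3-hourly snapshots over one year
    Nyears = snapshot_weightings.sum() / 8760.
    print(Nyears)  # 1.0

This scaling factor is what ``load_costs()`` uses to annualize cost assumptions.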

scripts/add_electricity.py

@@ -90,26 +90,28 @@ It further adds extendable ``generators`` with **zero** capacity for
 - additional open- and combined-cycle gas turbines (if ``OCGT`` and/or ``CCGT`` is listed in the config setting ``electricity: extendable_carriers``)
 """

-from vresutils.costdata import annuity
-from vresutils.load import timeseries_opsd
-from vresutils import transfer as vtransfer
-
 import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging

-import pypsa
 import pandas as pd
 import numpy as np
 import xarray as xr
 import geopandas as gpd
+import pypsa
 import powerplantmatching as ppm

+from vresutils.costdata import annuity
+from vresutils.load import timeseries_opsd
+from vresutils import transfer as vtransfer
+
 idx = pd.IndexSlice

+logger = logging.getLogger(__name__)
+

 def normed(s): return s/s.sum()

+
 def _add_missing_carriers_from_costs(n, costs, carriers):
     missing_carriers = pd.Index(carriers).difference(n.carriers.index)
     if missing_carriers.empty: return
@@ -121,6 +123,7 @@ def _add_missing_carriers_from_costs(n, costs, carriers):
     emissions.index = missing_carriers
     n.import_components_from_dataframe(emissions, 'Carrier')

+
 def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
     if tech_costs is None:
         tech_costs = snakemake.input.tech_costs
@@ -193,10 +196,11 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
     return costs

+
 def load_powerplants(ppl_fn=None):
     if ppl_fn is None:
         ppl_fn = snakemake.input.powerplants
-    carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy':'biomass',
+    carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',
                     'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}
     return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
             .powerplant.to_pypsa_names()
@@ -204,12 +208,6 @@ def load_powerplants(ppl_fn=None):
             .replace({'carrier': carrier_dict}))

-# =============================================================================
-# Attach components
-# =============================================================================
-
-# ### Load
-
 def attach_load(n):
     substation_lv_i = n.buses.index[n.buses['substation_lv']]
     regions = (gpd.read_file(snakemake.input.regions).set_index('name')
@@ -249,7 +247,6 @@ def attach_load(n):
     n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)

-### Set line costs

 def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
     n.lines['capital_cost'] = (n.lines['length'] * length_factor *
@@ -270,7 +267,6 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal
                             costs.at['HVDC inverter pair', 'capital_cost'])
     n.links.loc[dc_b, 'capital_cost'] = costs

-### Generators

 def attach_wind_and_solar(n, costs):
     for tech in snakemake.config['renewable']:
@@ -309,15 +305,17 @@ def attach_wind_and_solar(n, costs):
                p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())

+
 def attach_conventional_generators(n, costs, ppl):
     carriers = snakemake.config['electricity']['conventional_carriers']
+
     _add_missing_carriers_from_costs(n, costs, carriers)
+
     ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
            .rename(index=lambda s: 'C' + str(s)))

     logger.info('Adding {} generators with capacities\n{}'
                 .format(len(ppl), ppl.groupby('carrier').p_nom.sum()))

     n.madd("Generator", ppl.index,
            carrier=ppl.carrier,
            bus=ppl.bus,
@@ -325,6 +323,7 @@ def attach_conventional_generators(n, costs, ppl):
            efficiency=ppl.efficiency,
            marginal_cost=ppl.marginal_cost,
            capital_cost=0)
+
     logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')

@@ -374,8 +373,8 @@ def attach_hydro(n, costs, ppl):
                     .where(lambda df: df<=1., other=1.)))

     if 'PHS' in carriers and not phs.empty:
-        # fill missing max hours to config value and assume no natural inflow
-        # due to lack of data
+        # fill missing max hours to config value and
+        # assume no natural inflow due to lack of data
         phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}})
         n.madd('StorageUnit', phs.index,
                carrier='PHS',
@@ -413,7 +412,6 @@ def attach_hydro(n, costs, ppl):
         hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0,
                                                 hydro.country.map(max_hours_country)).fillna(6)

-
         n.madd('StorageUnit', hydro.index, carrier='hydro',
                bus=hydro['bus'],
                p_nom=hydro['p_nom'],
@@ -432,6 +430,7 @@ def attach_hydro(n, costs, ppl):

 def attach_extendable_generators(n, costs, ppl):
     elec_opts = snakemake.config['electricity']
     carriers = pd.Index(elec_opts['extendable_carriers']['Generator'])
+
     _add_missing_carriers_from_costs(n, costs, carriers)

     for tech in carriers:
@@ -497,10 +496,11 @@ def estimate_renewable_capacities(n, tech_map=None):
         n.generators.loc[tech_i, 'p_nom'] = (
             (n.generators_t.p_max_pu[tech_i].mean() *
              n.generators.loc[tech_i, 'p_nom_max'])  # maximal yearly generation
-             .groupby(n.generators.bus.map(n.buses.country))  # for each country
+             .groupby(n.generators.bus.map(n.buses.country))
              .transform(lambda s: normed(s) * tech_capacities.at[s.name])
              .where(lambda s: s>0.1, 0.))  # only capacities above 100kW
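For context on the ``groupby``/``transform`` chain above: per-bus expected generation is normalized within each country (via ``normed``) and rescaled so the country total matches the reported capacity. A sketch with hypothetical data::

    import pandas as pd

    def normed(s): return s / s.sum()

    expected = pd.Series([2., 6., 4.], index=['g1', 'g2', 'g3'])  # p_max_pu.mean() * p_nom_max
    country = pd.Series(['DE', 'DE', 'FR'], index=expected.index)
    tech_capacities = pd.Series({'DE': 1000., 'FR': 500.})        # hypothetical totals

    p_nom = (expected.groupby(country)
             .transform(lambda s: normed(s) * tech_capacities.at[s.name])
             .where(lambda s: s > 0.1, 0.))
    print(p_nom.tolist())  # [250.0, 750.0, 500.0]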

+
 def add_nice_carrier_names(n, config=None):
     if config is None: config = snakemake.config
     carrier_i = n.carriers.index
@@ -522,7 +522,7 @@ if __name__ == "__main__":
     configure_logging(snakemake)

     n = pypsa.Network(snakemake.input.base_network)
-    Nyears = n.snapshot_weightings.sum()/8760.
+    Nyears = n.snapshot_weightings.sum() / 8760.
     costs = load_costs(Nyears)
     ppl = load_powerplants()

scripts/add_extra_components.py

@@ -50,17 +50,20 @@ The rule :mod:`add_extra_components` attaches additional extendable components t
 - ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
 """
 import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging

-import pypsa
 import pandas as pd
 import numpy as np
+import pypsa

 from add_electricity import (load_costs, add_nice_carrier_names,
                              _add_missing_carriers_from_costs)

 idx = pd.IndexSlice

+logger = logging.getLogger(__name__)
+

 def attach_storageunits(n, costs):
     elec_opts = snakemake.config['electricity']
     carriers = elec_opts['extendable_carriers']['StorageUnit']
@@ -82,6 +85,7 @@ def attach_storageunits(n, costs):
                max_hours=max_hours[carrier],
                cyclic_state_of_charge=True)

+
 def attach_stores(n, costs):
     elec_opts = snakemake.config['electricity']
     carriers = elec_opts['extendable_carriers']['Store']
@@ -144,6 +148,7 @@ def attach_stores(n, costs):
            capital_cost=costs.at['battery inverter', 'capital_cost'],
            p_nom_extendable=True)

+
 def attach_hydrogen_pipelines(n, costs):
     elec_opts = snakemake.config['electricity']
     ext_carriers = elec_opts['extendable_carriers']
@@ -176,6 +181,7 @@ def attach_hydrogen_pipelines(n, costs):
            efficiency=costs.at['H2 pipeline','efficiency'],
            carrier="H2 pipeline")

+
 if __name__ == "__main__":
     if 'snakemake' not in globals():
         from _helpers import mock_snakemake
@@ -184,7 +190,7 @@ if __name__ == "__main__":
     configure_logging(snakemake)

     n = pypsa.Network(snakemake.input.network)
-    Nyears = n.snapshot_weightings.sum()/8760.
+    Nyears = n.snapshot_weightings.sum() / 8760.
     costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs,
                        config=snakemake.config['costs'],
                        elec_config=snakemake.config['electricity'])

scripts/base_network.py

@@ -63,14 +63,16 @@ Description
 """

 import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging

+import pypsa
 import yaml
 import pandas as pd
 import geopandas as gpd
 import numpy as np
 import scipy as sp
+import networkx as nx
 from scipy.sparse import csgraph
 from six import iteritems
 from itertools import product

@@ -78,9 +80,8 @@ from itertools import product
 from shapely.geometry import Point, LineString
 import shapely, shapely.prepared, shapely.wkt

-import networkx as nx
-import pypsa
+logger = logging.getLogger(__name__)

 def _get_oid(df):
     if "tags" in df.columns:
@@ -88,12 +89,14 @@ def _get_oid(df):
     else:
         return pd.Series(np.nan, df.index)

+
 def _get_country(df):
     if "tags" in df.columns:
         return df.tags.str.extract('"country"=>"([A-Z]{2})"', expand=False)
     else:
         return pd.Series(np.nan, df.index)

+
 def _find_closest_links(links, new_links, distance_upper_bound=1.5):
     treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten()
                              for s in links.geometry])
@@ -109,6 +112,7 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5):
         [lambda ds: ~ds.index.duplicated(keep='first')]\
         .sort_index()['i']

+
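``_find_closest_links`` matches external link records to network links by their endpoint coordinates. A minimal sketch of the nearest-neighbour query it is built around (``scipy.spatial.cKDTree``), with hypothetical ``(x0, y0, x1, y1)`` endpoint vectors::

    import numpy as np
    from scipy.spatial import cKDTree

    treecoords = np.array([[0., 0., 1., 1.],
                           [2., 2., 3., 3.]])      # endpoints of existing links
    querycoords = np.array([[0.1, 0., 1., 1.05]])  # candidate link to match
    dist, ind = cKDTree(treecoords).query(querycoords, distance_upper_bound=1.5)
    print(dist, ind)  # small distance, matched to link 0

Unmatched candidates come back with infinite distance and an out-of-range index.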
 def _load_buses_from_eg():
     buses = (pd.read_csv(snakemake.input.eg_buses, quotechar="'",
                          true_values='t', false_values='f',
@@ -130,6 +134,7 @@ def _load_buses_from_eg():
     return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])

+
 def _load_transformers_from_eg(buses):
     transformers = (pd.read_csv(snakemake.input.eg_transformers, quotechar="'",
                                 true_values='t', false_values='f',
@@ -140,6 +145,7 @@ def _load_transformers_from_eg(buses):
     return transformers

+
 def _load_converters_from_eg(buses):
     converters = (pd.read_csv(snakemake.input.eg_converters, quotechar="'",
                               true_values='t', false_values='f',
@@ -241,6 +247,7 @@ def _add_links_from_tyndp(buses, links):
     return buses, links.append(links_tyndp, sort=True)

+
 def _load_lines_from_eg(buses):
     lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values='t', false_values='f',
              dtype=dict(line_id='str', bus0='str', bus1='str',
@@ -254,11 +261,13 @@ def _load_lines_from_eg(buses):
     return lines

+
 def _apply_parameter_corrections(n):
     with open(snakemake.input.parameter_corrections) as f:
         corrections = yaml.safe_load(f)

     if corrections is None: return
+
     for component, attrs in iteritems(corrections):
         df = n.df(component)
         oid = _get_oid(df)
@@ -275,6 +284,7 @@ def _apply_parameter_corrections(n):
             inds = r.index.intersection(df.index)
             df.loc[inds, attr] = r[inds].astype(df[attr].dtype)

+
 def _set_electrical_parameters_lines(lines):
     v_noms = snakemake.config['electricity']['voltages']
     linetypes = snakemake.config['lines']['types']
@@ -286,12 +296,14 @@ def _set_electrical_parameters_lines(lines):
     return lines

+
 def _set_lines_s_nom_from_linetypes(n):
     n.lines['s_nom'] = (
         np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
         n.lines['v_nom'] * n.lines.num_parallel
     )
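The rating set in ``_set_lines_s_nom_from_linetypes`` is the standard three-phase apparent power, S = sqrt(3) * I_nom * V_nom, scaled by the number of parallel circuits. A worked example with hypothetical but typical values::

    import numpy as np

    i_nom = 2.58       # kA, nominal current of the line type
    v_nom = 380.       # kV
    num_parallel = 2
    s_nom = np.sqrt(3) * i_nom * v_nom * num_parallel  # kA * kV = MVA
    print(round(s_nom))  # 3396 (MVA)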
def _set_electrical_parameters_links(links): def _set_electrical_parameters_links(links):
if links.empty: return links if links.empty: return links
@ -301,11 +313,11 @@ def _set_electrical_parameters_links(links):
links_p_nom = pd.read_csv(snakemake.input.links_p_nom) links_p_nom = pd.read_csv(snakemake.input.links_p_nom)
#Filter links that are not in operation anymore # filter links that are not in operation anymore
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False) removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
links_p_nom = links_p_nom[~removed_b] links_p_nom = links_p_nom[~removed_b]
#find closest link for all links in links_p_nom # find closest link for all links in links_p_nom
links_p_nom['j'] = _find_closest_links(links, links_p_nom) links_p_nom['j'] = _find_closest_links(links, links_p_nom)
links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'}) links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'})
@ -318,6 +330,7 @@ def _set_electrical_parameters_links(links):
return links return links
def _set_electrical_parameters_converters(converters): def _set_electrical_parameters_converters(converters):
p_max_pu = snakemake.config['links'].get('p_max_pu', 1.) p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
converters['p_max_pu'] = p_max_pu converters['p_max_pu'] = p_max_pu
@ -331,6 +344,7 @@ def _set_electrical_parameters_converters(converters):
return converters return converters
def _set_electrical_parameters_transformers(transformers): def _set_electrical_parameters_transformers(transformers):
config = snakemake.config['transformers'] config = snakemake.config['transformers']
@ -341,9 +355,11 @@ def _set_electrical_parameters_transformers(transformers):
return transformers return transformers
def _remove_dangling_branches(branches, buses): def _remove_dangling_branches(branches, buses):
return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]) return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)])
def _remove_unconnected_components(network): def _remove_unconnected_components(network):
_, labels = csgraph.connected_components(network.adjacency_matrix(), directed=False) _, labels = csgraph.connected_components(network.adjacency_matrix(), directed=False)
component = pd.Series(labels, index=network.buses.index) component = pd.Series(labels, index=network.buses.index)
@ -356,6 +372,7 @@ def _remove_unconnected_components(network):
return network[component == component_sizes.index[0]] return network[component == component_sizes.index[0]]
def _set_countries_and_substations(n): def _set_countries_and_substations(n):
buses = n.buses buses = n.buses
@ -442,6 +459,7 @@ def _set_countries_and_substations(n):
return buses return buses
def _replace_b2b_converter_at_country_border_by_link(n): def _replace_b2b_converter_at_country_border_by_link(n):
# Affects only the B2B converter in Lithuania at the Polish border at the moment # Affects only the B2B converter in Lithuania at the Polish border at the moment
buscntry = n.buses.country buscntry = n.buses.country
@ -479,6 +497,7 @@ def _replace_b2b_converter_at_country_border_by_link(n):
logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}" logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}"
.format(i, b0, line, linkcntry.at[i], buscntry.at[b1])) .format(i, b0, line, linkcntry.at[i], buscntry.at[b1]))
def _set_links_underwater_fraction(n): def _set_links_underwater_fraction(n):
if n.links.empty: return if n.links.empty: return
@ -489,6 +508,7 @@ def _set_links_underwater_fraction(n):
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads)) links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length
def _adjust_capacities_of_under_construction_branches(n): def _adjust_capacities_of_under_construction_branches(n):
lines_mode = snakemake.config['lines'].get('under_construction', 'undef') lines_mode = snakemake.config['lines'].get('under_construction', 'undef')
if lines_mode == 'zero': if lines_mode == 'zero':
@ -513,6 +533,7 @@ def _adjust_capacities_of_under_construction_branches(n):
return n return n
def base_network(): def base_network():
buses = _load_buses_from_eg() buses = _load_buses_from_eg()
@ -534,7 +555,7 @@ def base_network():
n.name = 'PyPSA-Eur' n.name = 'PyPSA-Eur'
n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots'])) n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots']))
-    n.snapshot_weightings[:] *= 8760./n.snapshot_weightings.sum()
+    n.snapshot_weightings[:] *= 8760. / n.snapshot_weightings.sum()
n.import_components_from_dataframe(buses, "Bus") n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line") n.import_components_from_dataframe(lines, "Line")
@ -565,4 +586,5 @@ if __name__ == "__main__":
configure_logging(snakemake) configure_logging(snakemake)
n = base_network() n = base_network()
n.export_to_netcdf(snakemake.output[0]) n.export_to_netcdf(snakemake.output[0])
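The snapshot-weighting rescale in the hunk above keeps annual energy sums consistent whatever the native resolution of the configured snapshots. A minimal sketch of the arithmetic on a plain pandas Series (the hourly leap-year example is assumed, not taken from this commit):

import pandas as pd

# 8784 hourly snapshots of an assumed leap year, each initially weighted 1.0
weightings = pd.Series(1.0, index=range(8784))
weightings *= 8760. / weightings.sum()  # rescale so the weights sum to 8760 h
print(round(weightings.sum(), 6))  # 8760.0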

View File

@ -42,17 +42,24 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
-from vresutils.graph import voronoi_partition_pts
+import pypsa
 import os
 import pandas as pd
 import geopandas as gpd
-import pypsa
+from vresutils.graph import voronoi_partition_pts
+
+logger = logging.getLogger(__name__)
+
+
+def save_to_geojson(s, fn):
+    if os.path.exists(fn):
+        os.unlink(fn)
+    schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'}
+    s.to_file(fn, driver='GeoJSON', schema=schema)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
@ -96,12 +103,6 @@ if __name__ == "__main__":
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2] offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c) offshore_regions.append(offshore_regions_c)
-    def save_to_geojson(s, fn):
-        if os.path.exists(fn):
-            os.unlink(fn)
-        schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'}
-        s.to_file(fn, driver='GeoJSON', schema=schema)
save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore) save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore)
save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore) save_to_geojson(pd.concat(offshore_regions, ignore_index=True), snakemake.output.regions_offshore)
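The helper hoisted to module scope above overrides the inferred schema because a set of regions can mix Polygon and MultiPolygon geometries, which fiona's per-file schema otherwise rejects. A hypothetical usage sketch, assuming the geopandas/fiona API of the time (gpd.io.file.infer_schema) and a made-up output path:

import os
import geopandas as gpd
from shapely.geometry import Polygon, MultiPolygon

def save_to_geojson(s, fn):
    if os.path.exists(fn):
        os.unlink(fn)  # GeoJSON files cannot be overwritten in place
    schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'}
    s.to_file(fn, driver='GeoJSON', schema=schema)

regions = gpd.GeoDataFrame({'name': ['a', 'b'],
                            'geometry': [Polygon([(0, 0), (1, 0), (1, 1)]),
                                         MultiPolygon([Polygon([(2, 0), (3, 0), (3, 1)])])]})
save_to_geojson(regions, 'regions.geojson')  # hypothetical path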

View File

@ -63,7 +63,6 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import os import os
@ -84,6 +83,9 @@ import progressbar as pgb
from build_renewable_profiles import init_globals, calculate_potential from build_renewable_profiles import init_globals, calculate_potential
+logger = logging.getLogger(__name__)
def build_area(flh, countries, areamatrix, breaks, fn): def build_area(flh, countries, areamatrix, breaks, fn):
area_unbinned = xr.DataArray(areamatrix.todense(), [countries, capacity_factor.coords['spatial']]) area_unbinned = xr.DataArray(areamatrix.todense(), [countries, capacity_factor.coords['spatial']])
bins = xr.DataArray(pd.cut(flh.to_series(), bins=breaks), flh.coords, name="bins") bins = xr.DataArray(pd.cut(flh.to_series(), bins=breaks), flh.coords, name="bins")
@ -92,6 +94,7 @@ def build_area(flh, countries, areamatrix, breaks, fn):
area.columns = area.columns.map(lambda s: s.left) area.columns = area.columns.map(lambda s: s.left)
return area return area
def plot_area_not_solar(area, countries): def plot_area_not_solar(area, countries):
# onshore wind/offshore wind # onshore wind/offshore wind
a = area.T a = area.T

View File

@ -92,12 +92,13 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import os import os
import atlite import atlite
+logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -113,4 +114,6 @@ if __name__ == "__main__":
cutout_dir=os.path.dirname(snakemake.output[0]), cutout_dir=os.path.dirname(snakemake.output[0]),
**cutout_params) **cutout_params)
-    cutout.prepare(nprocesses=snakemake.config['atlite'].get('nprocesses', 4))
+    nprocesses = snakemake.config['atlite'].get('nprocesses', 4)
+    cutout.prepare(nprocesses=nprocesses)

View File

@ -60,7 +60,6 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import os import os
@ -68,6 +67,8 @@ import atlite
import geopandas as gpd import geopandas as gpd
from vresutils import hydro as vhydro from vresutils import hydro as vhydro
+logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
@ -75,8 +76,8 @@ if __name__ == "__main__":
configure_logging(snakemake) configure_logging(snakemake)
config = snakemake.config['renewable']['hydro'] config = snakemake.config['renewable']['hydro']
-    cutout = atlite.Cutout(config['cutout'],
-                           cutout_dir=os.path.dirname(snakemake.input.cutout))
+    cutout_dir = os.path.dirname(snakemake.input.cutout)
+    cutout = atlite.Cutout(config['cutout'], cutout_dir=cutout_dir)
countries = snakemake.config['countries'] countries = snakemake.config['countries']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'].reindex(countries) country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'].reindex(countries)
@ -84,9 +85,9 @@ if __name__ == "__main__":
eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries) eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries)
inflow = cutout.runoff(shapes=country_shapes, inflow = cutout.runoff(shapes=country_shapes,
smooth=True, smooth=True,
lower_threshold_quantile=True, lower_threshold_quantile=True,
normalize_using_yearly=eia_stats) normalize_using_yearly=eia_stats)
if 'clip_min_inflow' in config: if 'clip_min_inflow' in config:
inflow.values[inflow.values < config['clip_min_inflow']] = 0. inflow.values[inflow.values < config['clip_min_inflow']] = 0.
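The clip_min_inflow guard above zeroes out numerically tiny inflow values. An equivalent formulation on a toy xarray DataArray, with an assumed threshold of 5.0:

import xarray as xr

inflow = xr.DataArray([0.2, 4.0, 7.5, 12.0])
clip_min_inflow = 5.0  # assumed example value
inflow = inflow.where(inflow >= clip_min_inflow, 0.)  # zero out trickle inflows
print(inflow.values)  # [ 0.   0.   7.5 12. ]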

View File

@ -41,6 +41,7 @@ Description
import logging import logging
from _helpers import configure_logging from _helpers import configure_logging
import atlite import atlite
import geokit as gk import geokit as gk
from pathlib import Path from pathlib import Path
@ -58,7 +59,7 @@ def determine_cutout_xXyY(cutout_name):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
-        snakemake = mock_snakemake('build_natura_raster') #has to be enabled
+        snakemake = mock_snakemake('build_natura_raster')
configure_logging(snakemake) configure_logging(snakemake)
cutout_dir = Path(snakemake.input.cutouts[0]).parent.resolve() cutout_dir = Path(snakemake.input.cutouts[0]).parent.resolve()

View File

@ -72,16 +72,18 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity:
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
-from scipy.spatial import cKDTree as KDTree
 import pypsa
 import powerplantmatching as pm
 import pandas as pd
 import numpy as np
+from scipy.spatial import cKDTree as KDTree
+
+logger = logging.getLogger(__name__)
def add_custom_powerplants(ppl): def add_custom_powerplants(ppl):
custom_ppl_query = snakemake.config['electricity']['custom_powerplants'] custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
if not custom_ppl_query: if not custom_ppl_query:
@ -94,7 +96,6 @@ def add_custom_powerplants(ppl):
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_powerplants') snakemake = mock_snakemake('build_powerplants')
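The cKDTree import that moves down in this hunk supports the nearest-bus assignment in this script: each matched power plant is snapped to the closest network bus. A toy nearest-neighbour lookup with made-up coordinates:

import numpy as np
from scipy.spatial import cKDTree as KDTree

bus_coords = np.array([[10., 50.], [12., 52.], [8., 48.]])  # assumed (x, y)
plant_coords = np.array([[11.5, 51.8], [8.2, 48.1]])
_, nearest = KDTree(bus_coords).query(plant_coords)
print(nearest)  # [1 2] -> index of the closest bus for each plant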

View File

@ -181,27 +181,28 @@ node (`p_nom_max`): ``simple`` and ``conservative``:
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
-import matplotlib.pyplot as plt
 import os
 import atlite
 import numpy as np
 import xarray as xr
 import pandas as pd
 import multiprocessing as mp
+import matplotlib.pyplot as plt
+import progressbar as pgb
 from scipy.sparse import csr_matrix, vstack
 from pypsa.geo import haversine
 from vresutils import landuse as vlanduse
 from vresutils.array import spdiag
-import progressbar as pgb
+
+logger = logging.getLogger(__name__)
bounds = dx = dy = config = paths = gebco = clc = natura = None bounds = dx = dy = config = paths = gebco = clc = natura = None
def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths): def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths):
# Late import so that the GDAL Context is only created in the new processes # Late import so that the GDAL Context is only created in the new processes
global gl, gk, gdal global gl, gk, gdal
@ -227,6 +228,7 @@ def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths):
natura = gk.raster.loadRaster(paths["natura"]) natura = gk.raster.loadRaster(paths["natura"])
def downsample_to_coarse_grid(bounds, dx, dy, mask, data): def downsample_to_coarse_grid(bounds, dx, dy, mask, data):
# The GDAL warp function with the 'average' resample algorithm needs a band of zero values of at least # The GDAL warp function with the 'average' resample algorithm needs a band of zero values of at least
# the size of one coarse cell around the original raster or it produces erroneous results # the size of one coarse cell around the original raster or it produces erroneous results
@ -238,6 +240,7 @@ def downsample_to_coarse_grid(bounds, dx, dy, mask, data):
assert gdal.Warp(average, padded, resampleAlg='average') == 1, "gdal warp failed: %s" % gdal.GetLastErrorMsg() assert gdal.Warp(average, padded, resampleAlg='average') == 1, "gdal warp failed: %s" % gdal.GetLastErrorMsg()
return average return average
def calculate_potential(gid, save_map=None): def calculate_potential(gid, save_map=None):
feature = gk.vector.extractFeature(paths["regions"], where=gid) feature = gk.vector.extractFeature(paths["regions"], where=gid)
ec = gl.ExclusionCalculator(feature.geom) ec = gl.ExclusionCalculator(feature.geom)
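init_globals and the "late import" comment above exist so that each worker process creates its own GDAL context instead of inheriting pickled state from the parent. The same initializer idiom in miniature, pure stdlib (the real code opens GDAL rasters here):

import multiprocessing as mp

state = None

def init_worker(offset):
    global state
    state = {'offset': offset}  # stand-in for opening GDAL rasters per worker

def task(x):
    return x + state['offset']

if __name__ == '__main__':
    with mp.Pool(2, initializer=init_worker, initargs=(10,)) as pool:
        print(pool.map(task, [1, 2, 3]))  # [11, 12, 13]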

View File

@ -92,6 +92,7 @@ def _get_country(target, **keys):
except (KeyError, AttributeError): except (KeyError, AttributeError):
return np.nan return np.nan
def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True): def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
if isinstance(polys, MultiPolygon): if isinstance(polys, MultiPolygon):
polys = sorted(polys, key=attrgetter('area'), reverse=True) polys = sorted(polys, key=attrgetter('area'), reverse=True)
@ -105,6 +106,7 @@ def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
polys = mainpoly polys = mainpoly
return polys.simplify(tolerance=tolerance) return polys.simplify(tolerance=tolerance)
def countries(): def countries():
cntries = snakemake.config['countries'] cntries = snakemake.config['countries']
if 'RS' in cntries: cntries.append('KV') if 'RS' in cntries: cntries.append('KV')
@ -121,6 +123,7 @@ def countries():
return s return s
def eez(country_shapes): def eez(country_shapes):
df = gpd.read_file(snakemake.input.eez) df = gpd.read_file(snakemake.input.eez)
df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])] df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])]
@ -130,6 +133,7 @@ def eez(country_shapes):
s.index.name = "name" s.index.name = "name"
return s return s
def country_cover(country_shapes, eez_shapes=None): def country_cover(country_shapes, eez_shapes=None):
shapes = list(country_shapes) shapes = list(country_shapes)
if eez_shapes is not None: if eez_shapes is not None:
@ -140,6 +144,7 @@ def country_cover(country_shapes, eez_shapes=None):
europe_shape = max(europe_shape, key=attrgetter('area')) europe_shape = max(europe_shape, key=attrgetter('area'))
return Polygon(shell=europe_shape.exterior) return Polygon(shell=europe_shape.exterior)
def nuts3(country_shapes): def nuts3(country_shapes):
df = gpd.read_file(snakemake.input.nuts3) df = gpd.read_file(snakemake.input.nuts3)
df = df.loc[df['STAT_LEVL_'] == 3] df = df.loc[df['STAT_LEVL_'] == 3]
@ -158,7 +163,6 @@ def nuts3(country_shapes):
.applymap(lambda x: pd.to_numeric(x, errors='coerce')) .applymap(lambda x: pd.to_numeric(x, errors='coerce'))
.fillna(method='bfill', axis=1))['2014'] .fillna(method='bfill', axis=1))['2014']
-    # Swiss data
cantons = pd.read_csv(snakemake.input.ch_cantons) cantons = pd.read_csv(snakemake.input.ch_cantons)
cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS'] cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS']
cantons = cantons.str.pad(5, side='right', fillchar='0') cantons = cantons.str.pad(5, side='right', fillchar='0')
@ -197,6 +201,7 @@ def nuts3(country_shapes):
return df return df
def save_to_geojson(df, fn): def save_to_geojson(df, fn):
if os.path.exists(fn): if os.path.exists(fn):
os.unlink(fn) os.unlink(fn)
@ -206,20 +211,23 @@ def save_to_geojson(df, fn):
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema) df.to_file(fn, driver='GeoJSON', schema=schema)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('build_shapes') snakemake = mock_snakemake('build_shapes')
configure_logging(snakemake) configure_logging(snakemake)
+    out = snakemake.output
     country_shapes = countries()
-    save_to_geojson(country_shapes, snakemake.output.country_shapes)
+    save_to_geojson(country_shapes, out.country_shapes)
     offshore_shapes = eez(country_shapes)
-    save_to_geojson(offshore_shapes, snakemake.output.offshore_shapes)
+    save_to_geojson(offshore_shapes, out.offshore_shapes)
     europe_shape = country_cover(country_shapes, offshore_shapes)
-    save_to_geojson(gpd.GeoSeries(europe_shape), snakemake.output.europe_shape)
+    save_to_geojson(gpd.GeoSeries(europe_shape), out.europe_shape)
     nuts3_shapes = nuts3(country_shapes)
-    save_to_geojson(nuts3_shapes, snakemake.output.nuts3_shapes)
+    save_to_geojson(nuts3_shapes, out.nuts3_shapes)
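_simplify_polys (touched above) keeps the largest member of a MultiPolygon, drops small or remote islets, and then applies Douglas-Peucker simplification. A toy sketch with shapely using the default thresholds shown in the hunk; the geometries are made up:

import numpy as np
from operator import attrgetter
from shapely.geometry import MultiPolygon, Polygon

shape = MultiPolygon([Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),       # mainland
                      Polygon([(50, 0), (50.05, 0), (50.05, 0.05)])])  # tiny islet
polys = sorted(shape.geoms, key=attrgetter('area'), reverse=True)
mainpoly = polys[0]
mainlength = np.sqrt(mainpoly.area / (2. * np.pi))
kept = MultiPolygon([p for p in polys
                     if p.area > 0.1 and mainpoly.distance(p) < mainlength])
print(kept.simplify(tolerance=0.01).bounds)  # islet filtered: (0.0, 0.0, 2.0, 2.0)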

View File

@ -120,31 +120,33 @@ Exemplary unsolved network clustered to 37 nodes:
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
-import pandas as pd
-idx = pd.IndexSlice
+import pypsa
 import os
+import shapely
+import pandas as pd
 import numpy as np
 import geopandas as gpd
-import shapely
+import pyomo.environ as po
 import matplotlib.pyplot as plt
 import seaborn as sns
 from six.moves import reduce
-import pyomo.environ as po
-import pypsa
 from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering,
                                      _make_consense, get_clustering_from_busmap)
 from add_electricity import load_costs
+
+idx = pd.IndexSlice
+
+logger = logging.getLogger(__name__)
 
-def normed(x):
-    return (x/x.sum()).fillna(0.)
+def normed(x): return (x/x.sum()).fillna(0.)
def weighting_for_country(n, x): def weighting_for_country(n, x):
conv_carriers = {'OCGT','CCGT','PHS', 'hydro'} conv_carriers = {'OCGT','CCGT','PHS', 'hydro'}
@ -162,22 +164,13 @@ def weighting_for_country(n, x):
g = normed(gen.reindex(b_i, fill_value=0)) g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0)) l = normed(load.reindex(b_i, fill_value=0))
-    w= g + l
+    w = g + l
return (w * (100. / w.max())).clip(lower=1.).astype(int) return (w * (100. / w.max())).clip(lower=1.).astype(int)
-## Plot weighting for Germany
-def plot_weighting(n, country, country_shape=None):
-    n.plot(bus_sizes=(2*weighting_for_country(n.buses.loc[n.buses.country == country])).reindex(n.buses.index, fill_value=1))
-    if country_shape is not None:
-        plt.xlim(country_shape.bounds[0], country_shape.bounds[2])
-        plt.ylim(country_shape.bounds[1], country_shape.bounds[3])
-# # Determining the number of clusters per country
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None): def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
"""Determine the number of clusters per country"""
if solver_name is None: if solver_name is None:
solver_name = snakemake.config['solving']['solver']['name'] solver_name = snakemake.config['solving']['solver']['name']
@ -189,7 +182,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
N = n.buses.groupby(['country', 'sub_network']).size() N = n.buses.groupby(['country', 'sub_network']).size()
assert n_clusters >= len(N) and n_clusters <= N.sum(), \ assert n_clusters >= len(N) and n_clusters <= N.sum(), \
"Number of clusters must be {} <= n_clusters <= {} for this selection of countries.".format(len(N), N.sum()) f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
if focus_weights is not None: if focus_weights is not None:
@ -205,7 +198,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
logger.warning('Using custom focus weights for determining number of clusters.') logger.warning('Using custom focus weights for determining number of clusters.')
-    assert np.isclose(L.sum(), 1.0, rtol=1e-3), "Country weights L must sum up to 1.0 when distributing clusters. Is {}.".format(L.sum())
+    assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
m = po.ConcreteModel() m = po.ConcreteModel()
def n_bounds(model, *n_id): def n_bounds(model, *n_id):
@ -221,10 +214,11 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
opt = po.SolverFactory('ipopt') opt = po.SolverFactory('ipopt')
results = opt.solve(m) results = opt.solve(m)
-    assert results['Solver'][0]['Status'] == 'ok', "Solver returned non-optimally: {}".format(results)
+    assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"
return pd.Series(m.n.get_values(), index=L.index).astype(int) return pd.Series(m.n.get_values(), index=L.index).astype(int)
def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds): def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds):
if algorithm == "kmeans": if algorithm == "kmeans":
algorithm_kwds.setdefault('n_init', 1000) algorithm_kwds.setdefault('n_init', 1000)
@ -243,7 +237,7 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
def busmap_for_country(x): def busmap_for_country(x):
prefix = x.name[0] + x.name[1] + ' ' prefix = x.name[0] + x.name[1] + ' '
-        logger.debug("Determining busmap for country {}".format(prefix[:-1]))
+        logger.debug(f"Determining busmap for country {prefix[:-1]}")
if len(x) == 1: if len(x) == 1:
return pd.Series(prefix + '0', index=x.index) return pd.Series(prefix + '0', index=x.index)
weight = weighting_for_country(n, x) weight = weighting_for_country(n, x)
@ -260,12 +254,6 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
return (n.buses.groupby(['country', 'sub_network'], group_keys=False, squeeze=True) return (n.buses.groupby(['country', 'sub_network'], group_keys=False, squeeze=True)
.apply(busmap_for_country).rename('busmap')) .apply(busmap_for_country).rename('busmap'))
-def plot_busmap_for_n_clusters(n, n_clusters=50):
-    busmap = busmap_for_n_clusters(n, n_clusters)
-    cs = busmap.unique()
-    cr = sns.color_palette("hls", len(cs))
-    n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
-    del cs, cr
def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None, def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
line_length_factor=1.25, potential_mode='simple', line_length_factor=1.25, potential_mode='simple',
@ -277,8 +265,7 @@ def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
elif potential_mode == 'conservative': elif potential_mode == 'conservative':
p_nom_max_strategy = np.min p_nom_max_strategy = np.min
else: else:
-        raise AttributeError("potential_mode should be one of 'simple' or 'conservative', "
-                             "but is '{}'".format(potential_mode))
+        raise AttributeError(f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'")
clustering = get_clustering_from_busmap( clustering = get_clustering_from_busmap(
n, busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm), n, busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm),
@ -301,6 +288,7 @@ def clustering_for_n_clusters(n, n_clusters, aggregate_carriers=None,
return clustering return clustering
def save_to_geojson(s, fn): def save_to_geojson(s, fn):
if os.path.exists(fn): if os.path.exists(fn):
os.unlink(fn) os.unlink(fn)
@ -308,6 +296,7 @@ def save_to_geojson(s, fn):
schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'} schema = {**gpd.io.file.infer_schema(df), 'geometry': 'Unknown'}
df.to_file(fn, driver='GeoJSON', schema=schema) df.to_file(fn, driver='GeoJSON', schema=schema)
def cluster_regions(busmaps, input=None, output=None): def cluster_regions(busmaps, input=None, output=None):
if input is None: input = snakemake.input if input is None: input = snakemake.input
if output is None: output = snakemake.output if output is None: output = snakemake.output
@ -321,6 +310,17 @@ def cluster_regions(busmaps, input=None, output=None):
regions_c.index.name = 'name' regions_c.index.name = 'name'
save_to_geojson(regions_c, getattr(output, which)) save_to_geojson(regions_c, getattr(output, which))
+def plot_busmap_for_n_clusters(n, n_clusters, fn=None):
+    busmap = busmap_for_n_clusters(n, n_clusters)
+    cs = busmap.unique()
+    cr = sns.color_palette("hls", len(cs))
+    n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
+    if fn is not None:
+        plt.savefig(fn, bbox_inches='tight')
+    del cs, cr
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
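distribute_clusters (reworked above with f-strings) allocates n_clusters across countries in proportion to a weight vector L that must sum to one, bounded between 1 and the number of buses per country; the real code solves a small pyomo program. A back-of-envelope version that only illustrates the constraint logic (weights and bus counts are assumed):

import numpy as np
import pandas as pd

L = pd.Series({'DE': 0.5, 'FR': 0.3, 'PL': 0.2})  # assumed focus weights
N = pd.Series({'DE': 20, 'FR': 15, 'PL': 10})     # assumed buses per country
n_clusters = 10

assert len(N) <= n_clusters <= N.sum()
assert np.isclose(L.sum(), 1.0, rtol=1e-3)

# naive proportional rounding; unlike the pyomo model it may miss the exact total
n = (L * n_clusters).round().clip(lower=1, upper=N).astype(int)
print(n.to_dict())  # {'DE': 5, 'FR': 3, 'PL': 2}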

View File

@ -54,22 +54,22 @@ Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
 import os
+import pypsa
-from six import iteritems
 import pandas as pd
-import pypsa
+from six import iteritems
 from add_electricity import load_costs, update_transmission_costs
 
 idx = pd.IndexSlice
+
+logger = logging.getLogger(__name__)
opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"} opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"}
def _add_indexed_rows(df, raw_index): def _add_indexed_rows(df, raw_index):
new_index = df.index|pd.MultiIndex.from_product(raw_index) new_index = df.index|pd.MultiIndex.from_product(raw_index)
if isinstance(new_index, pd.Index): if isinstance(new_index, pd.Index):
@ -77,6 +77,7 @@ def _add_indexed_rows(df, raw_index):
return df.reindex(new_index) return df.reindex(new_index)
def assign_carriers(n): def assign_carriers(n):
if "carrier" not in n.loads: if "carrier" not in n.loads:
@ -97,7 +98,8 @@ def assign_carriers(n):
if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "": if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "":
n.stores.loc["EU gas Store","carrier"] = "gas Store" n.stores.loc["EU gas Store","carrier"] = "gas Store"
-def calculate_costs(n,label,costs):
+def calculate_costs(n, label, costs):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}): for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"] capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"]
@ -130,7 +132,7 @@ def calculate_costs(n,label,costs):
return costs return costs
-def calculate_curtailment(n,label,curtailment):
+def calculate_curtailment(n, label, curtailment):
avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum() avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum() used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
@ -139,7 +141,7 @@ def calculate_curtailment(n,label,curtailment):
return curtailment return curtailment
-def calculate_energy(n,label,energy):
+def calculate_energy(n, label, energy):
for c in n.iterate_components(n.one_port_components|n.branch_components): for c in n.iterate_components(n.one_port_components|n.branch_components):
@ -159,6 +161,7 @@ def include_in_summary(summary, multiindexprefix, label, item):
summary = _add_indexed_rows(summary, raw_index) summary = _add_indexed_rows(summary, raw_index)
summary.loc[idx[raw_index], label] = item.values summary.loc[idx[raw_index], label] = item.values
return summary return summary
def calculate_capacity(n,label,capacity): def calculate_capacity(n,label,capacity):
@ -178,7 +181,7 @@ def calculate_capacity(n,label,capacity):
return capacity return capacity
-def calculate_supply(n,label,supply):
+def calculate_supply(n, label, supply):
"""calculate the max dispatch of each component at the buses where the loads are attached""" """calculate the max dispatch of each component at the buses where the loads are attached"""
load_types = n.loads.carrier.value_counts().index load_types = n.loads.carrier.value_counts().index
@ -224,7 +227,8 @@ def calculate_supply(n,label,supply):
return supply return supply
-def calculate_supply_energy(n,label,supply_energy):
+def calculate_supply_energy(n, label, supply_energy):
"""calculate the total dispatch of each component at the buses where the loads are attached""" """calculate the total dispatch of each component at the buses where the loads are attached"""
load_types = n.loads.carrier.value_counts().index load_types = n.loads.carrier.value_counts().index
@ -269,6 +273,7 @@ def calculate_supply_energy(n,label,supply_energy):
return supply_energy return supply_energy
def calculate_metrics(n,label,metrics): def calculate_metrics(n,label,metrics):
metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])) metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))
@ -295,16 +300,15 @@ def calculate_prices(n,label,prices):
prices = prices.reindex(prices.index|bus_type.value_counts().index) prices = prices.reindex(prices.index|bus_type.value_counts().index)
-    #WARNING: this is time-averaged, should really be load-weighted average
+    logger.warning("Prices are time-averaged, not load-weighted")
prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean() prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()
return prices return prices
def calculate_weighted_prices(n,label,weighted_prices): def calculate_weighted_prices(n,label,weighted_prices):
-    # Warning: doesn't include storage units as loads
+    logger.warning("Weighted prices don't include storage units as loads")
weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"])) weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"]))
@ -347,7 +351,7 @@ def calculate_weighted_prices(n,label,weighted_prices):
load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1) load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1)
-        #Add H2 Store when charging
+        # Add H2 Store when charging
if carrier == "H2": if carrier == "H2":
stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1) stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
stores[stores > 0.] = 0. stores[stores > 0.] = 0.
@ -361,62 +365,6 @@ def calculate_weighted_prices(n,label,weighted_prices):
return weighted_prices return weighted_prices
-# BROKEN don't use
-#
-# def calculate_market_values(n, label, market_values):
-# # Warning: doesn't include storage units
-# n.buses["suffix"] = n.buses.index.str[2:]
-# suffix = ""
-# buses = n.buses.index[n.buses.suffix == suffix]
-# ## First do market value of generators ##
-# generators = n.generators.index[n.buses.loc[n.generators.bus,"suffix"] == suffix]
-# techs = n.generators.loc[generators,"carrier"].value_counts().index
-# market_values = market_values.reindex(market_values.index | techs)
-# for tech in techs:
-# gens = generators[n.generators.loc[generators,"carrier"] == tech]
-# dispatch = n.generators_t.p[gens].groupby(n.generators.loc[gens,"bus"],axis=1).sum().reindex(columns=buses,fill_value=0.)
-# revenue = dispatch*n.buses_t.marginal_price[buses]
-# market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum()
-# ## Now do market value of links ##
-# for i in ["0","1"]:
-# all_links = n.links.index[n.buses.loc[n.links["bus"+i],"suffix"] == suffix]
-# techs = n.links.loc[all_links,"carrier"].value_counts().index
-# market_values = market_values.reindex(market_values.index | techs)
-# for tech in techs:
-# links = all_links[n.links.loc[all_links,"carrier"] == tech]
-# dispatch = n.links_t["p"+i][links].groupby(n.links.loc[links,"bus"+i],axis=1).sum().reindex(columns=buses,fill_value=0.)
-# revenue = dispatch*n.buses_t.marginal_price[buses]
-# market_values.at[tech,label] = revenue.sum().sum()/dispatch.sum().sum()
-# return market_values
-# OLD CODE must be adapted
-# def calculate_price_statistics(n, label, price_statistics):
-# price_statistics = price_statistics.reindex(price_statistics.index|pd.Index(["zero_hours","mean","standard_deviation"]))
-# n.buses["suffix"] = n.buses.index.str[2:]
-# suffix = ""
-# buses = n.buses.index[n.buses.suffix == suffix]
-# threshold = 0.1 #higher than phoney marginal_cost of wind/solar
-# df = pd.DataFrame(data=0.,columns=buses,index=n.snapshots)
-# df[n.buses_t.marginal_price[buses] < threshold] = 1.
-# price_statistics.at["zero_hours", label] = df.sum().sum()/(df.shape[0]*df.shape[1])
-# price_statistics.at["mean", label] = n.buses_t.marginal_price[buses].unstack().mean()
-# price_statistics.at["standard_deviation", label] = n.buses_t.marginal_price[buses].unstack().std()
-# return price_statistics
outputs = ["costs", outputs = ["costs",
"curtailment", "curtailment",
"energy", "energy",
@ -425,11 +373,10 @@ outputs = ["costs",
"supply_energy", "supply_energy",
"prices", "prices",
"weighted_prices", "weighted_prices",
# "price_statistics",
# "market_values",
"metrics", "metrics",
] ]
def make_summaries(networks_dict, country='all'): def make_summaries(networks_dict, country='all'):
columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"]) columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"])
@ -454,7 +401,7 @@ def make_summaries(networks_dict, country='all'):
if country != 'all': if country != 'all':
n = n[n.buses.country == country] n = n[n.buses.country == country]
-        Nyears = n.snapshot_weightings.sum()/8760.
+        Nyears = n.snapshot_weightings.sum() / 8760.
costs = load_costs(Nyears, snakemake.input[0], costs = load_costs(Nyears, snakemake.input[0],
snakemake.config['costs'], snakemake.config['electricity']) snakemake.config['costs'], snakemake.config['electricity'])
update_transmission_costs(n, costs, simple_hvdc_costs=False) update_transmission_costs(n, costs, simple_hvdc_costs=False)
@ -484,7 +431,6 @@ if __name__ == "__main__":
network_dir = os.path.join('results', 'networks') network_dir = os.path.join('results', 'networks')
configure_logging(snakemake) configure_logging(snakemake)
def expand_from_wildcard(key): def expand_from_wildcard(key):
w = getattr(snakemake.wildcards, key) w = getattr(snakemake.wildcards, key)
return snakemake.config["scenario"][key] if w == "all" else [w] return snakemake.config["scenario"][key] if w == "all" else [w]
@ -504,8 +450,6 @@ if __name__ == "__main__":
for l in ll for l in ll
for opts in expand_from_wildcard("opts")} for opts in expand_from_wildcard("opts")}
-    print(networks_dict)
dfs = make_summaries(networks_dict, country=snakemake.wildcards.country) dfs = make_summaries(networks_dict, country=snakemake.wildcards.country)
to_csv(dfs) to_csv(dfs)
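The new logger.warning above flags that the reported prices are time-averaged. For contrast, a load-weighted mean on toy numbers, which is what the old inline comment asked for:

import pandas as pd

price = pd.Series([30., 90., 50.])    # EUR/MWh per snapshot (assumed)
load = pd.Series([100., 300., 200.])  # MW per snapshot (assumed)

time_averaged = price.mean()
load_weighted = (price * load).sum() / load.sum()
print(round(time_averaged, 1), round(load_weighted, 1))  # 56.7 66.7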

View File

@ -20,7 +20,6 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs, from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs,
configure_logging) configure_logging)
@ -35,6 +34,9 @@ from matplotlib.patches import Circle, Ellipse
from matplotlib.legend_handler import HandlerPatch from matplotlib.legend_handler import HandlerPatch
to_rgba = mpl.colors.colorConverter.to_rgba to_rgba = mpl.colors.colorConverter.to_rgba
+logger = logging.getLogger(__name__)
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure() fig = ax.get_figure()
def axes2pt(): def axes2pt():
@ -57,9 +59,11 @@ def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
return e return e
return {Circle: HandlerPatch(patch_func=legend_circle_handler)} return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
def make_legend_circles_for(sizes, scale=1.0, **kw): def make_legend_circles_for(sizes, scale=1.0, **kw):
return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes] return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes]
def set_plot_style(): def set_plot_style():
plt.style.use(['classic', 'seaborn-white', plt.style.use(['classic', 'seaborn-white',
{'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6', {'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6',
@ -69,9 +73,9 @@ def set_plot_style():
'legend.fontsize': 'medium', 'legend.fontsize': 'medium',
'lines.linewidth': 1.5, 'lines.linewidth': 1.5,
'pdf.fonttype': 42, 'pdf.fonttype': 42,
-                # 'font.family': 'Times New Roman'
}]) }])
def plot_map(n, ax=None, attribute='p_nom', opts={}): def plot_map(n, ax=None, attribute='p_nom', opts={}):
if ax is None: if ax is None:
ax = plt.gca() ax = plt.gca()
@ -114,16 +118,11 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
bus_sizes=0, bus_sizes=0,
bus_colors=tech_colors, bus_colors=tech_colors,
boundaries=map_boundaries, boundaries=map_boundaries,
-           geomap=True, # TODO : Turn to False, after the release of PyPSA 0.14.2 (refer to https://github.com/PyPSA/PyPSA/issues/75)
+           geomap=False,
ax=ax) ax=ax)
ax.set_aspect('equal') ax.set_aspect('equal')
ax.axis('off') ax.axis('off')
-    # x1, y1, x2, y2 = map_boundaries
-    # ax.set_xlim(x1, x2)
-    # ax.set_ylim(y1, y2)
# Rasterize basemap # Rasterize basemap
# TODO : Check if this also works with cartopy # TODO : Check if this also works with cartopy
for c in ax.collections[:2]: c.set_rasterized(True) for c in ax.collections[:2]: c.set_rasterized(True)
@ -176,13 +175,9 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
return fig return fig
-#n = load_network_for_plots(snakemake.input.network, opts, combine_hydro_ps=False)
def plot_total_energy_pie(n, ax=None): def plot_total_energy_pie(n, ax=None):
"""Add total energy pie plot""" if ax is None: ax = plt.gca()
if ax is None:
ax = plt.gca()
ax.set_title('Energy per technology', fontdict=dict(fontsize="medium")) ax.set_title('Energy per technology', fontdict=dict(fontsize="medium"))
@ -190,7 +185,7 @@ def plot_total_energy_pie(n, ax=None):
patches, texts, autotexts = ax.pie(e_primary, patches, texts, autotexts = ax.pie(e_primary,
startangle=90, startangle=90,
-                                       labels = e_primary.rename(opts['nice_names_n']).index,
+                                       labels = e_primary.rename(opts['nice_names']).index,
autopct='%.0f%%', autopct='%.0f%%',
shadow=False, shadow=False,
colors = [opts['tech_colors'][tech] for tech in e_primary.index]) colors = [opts['tech_colors'][tech] for tech in e_primary.index])
@ -200,9 +195,7 @@ def plot_total_energy_pie(n, ax=None):
t2.remove() t2.remove()
def plot_total_cost_bar(n, ax=None): def plot_total_cost_bar(n, ax=None):
"""Add average system cost bar plot""" if ax is None: ax = plt.gca()
if ax is None:
ax = plt.gca()
total_load = (n.snapshot_weightings * n.loads_t.p.sum(axis=1)).sum() total_load = (n.snapshot_weightings * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts['tech_colors'] tech_colors = opts['tech_colors']
@ -240,14 +233,13 @@ def plot_total_cost_bar(n, ax=None):
if abs(data[-1]) < 5: if abs(data[-1]) < 5:
continue continue
-        text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names_n'].get(ind,ind))
+        text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind))
texts.append(text) texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]") ax.set_ylabel("Average system cost [Eur/MWh]")
-    ax.set_ylim([0, 80]) # opts['costs_max']])
+    ax.set_ylim([0, opts.get('costs_max', 80)])
ax.set_xlim([0, 1]) ax.set_xlim([0, 1])
-    #ax.set_xticks([0.5])
-    ax.set_xticklabels([]) #["w/o\nEp", "w/\nEp"]
+    ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted') ax.grid(True, axis="y", color='k', linestyle='dotted')
@ -280,8 +272,6 @@ if __name__ == "__main__":
ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45]) ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45])
plot_total_cost_bar(n, ax2) plot_total_cost_bar(n, ax2)
-    #fig.tight_layout()
ll = snakemake.wildcards.ll ll = snakemake.wildcards.ll
ll_type = ll[0] ll_type = ll[0]
ll_factor = ll[1:] ll_factor = ll[1:]
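make_legend_circles_for (context above) uses radius=(s/scale)**0.5 because matplotlib's Circle is parameterised by radius while legend entries should compare by area. A quick numerical check:

import numpy as np

sizes = np.array([1., 4., 9.])
radii = (sizes / 1.0) ** 0.5          # scale = 1.0 assumed
areas = np.pi * radii ** 2
print(areas / areas[0])  # [1. 4. 9.] -> areas stay proportional to the sizes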

View File

@ -19,19 +19,19 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import pypsa import pypsa
import pandas as pd import pandas as pd
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
+logger = logging.getLogger(__name__)
def cum_p_nom_max(net, tech, country=None): def cum_p_nom_max(net, tech, country=None):
carrier_b = net.generators.carrier == tech carrier_b = net.generators.carrier == tech
-    generators = \
-        pd.DataFrame(dict(
+    generators = pd.DataFrame(dict(
p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'], p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'],
p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(), p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(),
country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country) country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country)

View File

@ -21,41 +21,19 @@ Description
import os import os
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import pandas as pd import pandas as pd
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
-#consolidate and rename
+logger = logging.getLogger(__name__)
 def rename_techs(label):
-    if label.startswith("central "):
-        label = label[len("central "):]
-    elif label.startswith("urban "):
-        label = label[len("urban "):]
-    if "retrofitting" in label:
-        label = "building retrofitting"
     elif "H2" in label:
         label = "hydrogen storage"
-    elif "CHP" in label:
-        label = "CHP"
-    elif "water tank" in label:
-        label = "water tanks"
-    elif label == "water tanks":
-        label = "hot water storage"
-    elif "gas" in label and label != "gas boiler":
-        label = "natural gas"
-    elif "solar thermal" in label:
-        label = "solar thermal"
     elif label == "solar":
         label = "solar PV"
-    elif label == "heat pump":
-        label = "air heat pump"
-    elif label == "Sabatier":
-        label = "methanation"
-    elif label == "offwind":
-        label = "offshore wind"
     elif label == "offwind-ac":
         label = "offshore wind ac"
     elif label == "offwind-dc":
@ -68,15 +46,14 @@ def rename_techs(label):
         label = "hydroelectricity"
     elif label == "PHS":
         label = "hydroelectricity"
-    elif label == "co2 Store":
-        label = "DAC"
     elif "battery" in label:
         label = "battery storage"
     return label
-preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","building retrofitting","ground heat pump","air heat pump","resistive heater","CHP","OCGT","gas boiler","gas","natural gas","methanation","hydrogen storage","battery storage","hot water storage"])
+preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"])
def plot_costs(infn, fn=None): def plot_costs(infn, fn=None):

View File

@ -37,11 +37,26 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import pandas as pd import pandas as pd
+logger = logging.getLogger(__name__)
+def multiply(s):
+    return s.str[0].astype(float) * s.str[1].astype(float)
+
+
+def extract_coordinates(s):
+    regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) "
+             r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)")
+    e = s.str.extract(regex, expand=True)
+    lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.})
+    lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.})
+    return lon, lat
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
-        from _helpers import mock_snakemake #rule must be enabled in config
+        from _helpers import mock_snakemake
@ -50,19 +65,11 @@ if __name__ == "__main__":
links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0] links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0]
-    def extract_coordinates(s):
-        regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) "
-                 r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)")
-        e = s.str.extract(regex, expand=True)
-        lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.})
-        lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.})
-        return lon, lat
-
-    m_b = links_p_nom["Power (MW)"].str.contains('x').fillna(False)
-    def multiply(s): return s.str[0].astype(float) * s.str[1].astype(float)
-    links_p_nom.loc[m_b, "Power (MW)"] = links_p_nom.loc[m_b, "Power (MW)"].str.split('x').pipe(multiply)
-    links_p_nom["Power (MW)"] = links_p_nom["Power (MW)"].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
+    mw = "Power (MW)"
+    m_b = links_p_nom[mw].str.contains('x').fillna(False)
+    links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply)
+    links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1']) links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1'])
links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2']) links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2'])
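The two helpers hoisted above parse Wikipedia's HVDC table: extract_coordinates converts degree/minute/second strings to decimal degrees, and multiply resolves '2 x 600'-style ratings. A worked example on made-up rows:

import pandas as pd

coords = pd.Series(["55°41′13″N 12°05′56″E"])
regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) "
         r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)")
e = coords.str.extract(regex, expand=True)
lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': 1., 'S': -1.})
print(round(lat[0], 4))  # 55.6869 = 55 + 41/60 + 13/3600

mw = pd.Series(["2 x 600"])
parts = mw.str.split('x')
print(parts.str[0].astype(float) * parts.str[1].astype(float))  # 0    1200.0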

View File

@ -55,19 +55,21 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import configure_logging
-from add_electricity import load_costs, update_transmission_costs
-from six import iteritems
-import numpy as np
 import re
 import pypsa
+import numpy as np
 import pandas as pd
+
+from six import iteritems
+from add_electricity import load_costs, update_transmission_costs
 
 idx = pd.IndexSlice
+
+logger = logging.getLogger(__name__)
def add_co2limit(n, Nyears=1., factor=None): def add_co2limit(n, Nyears=1., factor=None):
if factor is not None: if factor is not None:
@ -129,10 +131,10 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1):
n.add('GlobalConstraint', f'l{ll_type}_limit', n.add('GlobalConstraint', f'l{ll_type}_limit',
type=f'transmission_{con_type}_limit', type=f'transmission_{con_type}_limit',
sense='<=', constant=rhs, carrier_attribute='AC, DC') sense='<=', constant=rhs, carrier_attribute='AC, DC')
return n return n
def average_every_nhours(n, offset): def average_every_nhours(n, offset):
logger.info('Resampling the network to {}'.format(offset)) logger.info('Resampling the network to {}'.format(offset))
m = n.copy(with_time=False) m = n.copy(with_time=False)
@ -160,7 +162,7 @@ if __name__ == "__main__":
opts = snakemake.wildcards.opts.split('-') opts = snakemake.wildcards.opts.split('-')
n = pypsa.Network(snakemake.input[0]) n = pypsa.Network(snakemake.input[0])
-    Nyears = n.snapshot_weightings.sum()/8760.
+    Nyears = n.snapshot_weightings.sum() / 8760.
set_line_s_max_pu(n) set_line_s_max_pu(n)
@ -179,6 +181,7 @@ if __name__ == "__main__":
add_co2limit(n, Nyears, float(m[0])) add_co2limit(n, Nyears, float(m[0]))
else: else:
add_co2limit(n, Nyears) add_co2limit(n, Nyears)
+            break
for o in opts: for o in opts:
oo = o.split("+") oo = o.split("+")
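average_every_nhours (context above) resamples all time series to a coarser offset while growing the snapshot weightings, so annual totals are preserved. The idea on toy hourly data with an assumed '3H' offset:

import pandas as pd

snapshots = pd.date_range('2013-01-01', periods=6, freq='H')
load = pd.Series([1., 2., 3., 4., 5., 6.], index=snapshots)
weights = pd.Series(1., index=snapshots)

load_3h = load.resample('3H').mean()        # averaged values
weights_3h = weights.resample('3H').sum()   # weights grow to 3 h each
assert (load * weights).sum() == (load_3h * weights_3h).sum()  # 21.0 either way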

View File

@ -33,14 +33,15 @@ The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/35179
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import progress_retrieve, configure_logging
-from pathlib import Path
 import tarfile
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
-    # Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
snakemake = mock_snakemake('retrieve_databundle') snakemake = mock_snakemake('retrieve_databundle')

View File

@ -30,10 +30,11 @@ This rule, as a substitute for :mod:`build_natura_raster`, downloads an already
""" """
import logging import logging
-logger = logging.getLogger(__name__)
 from _helpers import progress_retrieve, configure_logging
+
+logger = logging.getLogger(__name__)
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake

View File

@ -83,7 +83,6 @@ The rule :mod:`simplify_network` does up to four things:
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
from cluster_network import clustering_for_n_clusters, cluster_regions from cluster_network import clustering_for_n_clusters, cluster_regions
@ -101,7 +100,8 @@ import pypsa
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport
-idx = pd.IndexSlice
+logger = logging.getLogger(__name__)
def simplify_network_to_380(n): def simplify_network_to_380(n):
## All goes to v_nom == 380 ## All goes to v_nom == 380
@ -138,6 +138,7 @@ def simplify_network_to_380(n):
return n, trafo_map return n, trafo_map
def _prepare_connection_costs_per_link(n): def _prepare_connection_costs_per_link(n):
if n.links.empty: return {} if n.links.empty: return {}
@ -156,6 +157,7 @@ def _prepare_connection_costs_per_link(n):
return connection_costs_per_link return connection_costs_per_link
def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, buses=None): def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, buses=None):
if connection_costs_per_link is None: if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n) connection_costs_per_link = _prepare_connection_costs_per_link(n)
@ -175,6 +177,7 @@ def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None,
return connection_costs_to_bus return connection_costs_to_bus
def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus): def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
for tech in connection_costs_to_bus: for tech in connection_costs_to_bus:
tech_b = n.generators.carrier == tech tech_b = n.generators.carrier == tech
@ -184,6 +187,7 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} " logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
.format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems()))) .format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}): def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}):
def replace_components(n, c, df, pnl): def replace_components(n, c, df, pnl):
n.mremove(c, n.df(c).index) n.mremove(c, n.df(c).index)
@ -208,6 +212,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate
df = n.df(c) df = n.df(c)
n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)]) n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)])
def simplify_links(n): def simplify_links(n):
## Complex multi-node links are folded into end-points ## Complex multi-node links are folded into end-points
logger.info("Simplifying connected link components") logger.info("Simplifying connected link components")
@ -303,6 +308,7 @@ def simplify_links(n):
_aggregate_and_move_components(n, busmap, connection_costs_to_bus) _aggregate_and_move_components(n, busmap, connection_costs_to_bus)
return n, busmap return n, busmap
def remove_stubs(n): def remove_stubs(n):
logger.info("Removing stubs") logger.info("Removing stubs")
@ -314,6 +320,7 @@ def remove_stubs(n):
return n, busmap return n, busmap
def cluster(n, n_clusters): def cluster(n, n_clusters):
logger.info("Clustering to {} buses".format(n_clusters)) logger.info("Clustering to {} buses".format(n_clusters))
@ -334,6 +341,7 @@ def cluster(n, n_clusters):
return clustering.network, clustering.busmap return clustering.network, clustering.busmap
if __name__ == "__main__": if __name__ == "__main__":
if 'snakemake' not in globals(): if 'snakemake' not in globals():
from _helpers import mock_snakemake from _helpers import mock_snakemake
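Each simplification step above (transformer removal, link simplification, stub removal, optional clustering) returns a busmap from old to new bus labels, and the script composes them at the end. Composition is just pandas Series.map, sketched with made-up labels:

import pandas as pd

trafo_map = pd.Series({'b1': 'b1', 'b2': 'b1', 'b3': 'b3'})  # assumed busmaps
stub_map = pd.Series({'b1': 'b1', 'b3': 'b1'})
composed = trafo_map.map(stub_map)
print(composed.to_dict())  # {'b1': 'b1', 'b2': 'b1', 'b3': 'b1'}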

View File

@ -85,7 +85,6 @@ Details (and errors made through this heuristic) are discussed in the paper
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import numpy as np import numpy as np
@ -94,9 +93,13 @@ import pandas as pd
import pypsa import pypsa
from pypsa.linopf import (get_var, define_constraints, linexpr, join_exprs, from pypsa.linopf import (get_var, define_constraints, linexpr, join_exprs,
network_lopf, ilopf) network_lopf, ilopf)
from pathlib import Path from pathlib import Path
from vresutils.benchmark import memory_logger from vresutils.benchmark import memory_logger
+logger = logging.getLogger(__name__)
def prepare_network(n, solve_opts): def prepare_network(n, solve_opts):
if 'clip_p_max_pu' in solve_opts: if 'clip_p_max_pu' in solve_opts:
@ -217,15 +220,16 @@ def extra_functionality(n, snapshots):
def solve_network(n, config, solver_log=None, opts='', **kwargs): def solve_network(n, config, solver_log=None, opts='', **kwargs):
solver_options = config['solving']['solver'].copy() solver_options = config['solving']['solver'].copy()
solver_name = solver_options.pop('name') solver_name = solver_options.pop('name')
-    track_iterations = config['solving']['options'].get('track_iterations', False)
-    min_iterations = config['solving']['options'].get('min_iterations', 4)
-    max_iterations = config['solving']['options'].get('max_iterations', 6)
+    cf_solving = config['solving']['options']
+    track_iterations = cf_solving.get('track_iterations', False)
+    min_iterations = cf_solving.get('min_iterations', 4)
+    max_iterations = cf_solving.get('max_iterations', 6)
 
     # add to network for extra_functionality
     n.config = config
     n.opts = opts
 
-    if config['solving']['options'].get('skip_iterations', False):
+    if cf_solving.get('skip_iterations', False):
network_lopf(n, solver_name=solver_name, solver_options=solver_options, network_lopf(n, solver_name=solver_name, solver_options=solver_options,
extra_functionality=extra_functionality, **kwargs) extra_functionality=extra_functionality, **kwargs)
else: else:
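The cf_solving refactor above aliases the nested options section once and then reads every option with a default. A minimal dict-based illustration of the pattern (the config stub is assumed):

config = {'solving': {'options': {'min_iterations': 2}}}  # assumed config stub

cf_solving = config['solving']['options']
track_iterations = cf_solving.get('track_iterations', False)  # falls back
min_iterations = cf_solving.get('min_iterations', 4)          # overridden to 2
max_iterations = cf_solving.get('max_iterations', 6)
print(track_iterations, min_iterations, max_iterations)  # False 2 6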

View File

@ -46,7 +46,6 @@ Description
""" """
import logging import logging
-logger = logging.getLogger(__name__)
from _helpers import configure_logging from _helpers import configure_logging
import pypsa import pypsa
@ -56,6 +55,8 @@ from pathlib import Path
from vresutils.benchmark import memory_logger from vresutils.benchmark import memory_logger
from solve_network import solve_network, prepare_network from solve_network import solve_network, prepare_network
+logger = logging.getLogger(__name__)
def set_parameters_from_optimized(n, n_optim): def set_parameters_from_optimized(n, n_optim):
lines_typed_i = n.lines.index[n.lines.type != ''] lines_typed_i = n.lines.index[n.lines.type != '']
n.lines.loc[lines_typed_i, 'num_parallel'] = \ n.lines.loc[lines_typed_i, 'num_parallel'] = \
@ -107,7 +108,8 @@ if __name__ == "__main__":
opts = snakemake.wildcards.opts.split('-') opts = snakemake.wildcards.opts.split('-')
config['solving']['options']['skip_iterations'] = False config['solving']['options']['skip_iterations'] = False
-    with memory_logger(filename=getattr(snakemake.log, 'memory', None), interval=30.) as mem:
+    fn = getattr(snakemake.log, 'memory', None)
+    with memory_logger(filename=fn, interval=30.) as mem:
n = prepare_network(n, solve_opts=snakemake.config['solving']['options']) n = prepare_network(n, solve_opts=snakemake.config['solving']['options'])
n = solve_network(n, config, solver_dir=tmpdir, n = solve_network(n, config, solver_dir=tmpdir,
solver_log=snakemake.log.solver, opts=opts) solver_log=snakemake.log.solver, opts=opts)
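Pulling `getattr(snakemake.log, 'memory', None)` into `fn` mainly shortens the `with` line. The point of `getattr` with a default is that it never raises `AttributeError`: if a rule declares no `memory` log file, `None` is passed on instead. A small sketch of that fallback, using `SimpleNamespace` as a hypothetical stand-in for snakemake's attribute-style log object:

```python
from types import SimpleNamespace

# Hypothetical stand-ins for snakemake.log in two rules: one that declares a
# 'memory' log file and one that does not.
log_with_memory = SimpleNamespace(solver='logs/solver.log', memory='logs/memory.log')
log_without_memory = SimpleNamespace(solver='logs/solver.log')

fn = getattr(log_with_memory, 'memory', None)            # 'logs/memory.log'
fallback = getattr(log_without_memory, 'memory', None)   # None, no exception

print(fn, fallback)
```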

View File

@@ -11,7 +11,6 @@ logging:
 summary_dir: results

 scenario:
-  sectors: [E]
   simpl: ['']
   ll: ['copt']
   clusters: [5]
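Dropping `sectors: [E]` removes an unused wildcard from the scenario block, whose keys are expanded into the workflow's target file names. A hedged sketch of that mechanism, with a made-up file pattern rather than one copied from the workflow:

```python
from snakemake.io import expand

# Scenario block as it now stands in this config file.
scenario = {'simpl': [''], 'll': ['copt'], 'clusters': [5]}

# expand() takes the Cartesian product of all wildcard values; the pattern
# string here is purely illustrative.
targets = expand("elec_s{simpl}_{clusters}_l{ll}.nc", **scenario)
print(targets)  # ['elec_s_5_lcopt.nc']
```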
@@ -188,143 +187,3 @@ solving:
     # barrier_convergetol: 1.e-5
     # feasopt_tolerance: 1.e-6
-
-plotting:
-  map:
-    figsize: [7, 7]
-    boundaries: [-10.2, 29, 35, 72]
-    p_nom:
-      bus_size_factor: 5.e+4
-      linewidth_factor: 3.e+3
-
-  costs_max: 800
-  costs_threshold: 1
-
-  energy_max: 15000.
-  energy_min: -10000.
-  energy_threshold: 50.
-  vre_techs: ["onwind", "offwind-ac", "offwind-dc", "solar", "ror"]
-  conv_techs: ["OCGT", "CCGT", "Nuclear", "Coal"]
-  storage_techs: ["hydro+PHS", "battery", "H2"]
-  load_carriers: ["AC load"]
-  AC_carriers: ["AC line", "AC transformer"]
-  link_carriers: ["DC line", "Converter AC-DC"]
-  tech_colors:
-    "onwind" : "#235ebc"
-    "onshore wind" : "#235ebc"
-    'offwind' : "#6895dd"
-    'offwind-ac' : "#6895dd"
-    'offshore wind' : "#6895dd"
-    'offshore wind ac' : "#6895dd"
-    'offwind-dc' : "#74c6f2"
-    'offshore wind dc' : "#74c6f2"
-    "hydro" : "#08ad97"
-    "hydro+PHS" : "#08ad97"
-    "PHS" : "#08ad97"
-    "hydro reservoir" : "#08ad97"
-    'hydroelectricity' : '#08ad97'
-    "ror" : "#4adbc8"
-    "run of river" : "#4adbc8"
-    'solar' : "#f9d002"
-    'solar PV' : "#f9d002"
-    'solar thermal' : '#ffef60'
-    'biomass' : '#0c6013'
-    'solid biomass' : '#06540d'
-    'biogas' : '#23932d'
-    'waste' : '#68896b'
-    'geothermal' : '#ba91b1'
-    "OCGT" : "#d35050"
-    "OCGT marginal" : "#d35050"
-    "OCGT-heat" : "#d35050"
-    "gas boiler" : "#d35050"
-    "gas boilers" : "#d35050"
-    "gas boiler marginal" : "#d35050"
-    "gas-to-power/heat" : "#d35050"
-    "gas" : "#d35050"
-    "natural gas" : "#d35050"
-    "CCGT" : "#b20101"
-    "CCGT marginal" : "#b20101"
-    "Nuclear" : "#ff9000"
-    "Nuclear marginal" : "#ff9000"
-    "nuclear" : "#ff9000"
-    "coal" : "#707070"
-    "Coal" : "#707070"
-    "Coal marginal" : "#707070"
-    "lignite" : "#9e5a01"
-    "Lignite" : "#9e5a01"
-    "Lignite marginal" : "#9e5a01"
-    "Oil" : "#262626"
-    "oil" : "#262626"
-    "H2" : "#ea048a"
-    "hydrogen storage" : "#ea048a"
-    "Sabatier" : "#a31597"
-    "methanation" : "#a31597"
-    "helmeth" : "#a31597"
-    "DAC" : "#d284ff"
-    "co2 stored" : "#e5e5e5"
-    "CO2 sequestration" : "#e5e5e5"
-    "battery" : "#b8ea04"
-    "battery storage" : "#b8ea04"
-    "Li ion" : "#b8ea04"
-    "BEV charger" : "#e2ff7c"
-    "V2G" : "#7a9618"
-    "transport fuel cell" : "#e884be"
-    "retrofitting" : "#e0d6a8"
-    "building retrofitting" : "#e0d6a8"
-    "heat pumps" : "#ff9768"
-    "heat pump" : "#ff9768"
-    "air heat pump" : "#ffbea0"
-    "ground heat pump" : "#ff7a3d"
-    "power-to-heat" : "#a59e7c"
-    "power-to-gas" : "#db8585"
-    "power-to-liquid" : "#a9acd1"
-    "Fischer-Tropsch" : "#a9acd1"
-    "resistive heater" : "#aa4925"
-    "water tanks" : "#401f75"
-    "hot water storage" : "#401f75"
-    "hot water charging" : "#351c5e"
-    "hot water discharging" : "#683ab2"
-    "CHP" : "#d80a56"
-    "CHP heat" : "#d80a56"
-    "CHP electric" : "#d80a56"
-    "district heating" : "#93864b"
-    "Ambient" : "#262626"
-    "Electric load" : "#f9d002"
-    "electricity" : "#f9d002"
-    "Heat load" : "#d35050"
-    "heat" : "#d35050"
-    "Transport load" : "#235ebc"
-    "transport" : "#235ebc"
-    "lines" : "#70af1d"
-    "transmission lines" : "#70af1d"
-    "AC-AC" : "#70af1d"
-    "AC line" : "#70af1d"
-    "links" : "#8a1caf"
-    "HVDC links" : "#8a1caf"
-    "DC-DC" : "#8a1caf"
-    "DC link" : "#8a1caf"
-  nice_names:
-    OCGT: "Open-Cycle Gas"
-    CCGT: "Combined-Cycle Gas"
-    offwind-ac: "Offshore Wind (AC)"
-    offwind-dc: "Offshore Wind (DC)"
-    onwind: "Onshore Wind"
-    solar: "Solar"
-    PHS: "Pumped Hydro Storage"
-    hydro: "Reservoir & Dam"
-    battery: "Battery Storage"
-    H2: "Hydrogen Storage"
-    lines: "Transmission lines"
-    ror: "Run of river"
-  nice_names_n:
-    OCGT: "Open-Cycle\nGas"
-    CCGT: "Combined-Cycle\nGas"
-    offwind-ac: "Offshore\nWind (AC)"
-    offwind-dc: "Offshore\nWind (DC)"
-    onwind: "Onshore\nWind"
-    battery: "Battery\nStorage"
-    H2: "Hydrogen\nStorage"
-    lines: "Transmission\nlines"
-    ror: "Run of\nriver"
-    PHS: "Pumped Hydro\nStorage"
-    hydro: "Reservoir\n& Dam"