[pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
Author: pre-commit-ci[bot]
Date: 2022-09-16 13:04:04 +00:00
Parent: e9ea4c5e53
Commit: 5d1ef8a640
47 changed files with 4578 additions and 2951 deletions
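For context: commits like this are pushed automatically by pre-commit.ci, which runs the hooks declared in the repository's .pre-commit-config.yaml and commits whatever fixes they apply. The sketch below is only an illustration of a hook set that would produce the kinds of changes seen in this diff (black-style reformatting of doc/conf.py, snakefmt-style reformatting of the Snakefile, and trailing-whitespace/end-of-file fixes in the YAML and CSV files); the hook selection and pinned revisions are assumptions inferred from the diff, not read from the repository's actual configuration.

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.3.0  # placeholder revision, assumed
    hooks:
      - id: trailing-whitespace  # strips trailing spaces (many of the small YAML/CSV hunks below)
      - id: end-of-file-fixer    # ensures files end with exactly one newline
  - repo: https://github.com/psf/black
    rev: 22.8.0  # placeholder revision, assumed
    hooks:
      - id: black                # reformats Python files such as doc/conf.py
  - repo: https://github.com/snakemake/snakefmt
    rev: v0.6.1  # placeholder revision, assumed
    hooks:
      - id: snakefmt             # black-style formatting for Snakemake files (the Snakefile hunks)

With a configuration along these lines, the same fixes can be reproduced locally by running pre-commit run --all-files before pushing.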


@ -1,5 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: PyPSA Mailing List
url: https://groups.google.com/forum/#!forum/pypsa
about: Please ask and answer general usage questions here.
- name: PyPSA Mailing List
url: https://groups.google.com/forum/#!forum/pypsa
about: Please ask and answer general usage questions here.


@ -16,7 +16,7 @@ on:
branches:
- master
schedule:
- cron: "0 5 * * TUE"
- cron: "0 5 * * TUE"
env:
CACHE_NUMBER: 1 # Change this value to manually reset the environment cache
@ -28,63 +28,63 @@ jobs:
matrix:
include:
# Matrix required to handle caching with Mambaforge
- os: ubuntu-latest
label: ubuntu-latest
prefix: /usr/share/miniconda3/envs/pypsa-eur
- os: ubuntu-latest
label: ubuntu-latest
prefix: /usr/share/miniconda3/envs/pypsa-eur
- os: macos-latest
label: macos-latest
prefix: /Users/runner/miniconda3/envs/pypsa-eur
- os: macos-latest
label: macos-latest
prefix: /Users/runner/miniconda3/envs/pypsa-eur
- os: windows-latest
label: windows-latest
prefix: C:\Miniconda3\envs\pypsa-eur
- os: windows-latest
label: windows-latest
prefix: C:\Miniconda3\envs\pypsa-eur
name: ${{ matrix.label }}
runs-on: ${{ matrix.os }}
defaults:
run:
shell: bash -l {0}
steps:
- uses: actions/checkout@v2
- name: Setup secrets
run: |
echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
- uses: actions/checkout@v2
- name: Add solver to environment
run: |
echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml
- name: Setup secrets
run: |
echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
- name: Setup Mambaforge
uses: conda-incubator/setup-miniconda@v2
with:
miniforge-variant: Mambaforge
miniforge-version: latest
activate-environment: pypsa-eur
use-mamba: true
- name: Set cache date
run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
- name: Add solver to environment
run: |
echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml
- name: Create environment cache
uses: actions/cache@v2
id: cache
with:
path: ${{ matrix.prefix }}
key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }}
- name: Setup Mambaforge
uses: conda-incubator/setup-miniconda@v2
with:
miniforge-variant: Mambaforge
miniforge-version: latest
activate-environment: pypsa-eur
use-mamba: true
- name: Update environment due to outdated or unavailable cache
run: mamba env update -n pypsa-eur -f envs/environment.yaml
if: steps.cache.outputs.cache-hit != 'true'
- name: Set cache date
run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
- name: Test snakemake workflow
run: |
conda activate pypsa-eur
conda list
cp test/config.test1.yaml config.yaml
snakemake --cores all solve_all_networks
rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results
- name: Create environment cache
uses: actions/cache@v2
id: cache
with:
path: ${{ matrix.prefix }}
key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }}
- name: Update environment due to outdated or unavailable cache
run: mamba env update -n pypsa-eur -f envs/environment.yaml
if: steps.cache.outputs.cache-hit != 'true'
- name: Test snakemake workflow
run: |
conda activate pypsa-eur
conda list
cp test/config.test1.yaml config.yaml
snakemake --cores all solve_all_networks
rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results


@ -7,5 +7,5 @@ version: 2
python:
version: 3.8
install:
- requirements: doc/requirements.txt
- requirements: doc/requirements.txt
system_packages: true


@ -16,4 +16,4 @@ notebooks
doc
cutouts
data/bundle
*.nc
*.nc

Snakefile

@ -6,207 +6,312 @@ from os.path import normpath, exists
from shutil import copyfile, move
from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
HTTP = HTTPRemoteProvider()
if not exists("config.yaml"):
copyfile("config.default.yaml", "config.yaml")
configfile: "config.yaml"
run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""
COSTS = "resources/" + RDIR + "costs.csv"
ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4)
ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
wildcard_constraints:
simpl="[a-zA-Z0-9]*|all",
clusters="[0-9]+m?|all",
ll="(v|c)([0-9\.]+|opt|all)|all",
opts="[-+a-zA-Z0-9\.]*"
opts="[-+a-zA-Z0-9\.]*",
rule cluster_all_networks:
input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config['scenario'])
input:
expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config["scenario"]),
rule extra_components_all_networks:
input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config['scenario'])
input:
expand(
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config["scenario"]
),
rule prepare_all_networks:
input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])
input:
expand(
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"]
),
rule solve_all_networks:
input: expand("results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])
input:
expand(
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
**config["scenario"]
),
if config['enable'].get('prepare_links_p_nom', False):
if config["enable"].get("prepare_links_p_nom", False):
rule prepare_links_p_nom:
output: 'data/links_p_nom.csv'
log: "logs/" + RDIR + "prepare_links_p_nom.log"
output:
"data/links_p_nom.csv",
log:
"logs/" + RDIR + "prepare_links_p_nom.log",
threads: 1
resources: mem_mb=500
script: 'scripts/prepare_links_p_nom.py'
resources:
mem_mb=500,
script:
"scripts/prepare_links_p_nom.py"
datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls',
'eez/World_EEZ_v8_2014.shp',
'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp',
'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz',
'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif']
datafiles = [
"ch_cantons.csv",
"je-e-21.03.02.xls",
"eez/World_EEZ_v8_2014.shp",
"hydro_capacities.csv",
"naturalearth/ne_10m_admin_0_countries.shp",
"NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
"nama_10r_3popgdp.tsv.gz",
"nama_10r_3gdp.tsv.gz",
"corine/g250_clc06_V18_5.tif",
]
if not config.get('tutorial', False):
if not config.get("tutorial", False):
datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"])
if config['enable'].get('retrieve_databundle', True):
if config["enable"].get("retrieve_databundle", True):
rule retrieve_databundle:
output: expand('data/bundle/{file}', file=datafiles)
log: "logs/" + RDIR + "retrieve_databundle.log"
resources: mem_mb=1000
script: 'scripts/retrieve_databundle.py'
output:
expand("data/bundle/{file}", file=datafiles),
log:
"logs/" + RDIR + "retrieve_databundle.log",
resources:
mem_mb=1000,
script:
"scripts/retrieve_databundle.py"
rule retrieve_load_data:
input: HTTP.remote("data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", keep_local=True, static=True)
output: "data/load_raw.csv"
resources: mem_mb=5000
input:
HTTP.remote(
"data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
keep_local=True,
static=True,
),
output:
"data/load_raw.csv",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
rule build_load_data:
input: "data/load_raw.csv"
output: "resources/" + RDIR + "load.csv"
log: "logs/" + RDIR + "build_load_data.log"
resources: mem_mb=5000
script: 'scripts/build_load_data.py'
input:
"data/load_raw.csv",
output:
"resources/" + RDIR + "load.csv",
log:
"logs/" + RDIR + "build_load_data.log",
resources:
mem_mb=5000,
script:
"scripts/build_load_data.py"
rule build_powerplants:
input:
base_network="networks/" + RDIR + "base.nc",
custom_powerplants="data/custom_powerplants.csv"
output: "resources/" + RDIR + "powerplants.csv"
log: "logs/" + RDIR + "build_powerplants.log"
custom_powerplants="data/custom_powerplants.csv",
output:
"resources/" + RDIR + "powerplants.csv",
log:
"logs/" + RDIR + "build_powerplants.log",
threads: 1
resources: mem_mb=5000
script: "scripts/build_powerplants.py"
resources:
mem_mb=5000,
script:
"scripts/build_powerplants.py"
rule base_network:
input:
eg_buses='data/entsoegridkit/buses.csv',
eg_lines='data/entsoegridkit/lines.csv',
eg_links='data/entsoegridkit/links.csv',
eg_converters='data/entsoegridkit/converters.csv',
eg_transformers='data/entsoegridkit/transformers.csv',
parameter_corrections='data/parameter_corrections.yaml',
links_p_nom='data/links_p_nom.csv',
links_tyndp='data/links_tyndp.csv',
eg_buses="data/entsoegridkit/buses.csv",
eg_lines="data/entsoegridkit/lines.csv",
eg_links="data/entsoegridkit/links.csv",
eg_converters="data/entsoegridkit/converters.csv",
eg_transformers="data/entsoegridkit/transformers.csv",
parameter_corrections="data/parameter_corrections.yaml",
links_p_nom="data/links_p_nom.csv",
links_tyndp="data/links_tyndp.csv",
country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
europe_shape="resources/" + RDIR + "europe_shape.geojson"
output: "networks/" + RDIR + "base.nc"
log: "logs/" + RDIR + "base_network.log"
benchmark: "benchmarks/" + RDIR + "base_network"
europe_shape="resources/" + RDIR + "europe_shape.geojson",
output:
"networks/" + RDIR + "base.nc",
log:
"logs/" + RDIR + "base_network.log",
benchmark:
"benchmarks/" + RDIR + "base_network"
threads: 1
resources: mem_mb=500
script: "scripts/base_network.py"
resources:
mem_mb=500,
script:
"scripts/base_network.py"
rule build_shapes:
input:
naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp',
eez='data/bundle/eez/World_EEZ_v8_2014.shp',
nuts3='data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp',
nuts3pop='data/bundle/nama_10r_3popgdp.tsv.gz',
nuts3gdp='data/bundle/nama_10r_3gdp.tsv.gz',
ch_cantons='data/bundle/ch_cantons.csv',
ch_popgdp='data/bundle/je-e-21.03.02.xls'
naturalearth="data/bundle/naturalearth/ne_10m_admin_0_countries.shp",
eez="data/bundle/eez/World_EEZ_v8_2014.shp",
nuts3="data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
nuts3pop="data/bundle/nama_10r_3popgdp.tsv.gz",
nuts3gdp="data/bundle/nama_10r_3gdp.tsv.gz",
ch_cantons="data/bundle/ch_cantons.csv",
ch_popgdp="data/bundle/je-e-21.03.02.xls",
output:
country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
europe_shape="resources/" + RDIR + "europe_shape.geojson",
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson"
log: "logs/" + RDIR + "build_shapes.log"
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
log:
"logs/" + RDIR + "build_shapes.log",
threads: 1
resources: mem_mb=500
script: "scripts/build_shapes.py"
resources:
mem_mb=500,
script:
"scripts/build_shapes.py"
rule build_bus_regions:
input:
country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
base_network="networks/" + RDIR + "base.nc"
base_network="networks/" + RDIR + "base.nc",
output:
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
log: "logs/" + RDIR + "build_bus_regions.log"
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
log:
"logs/" + RDIR + "build_bus_regions.log",
threads: 1
resources: mem_mb=1000
script: "scripts/build_bus_regions.py"
resources:
mem_mb=1000,
script:
"scripts/build_bus_regions.py"
if config["enable"].get("build_cutout", False):
if config['enable'].get('build_cutout', False):
rule build_cutout:
input:
input:
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
output: "cutouts/" + CDIR + "{cutout}.nc"
log: "logs/" + CDIR + "build_cutout/{cutout}.log"
benchmark: "benchmarks/" + CDIR + "build_cutout_{cutout}"
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
output:
"cutouts/" + CDIR + "{cutout}.nc",
log:
"logs/" + CDIR + "build_cutout/{cutout}.log",
benchmark:
"benchmarks/" + CDIR + "build_cutout_{cutout}"
threads: ATLITE_NPROCESSES
resources: mem_mb=ATLITE_NPROCESSES * 1000
script: "scripts/build_cutout.py"
resources:
mem_mb=ATLITE_NPROCESSES * 1000,
script:
"scripts/build_cutout.py"
if config['enable'].get('retrieve_cutout', True):
if config["enable"].get("retrieve_cutout", True):
rule retrieve_cutout:
input: HTTP.remote("zenodo.org/record/6382570/files/{cutout}.nc", keep_local=True, static=True)
output: "cutouts/" + CDIR + "{cutout}.nc"
log: "logs/" + CDIR + "retrieve_cutout_{cutout}.log"
resources: mem_mb=5000
input:
HTTP.remote(
"zenodo.org/record/6382570/files/{cutout}.nc",
keep_local=True,
static=True,
),
output:
"cutouts/" + CDIR + "{cutout}.nc",
log:
"logs/" + CDIR + "retrieve_cutout_{cutout}.log",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
if config['enable'].get('retrieve_cost_data', True):
if config["enable"].get("retrieve_cost_data", True):
rule retrieve_cost_data:
input: HTTP.remote(f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv", keep_local=True)
output: COSTS
log: "logs/" + RDIR + "retrieve_cost_data.log"
resources: mem_mb=5000
input:
HTTP.remote(
f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv",
keep_local=True,
),
output:
COSTS,
log:
"logs/" + RDIR + "retrieve_cost_data.log",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
if config['enable'].get('build_natura_raster', False):
if config["enable"].get("build_natura_raster", False):
rule build_natura_raster:
input:
natura="data/bundle/natura/Natura2000_end2015.shp",
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config['atlite'])
output: "resources/" + RDIR + "natura.tiff"
resources: mem_mb=5000
log: "logs/" + RDIR + "build_natura_raster.log"
script: "scripts/build_natura_raster.py"
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
output:
"resources/" + RDIR + "natura.tiff",
resources:
mem_mb=5000,
log:
"logs/" + RDIR + "build_natura_raster.log",
script:
"scripts/build_natura_raster.py"
if config['enable'].get('retrieve_natura_raster', True):
if config["enable"].get("retrieve_natura_raster", True):
rule retrieve_natura_raster:
input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True, static=True)
output: "resources/" + RDIR + "natura.tiff"
resources: mem_mb=5000
input:
HTTP.remote(
"zenodo.org/record/4706686/files/natura.tiff",
keep_local=True,
static=True,
),
output:
"resources/" + RDIR + "natura.tiff",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
rule retrieve_ship_raster:
input: HTTP.remote("https://zenodo.org/record/6953563/files/shipdensity_global.zip", keep_local=True, static=True)
output: "data/shipdensity_global.zip"
resources: mem_mb=5000
input:
HTTP.remote(
"https://zenodo.org/record/6953563/files/shipdensity_global.zip",
keep_local=True,
static=True,
),
output:
"data/shipdensity_global.zip",
resources:
mem_mb=5000,
run:
move(input[0], output[0])
@ -214,74 +319,112 @@ rule retrieve_ship_raster:
rule build_ship_raster:
input:
ship_density="data/shipdensity_global.zip",
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config['atlite'])
output: "resources/" + RDIR + "shipdensity_raster.nc"
log: "logs/" + RDIR + "build_ship_raster.log"
resources: mem_mb=5000
benchmark: "benchmarks/" + RDIR + "build_ship_raster"
script: "scripts/build_ship_raster.py"
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
output:
"resources/" + RDIR + "shipdensity_raster.nc",
log:
"logs/" + RDIR + "build_ship_raster.log",
resources:
mem_mb=5000,
benchmark:
"benchmarks/" + RDIR + "build_ship_raster"
script:
"scripts/build_ship_raster.py"
rule build_renewable_profiles:
input:
base_network="networks/" + RDIR + "base.nc",
corine="data/bundle/corine/g250_clc06_V18_5.tif",
natura=lambda w: ("resources/" + RDIR + "natura.tiff"
if config["renewable"][w.technology]["natura"]
else []),
gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc"
if "max_depth" in config["renewable"][w.technology].keys()
else []),
ship_density= lambda w: ("resources/" + RDIR + "shipdensity_raster.nc"
if "ship_threshold" in config["renewable"][w.technology].keys()
else []),
natura=lambda w: (
"resources/" + RDIR + "natura.tiff"
if config["renewable"][w.technology]["natura"]
else []
),
gebco=lambda w: (
"data/bundle/GEBCO_2014_2D.nc"
if "max_depth" in config["renewable"][w.technology].keys()
else []
),
ship_density=lambda w: (
"resources/" + RDIR + "shipdensity_raster.nc"
if "ship_threshold" in config["renewable"][w.technology].keys()
else []
),
country_shapes="resources/" + RDIR + "country_shapes.geojson",
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
regions=lambda w: ("resources/" + RDIR + "regions_onshore.geojson"
if w.technology in ('onwind', 'solar')
else "resources/" + RDIR + "regions_offshore.geojson"),
cutout=lambda w: "cutouts/" + CDIR + config["renewable"][w.technology]['cutout'] + ".nc"
output: profile="resources/" + RDIR + "profile_{technology}.nc",
log: "logs/" + RDIR + "build_renewable_profile_{technology}.log"
benchmark: "benchmarks/" + RDIR + "build_renewable_profiles_{technology}"
regions=lambda w: (
"resources/" + RDIR + "regions_onshore.geojson"
if w.technology in ("onwind", "solar")
else "resources/" + RDIR + "regions_offshore.geojson"
),
cutout=lambda w: "cutouts/"
+ CDIR
+ config["renewable"][w.technology]["cutout"]
+ ".nc",
output:
profile="resources/" + RDIR + "profile_{technology}.nc",
log:
"logs/" + RDIR + "build_renewable_profile_{technology}.log",
benchmark:
"benchmarks/" + RDIR + "build_renewable_profiles_{technology}"
threads: ATLITE_NPROCESSES
resources: mem_mb=ATLITE_NPROCESSES * 5000
wildcard_constraints: technology="(?!hydro).*" # Any technology other than hydro
script: "scripts/build_renewable_profiles.py"
resources:
mem_mb=ATLITE_NPROCESSES * 5000,
wildcard_constraints:
technology="(?!hydro).*", # Any technology other than hydro
script:
"scripts/build_renewable_profiles.py"
rule build_hydro_profile:
input:
country_shapes="resources/" + RDIR + "country_shapes.geojson",
eia_hydro_generation='data/eia_hydro_annual_generation.csv',
cutout=f"cutouts/" + CDIR + "{config['renewable']['hydro']['cutout']}.nc" if "hydro" in config["renewable"] else "config['renewable']['hydro']['cutout'] not configured",
output: "resources/" + RDIR + "profile_hydro.nc"
log: "logs/" + RDIR + "build_hydro_profile.log"
resources: mem_mb=5000
script: 'scripts/build_hydro_profile.py'
eia_hydro_generation="data/eia_hydro_annual_generation.csv",
cutout=f"cutouts/" + CDIR + "{config['renewable']['hydro']['cutout']}.nc"
if "hydro" in config["renewable"]
else "config['renewable']['hydro']['cutout'] not configured",
output:
"resources/" + RDIR + "profile_hydro.nc",
log:
"logs/" + RDIR + "build_hydro_profile.log",
resources:
mem_mb=5000,
script:
"scripts/build_hydro_profile.py"
rule add_electricity:
input:
**{
f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc"
for tech in config["renewable"]
},
**{
f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get("conventional", {None: {}}).items()
for attr, fn in d.items()
if str(fn).startswith("data/")
},
base_network="networks/" + RDIR + "base.nc",
tech_costs=COSTS,
regions="resources/" + RDIR + "regions_onshore.geojson",
powerplants="resources/" + RDIR + "powerplants.csv",
hydro_capacities='data/bundle/hydro_capacities.csv',
geth_hydro_capacities='data/geth2015_hydro_capacities.csv',
hydro_capacities="data/bundle/hydro_capacities.csv",
geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
load="resources/" + RDIR + "load.csv",
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
**{f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc"
for tech in config['renewable']},
**{f"conventional_{carrier}_{attr}": fn
for carrier, d in config.get('conventional', {None: {}}).items()
for attr, fn in d.items() if str(fn).startswith("data/")},
output: "networks/" + RDIR + "elec.nc"
log: "logs/" + RDIR + "add_electricity.log"
benchmark: "benchmarks/" + RDIR + "add_electricity"
output:
"networks/" + RDIR + "elec.nc",
log:
"logs/" + RDIR + "add_electricity.log",
benchmark:
"benchmarks/" + RDIR + "add_electricity"
threads: 1
resources: mem_mb=5000
script: "scripts/add_electricity.py"
resources:
mem_mb=5000,
script:
"scripts/add_electricity.py"
rule simplify_network:
@ -289,18 +432,22 @@ rule simplify_network:
network="networks/" + RDIR + "elec.nc",
tech_costs=COSTS,
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
output:
network="networks/" + RDIR + "elec_s{simpl}.nc",
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
busmap="resources/" + RDIR + "busmap_elec_s{simpl}.csv",
connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv"
log: "logs/" + RDIR + "simplify_network/elec_s{simpl}.log"
benchmark: "benchmarks/" + RDIR + "simplify_network/elec_s{simpl}"
connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv",
log:
"logs/" + RDIR + "simplify_network/elec_s{simpl}.log",
benchmark:
"benchmarks/" + RDIR + "simplify_network/elec_s{simpl}"
threads: 1
resources: mem_mb=4000
script: "scripts/simplify_network.py"
resources:
mem_mb=4000,
script:
"scripts/simplify_network.py"
rule cluster_network:
@ -309,57 +456,84 @@ rule cluster_network:
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
busmap=ancient("resources/" + RDIR + "busmap_elec_s{simpl}.csv"),
custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv"
if config["enable"].get("custom_busmap", False) else []),
tech_costs=COSTS
custom_busmap=(
"data/custom_busmap_elec_s{simpl}_{clusters}.csv"
if config["enable"].get("custom_busmap", False)
else []
),
tech_costs=COSTS,
output:
network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
regions_onshore="resources/"
+ RDIR
+ "regions_onshore_elec_s{simpl}_{clusters}.geojson",
regions_offshore="resources/"
+ RDIR
+ "regions_offshore_elec_s{simpl}_{clusters}.geojson",
busmap="resources/" + RDIR + "busmap_elec_s{simpl}_{clusters}.csv",
linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv"
log: "logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log"
benchmark: "benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}"
linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv",
log:
"logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log",
benchmark:
"benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}"
threads: 1
resources: mem_mb=6000
script: "scripts/cluster_network.py"
resources:
mem_mb=6000,
script:
"scripts/cluster_network.py"
rule add_extra_components:
input:
network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
tech_costs=COSTS,
output: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc"
log: "logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log"
benchmark: "benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec"
output:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
log:
"logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log",
benchmark:
"benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec"
threads: 1
resources: mem_mb=3000
script: "scripts/add_extra_components.py"
resources:
mem_mb=3000,
script:
"scripts/add_extra_components.py"
rule prepare_network:
input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", tech_costs=COSTS,
output: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
log: "logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"
benchmark: "benchmarks/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
tech_costs=COSTS,
output:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
"logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 1
resources: mem_mb=4000
script: "scripts/prepare_network.py"
resources:
mem_mb=4000,
script:
"scripts/prepare_network.py"
def memory(w):
factor = 3.
for o in w.opts.split('-'):
m = re.match(r'^(\d+)h$', o, re.IGNORECASE)
factor = 3.0
for o in w.opts.split("-"):
m = re.match(r"^(\d+)h$", o, re.IGNORECASE)
if m is not None:
factor /= int(m.group(1))
break
for o in w.opts.split('-'):
m = re.match(r'^(\d+)seg$', o, re.IGNORECASE)
for o in w.opts.split("-"):
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
if m is not None:
factor *= int(m.group(1)) / 8760
break
if w.clusters.endswith('m'):
if w.clusters.endswith("m"):
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
elif w.clusters == "all":
return int(factor * (18000 + 180 * 4000))
@ -368,44 +542,87 @@ def memory(w):
rule solve_network:
input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
output: "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
input:
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
log:
solver=normpath("logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"),
python="logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log"
benchmark: "benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
solver=normpath(
"logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
),
python="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
memory="logs/"
+ RDIR
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
benchmark:
"benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
threads: 4
resources: mem_mb=memory
shadow: "minimal"
script: "scripts/solve_network.py"
resources:
mem_mb=memory,
shadow:
"minimal"
script:
"scripts/solve_network.py"
rule solve_operations_network:
input:
unprepared="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
optimized="results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
output: "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc"
optimized="results/networks/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
output:
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
log:
solver=normpath("logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"),
python="logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
memory="logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log"
benchmark: "benchmarks/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
solver=normpath(
"logs/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
),
python="logs/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
memory="logs/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
benchmark:
(
"benchmarks/"
+ RDIR
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
)
threads: 4
resources: mem_mb=(lambda w: 5000 + 372 * int(w.clusters))
shadow: "minimal"
script: "scripts/solve_operations_network.py"
resources:
mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
shadow:
"minimal"
script:
"scripts/solve_operations_network.py"
rule plot_network:
input:
network="results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
tech_costs=COSTS
network="results/networks/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
tech_costs=COSTS,
output:
only_map="results/plots/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}",
ext="results/plots/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}"
log: "logs/" + RDIR + "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log"
script: "scripts/plot_network.py"
only_map="results/plots/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}",
ext="results/plots/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}",
log:
"logs/"
+ RDIR
+ "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log",
script:
"scripts/plot_network.py"
def input_make_summary(w):
@ -416,39 +633,79 @@ def input_make_summary(w):
ll = [l for l in ll if l[0] == w.ll[0]]
else:
ll = w.ll
return ([COSTS] +
expand("results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
ll=ll,
**{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
for k in ["simpl", "clusters", "opts"]}))
return [COSTS] + expand(
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
ll=ll,
**{
k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
for k in ["simpl", "clusters", "opts"]
}
)
rule make_summary:
input: input_make_summary
output: directory("results/summaries/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}")
log: "logs/" + RDIR + "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
resources: mem_mb=500
script: "scripts/make_summary.py"
input:
input_make_summary,
output:
directory(
"results/summaries/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
),
log:
"logs/"
+ RDIR
+ "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
resources:
mem_mb=500,
script:
"scripts/make_summary.py"
rule plot_summary:
input: "results/summaries/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
output: "results/plots/" + RDIR + "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}"
log: "logs/" + RDIR + "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log"
resources: mem_mb=500
script: "scripts/plot_summary.py"
input:
"results/summaries/"
+ RDIR
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}",
output:
"results/plots/"
+ RDIR
+ "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}",
log:
"logs/"
+ RDIR
+ "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log",
resources:
mem_mb=500,
script:
"scripts/plot_summary.py"
def input_plot_p_nom_max(w):
return [("results/networks/" + RDIR + "elec_s{simpl}{maybe_cluster}.nc"
.format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w))
for c in w.clusts.split(",")]
return [
(
"results/networks/"
+ RDIR
+ "elec_s{simpl}{maybe_cluster}.nc".format(
maybe_cluster=("" if c == "full" else ("_" + c)), **w
)
)
for c in w.clusts.split(",")
]
rule plot_p_nom_max:
input: input_plot_p_nom_max
output: "results/plots/" + RDIR + "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}"
log: "logs/" + RDIR + "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log"
resources: mem_mb=500
script: "scripts/plot_p_nom_max.py"
input:
input_plot_p_nom_max,
output:
"results/plots/"
+ RDIR
+ "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}",
log:
"logs/"
+ RDIR
+ "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log",
resources:
mem_mb=500,
script:
"scripts/plot_p_nom_max.py"


@ -9,7 +9,7 @@ logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
run:
run:
name: "" # use this to keep track of runs with different settings
shared_cutouts: false # set to true to share the default cutout(s) across runs
@ -52,7 +52,7 @@ electricity:
max_hours:
battery: 6
H2: 168
H2: 168
extendable_carriers:
Generator: [solar, onwind, offwind-ac, offwind-dc, OCGT]
@ -63,7 +63,7 @@ electricity:
# use pandas query strings here, e.g. Country not in ['Germany']
powerplants_filter: (DateOut >= 2022 or DateOut != DateOut)
# use pandas query strings here, e.g. Country in ['Germany']
custom_powerplants: false
custom_powerplants: false
conventional_carriers: [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
renewable_carriers: [solar, onwind, offwind-ac, offwind-dc, hydro]
@ -120,8 +120,7 @@ renewable:
corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 31, 32]
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
@ -182,8 +181,7 @@ renewable:
# This correction factor of 0.854337 may be in order if using reanalysis data.
# for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304
# correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true
excluder_resolution: 100
potential: simple # or conservative
@ -195,7 +193,7 @@ renewable:
hydro_max_hours: "energy_capacity_totals_by_country" # one of energy_capacity_totals_by_country, estimate_by_large_installations or a float
clip_min_inflow: 1.0
conventional:
conventional:
nuclear:
p_max_pu: "data/nuclear_p_max_pu.csv" # float of file name
@ -221,7 +219,7 @@ transformers:
type: ''
load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false
@ -304,7 +302,7 @@ solving:
plotting:
map:
figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72]
boundaries: [-10.2, 29, 35, 72]
p_nom:
bus_size_factor: 5.e+4
linewidth_factor: 3.e+3
@ -323,50 +321,50 @@ plotting:
AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"]
tech_colors:
"onwind" : "#235ebc"
"onshore wind" : "#235ebc"
'offwind' : "#6895dd"
'offwind-ac' : "#6895dd"
'offshore wind' : "#6895dd"
'offshore wind ac' : "#6895dd"
'offwind-dc' : "#74c6f2"
'offshore wind dc' : "#74c6f2"
"hydro" : "#08ad97"
"hydro+PHS" : "#08ad97"
"PHS" : "#08ad97"
"hydro reservoir" : "#08ad97"
'hydroelectricity' : '#08ad97'
"ror" : "#4adbc8"
"run of river" : "#4adbc8"
'solar' : "#f9d002"
'solar PV' : "#f9d002"
'solar thermal' : '#ffef60'
'biomass' : '#0c6013'
'solid biomass' : '#06540d'
'biogas' : '#23932d'
'waste' : '#68896b'
'geothermal' : '#ba91b1'
"OCGT" : "#d35050"
"gas" : "#d35050"
"natural gas" : "#d35050"
"CCGT" : "#b20101"
"nuclear" : "#ff9000"
"coal" : "#707070"
"lignite" : "#9e5a01"
"oil" : "#262626"
"H2" : "#ea048a"
"hydrogen storage" : "#ea048a"
"battery" : "#b8ea04"
"Electric load" : "#f9d002"
"electricity" : "#f9d002"
"lines" : "#70af1d"
"transmission lines" : "#70af1d"
"AC-AC" : "#70af1d"
"AC line" : "#70af1d"
"links" : "#8a1caf"
"HVDC links" : "#8a1caf"
"DC-DC" : "#8a1caf"
"DC link" : "#8a1caf"
"onwind": "#235ebc"
"onshore wind": "#235ebc"
'offwind': "#6895dd"
'offwind-ac': "#6895dd"
'offshore wind': "#6895dd"
'offshore wind ac': "#6895dd"
'offwind-dc': "#74c6f2"
'offshore wind dc': "#74c6f2"
"hydro": "#08ad97"
"hydro+PHS": "#08ad97"
"PHS": "#08ad97"
"hydro reservoir": "#08ad97"
'hydroelectricity': '#08ad97'
"ror": "#4adbc8"
"run of river": "#4adbc8"
'solar': "#f9d002"
'solar PV': "#f9d002"
'solar thermal': '#ffef60'
'biomass': '#0c6013'
'solid biomass': '#06540d'
'biogas': '#23932d'
'waste': '#68896b'
'geothermal': '#ba91b1'
"OCGT": "#d35050"
"gas": "#d35050"
"natural gas": "#d35050"
"CCGT": "#b20101"
"nuclear": "#ff9000"
"coal": "#707070"
"lignite": "#9e5a01"
"oil": "#262626"
"H2": "#ea048a"
"hydrogen storage": "#ea048a"
"battery": "#b8ea04"
"Electric load": "#f9d002"
"electricity": "#f9d002"
"lines": "#70af1d"
"transmission lines": "#70af1d"
"AC-AC": "#70af1d"
"AC line": "#70af1d"
"links": "#8a1caf"
"HVDC links": "#8a1caf"
"DC-DC": "#8a1caf"
"DC link": "#8a1caf"
nice_names:
OCGT: "Open-Cycle Gas"
CCGT: "Combined-Cycle Gas"


@ -9,7 +9,7 @@ logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
run:
run:
name: ""
scenario:
@ -73,8 +73,7 @@ renewable:
corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 31, 32]
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
@ -126,8 +125,7 @@ renewable:
# power." Applied Energy 135 (2014): 704-720.
# This correction factor of 0.854337 may be in order if using reanalysis data.
# correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true
excluder_resolution: 200
potential: simple # or conservative
@ -155,9 +153,9 @@ transformers:
type: ''
load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false
scaling_factor: 1.0
@ -218,7 +216,7 @@ solving:
plotting:
map:
figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72]
boundaries: [-10.2, 29, 35, 72]
p_nom:
bus_size_factor: 5.e+4
linewidth_factor: 3.e+3
@ -237,50 +235,50 @@ plotting:
AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"]
tech_colors:
"onwind" : "#235ebc"
"onshore wind" : "#235ebc"
'offwind' : "#6895dd"
'offwind-ac' : "#6895dd"
'offshore wind' : "#6895dd"
'offshore wind ac' : "#6895dd"
'offwind-dc' : "#74c6f2"
'offshore wind dc' : "#74c6f2"
"hydro" : "#08ad97"
"hydro+PHS" : "#08ad97"
"PHS" : "#08ad97"
"hydro reservoir" : "#08ad97"
'hydroelectricity' : '#08ad97'
"ror" : "#4adbc8"
"run of river" : "#4adbc8"
'solar' : "#f9d002"
'solar PV' : "#f9d002"
'solar thermal' : '#ffef60'
'biomass' : '#0c6013'
'solid biomass' : '#06540d'
'biogas' : '#23932d'
'waste' : '#68896b'
'geothermal' : '#ba91b1'
"OCGT" : "#d35050"
"gas" : "#d35050"
"natural gas" : "#d35050"
"CCGT" : "#b20101"
"nuclear" : "#ff9000"
"coal" : "#707070"
"lignite" : "#9e5a01"
"oil" : "#262626"
"H2" : "#ea048a"
"hydrogen storage" : "#ea048a"
"battery" : "#b8ea04"
"Electric load" : "#f9d002"
"electricity" : "#f9d002"
"lines" : "#70af1d"
"transmission lines" : "#70af1d"
"AC-AC" : "#70af1d"
"AC line" : "#70af1d"
"links" : "#8a1caf"
"HVDC links" : "#8a1caf"
"DC-DC" : "#8a1caf"
"DC link" : "#8a1caf"
"onwind": "#235ebc"
"onshore wind": "#235ebc"
'offwind': "#6895dd"
'offwind-ac': "#6895dd"
'offshore wind': "#6895dd"
'offshore wind ac': "#6895dd"
'offwind-dc': "#74c6f2"
'offshore wind dc': "#74c6f2"
"hydro": "#08ad97"
"hydro+PHS": "#08ad97"
"PHS": "#08ad97"
"hydro reservoir": "#08ad97"
'hydroelectricity': '#08ad97'
"ror": "#4adbc8"
"run of river": "#4adbc8"
'solar': "#f9d002"
'solar PV': "#f9d002"
'solar thermal': '#ffef60'
'biomass': '#0c6013'
'solid biomass': '#06540d'
'biogas': '#23932d'
'waste': '#68896b'
'geothermal': '#ba91b1'
"OCGT": "#d35050"
"gas": "#d35050"
"natural gas": "#d35050"
"CCGT": "#b20101"
"nuclear": "#ff9000"
"coal": "#707070"
"lignite": "#9e5a01"
"oil": "#262626"
"H2": "#ea048a"
"hydrogen storage": "#ea048a"
"battery": "#b8ea04"
"Electric load": "#f9d002"
"electricity": "#f9d002"
"lines": "#70af1d"
"transmission lines": "#70af1d"
"AC-AC": "#70af1d"
"AC line": "#70af1d"
"links": "#8a1caf"
"HVDC links": "#8a1caf"
"DC-DC": "#8a1caf"
"DC link": "#8a1caf"
nice_names:
OCGT: "Open-Cycle Gas"
CCGT: "Combined-Cycle Gas"


@ -47,4 +47,4 @@ Report generated on: 03-28-2022 11:20:48
"INTL.33-12-SWE-BKWH.A"," Sweden","58.133","59.006","54.369","62.801","67.106","70.095","60.134","70.95","69.016","70.911","71.778","62.603","73.588","73.905","58.508","67.421","51.2226","68.365","74.25","70.974","77.798","78.269","65.696","53.005","59.522","72.075","61.106","65.497","68.378","65.193","66.279","66.047","78.333","60.81","63.227","74.734","61.645","64.651","61.79","64.46583","71.6"
"INTL.33-12-CHE-BKWH.A"," Switzerland","32.481","35.13","35.974","35.069","29.871","31.731","32.576","34.328","35.437","29.477","29.497","31.756","32.373","35.416","38.678","34.817","28.458","33.70257","33.136","39.604","36.466","40.895","34.862","34.471","33.411","30.914","30.649","34.898","35.676","35.366","35.704","32.069","38.218","38.08","37.659","37.879","34.281","33.754","34.637","37.6596","40.62"
"INTL.33-12-TUR-BKWH.A"," Turkey","11.159","12.308","13.81","11.13","13.19","11.822","11.637","18.314","28.447","17.61","22.917","22.456","26.302","33.611","30.28","35.186","40.07","39.41784","41.80671","34.33","30.57","23.77","33.346","34.977","45.623","39.165","43.802","35.492","32.937","35.598","51.423","51.155","56.669","58.225","39.75","65.856","66.686","57.824","59.49","87.99714","77.39"
"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.067","5.283","5.035","4.015","4.74","3.195","4.795","4.873","4.547","5.026","5.094","5.178","3.566","5.655","5.286","4.667","5.832","6.246","5.342","5.836","5.189","5.89941","7.64"
"INTL.33-12-GBR-BKWH.A"," United Kingdom","3.921","4.369","4.543","4.548","3.992","4.08","4.767","4.13","4.915","4.732","5.119","4.534","5.329","4.237","5.043","4.79","3.359","4.127","5.067","5.283","5.035","4.015","4.74","3.195","4.795","4.873","4.547","5.026","5.094","5.178","3.566","5.655","5.286","4.667","5.832","6.246","5.342","5.836","5.189","5.89941","7.64"



@ -13,4 +13,4 @@ SI,0.94
ES,0.89
SE,0.82
CH,0.86
GB,0.67
GB,0.67



@ -71,4 +71,4 @@
.wy-nav-content {
max-width: 910px !important;
}
}
}


@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 20017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,19 +17,19 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../scripts'))
sys.path.insert(0, os.path.abspath("../scripts"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@ -36,47 +37,47 @@ sys.path.insert(0, os.path.abspath('../scripts'))
extensions = [
#'sphinx.ext.autodoc',
#'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.graphviz',
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.graphviz",
#'sphinx.ext.pngmath',
#'sphinxcontrib.tikz',
#'rinoh.frontend.sphinx',
'sphinx.ext.imgconverter', # for SVG conversion
"sphinx.ext.imgconverter", # for SVG conversion
]
autodoc_default_flags = ['members']
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'PyPSA-Eur'
copyright = u'2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)'
author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)'
project = "PyPSA-Eur"
copyright = "2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)"
author = "Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.6'
version = "0.6"
# The full version, including alpha/beta/rc tags.
release = u'0.6.0'
release = "0.6.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -87,37 +88,37 @@ language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
@ -127,35 +128,35 @@ todo_include_todos = True
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'display_version': True,
'sticky_navigation': True,
"display_version": True,
"sticky_navigation": True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# These folders are copied to the documentation's HTML output
html_static_path = ["_static"]
@ -167,130 +168,127 @@ html_css_files = ["theme_overrides.css"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyPSAEurdoc'
htmlhelp_basename = "PyPSAEurdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyPSA-Eur.tex', u'PyPSA-Eur Documentation',
u'author', 'manual'),
(master_doc, "PyPSA-Eur.tex", "PyPSA-Eur Documentation", "author", "manual"),
]
#Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
rinoh_documents = [(master_doc, # top-level file (index.rst)
'PyPSA-Eur', # output (target.pdf)
'PyPSA-Eur Documentation', # document title
'author')] # document author
# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
rinoh_documents = [
(
master_doc, # top-level file (index.rst)
"PyPSA-Eur", # output (target.pdf)
"PyPSA-Eur Documentation", # document title
"author",
)
] # document author
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pypsa-eur', u'PyPSA-Eur Documentation',
[author], 1)
]
man_pages = [(master_doc, "pypsa-eur", "PyPSA-Eur Documentation", [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
@ -299,23 +297,29 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyPSA-Eur', u'PyPSA-Eur Documentation',
author, 'PyPSA-Eur', 'One line description of project.',
'Miscellaneous'),
(
master_doc,
"PyPSA-Eur",
"PyPSA-Eur Documentation",
author,
"PyPSA-Eur",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
intersphinx_mapping = {"https://docs.python.org/": None}


@ -26,4 +26,4 @@ estimate_renewable_capacities,,,
-- from_opsd,--,bool,"Add capacities from OPSD data"
-- year,--,bool,"Renewable capacities are based on existing capacities reported by IRENA for the specified year"
-- expansion_limit,--,float or false,"Artificially limit maximum capacities to factor * (IRENA capacities), i.e. 110% of <years>'s capacities => expansion_limit: 1.1 false: Use estimated renewable potentials determine by the workflow"
-- technology_mapping,,,"Mapping between powerplantmatching and PyPSA-Eur technology names"
-- technology_mapping,,,"Mapping between powerplantmatching and PyPSA-Eur technology names"


View File

@ -1,6 +1,6 @@
,Unit,Values,Description
url,--,string,"Link to open power system data time series data."
power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards).
power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards).
interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which is interpolated linearly."
time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings."
manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`."
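Taken together, these options would appear in the configuration roughly as follows. A hedged sketch: the section name ``load:`` and every value shown are assumptions for illustration, not taken from this table.

.. code:: yaml

    load:
      url: "https://example.org/opsd_time_series.csv"  # placeholder for the OPSD time series link
      power_statistics: true         # ENTSO-E power statistics (files up to 2019) instead of transparency data
      interpolate_limit: 3           # linearly interpolate gaps of up to 3 consecutive hours
      time_shift_for_large_gaps: 1w  # pandas period string for copying time-slices into large gaps
      manual_adjustments: true       # apply :func:`manual_adjustment`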


View File

@ -9,4 +9,4 @@ Trigger, Description, Definition, Status
``BAU``, Add a per-``carrier`` minimal overall capacity; i.e. at least ``40GW`` of ``OCGT`` in Europe; configured in ``electricity: BAU_mincapacities``, ``solve_network``: `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L66>`__, Untested
``SAFE``, Add a capacity reserve margin of a certain fraction above the peak demand to which renewable generators and storage do *not* contribute. Ignores network., ``solve_network`` `add_opts_constraints() <https://github.com/PyPSA/pypsa-eur/blob/6b964540ed39d44079cdabddee8333f486d0cd63/scripts/solve_network.py#L73>`__, Untested
``carrier+{c|p|m}factor``,"Alter the capital cost (``c``), installable potential (``p``) or marginal costs (``m``) of a carrier by a factor. Example: ``solar+c0.5`` reduces the capital cost of solar to 50\% of original values.", ``prepare_network``, In active use
``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use
``CH4L``,"Add an overall absolute gas limit. If configured in ``electricity: gaslimit`` it is given in MWh thermal, if a float is appended, the overall gaslimit is assumed to be given in TWh thermal (e.g. ``CH4L200`` limits gas dispatch to 200 TWh termal)", ``prepare_network``: ``add_gaslimit()``, In active use
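As a rough illustration, several triggers can be chained into a single ``{opts}`` value; the placement under ``scenario: opts:`` and the dash-separated combination below are assumptions based on the wildcard usage described in this table.

.. code:: yaml

    scenario:
      opts: [BAU-CH4L200-solar+c0.5]  # BAU minimum capacities, 200 TWh thermal gas limit, solar capital cost halved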


View File

@ -35,7 +35,7 @@ It is common conduct to analyse energy system optimisation models for **multiple
e.g. assessing their sensitivity towards changing the temporal and/or geographical resolution or investigating how
investment changes as more ambitious greenhouse-gas emission reduction targets are applied.
The ``run`` section is used for running and storing scenarios with different configurations which are not covered by :ref:`wildcards`. It determines the path at which resources, networks and results are stored. Therefore the user can run different configurations within the same directory. If a run with a non-empty name should use cutouts shared across runs, set ``shared_cutouts`` to `true`.
The ``run`` section is used for running and storing scenarios with different configurations which are not covered by :ref:`wildcards`. It determines the path at which resources, networks and results are stored. Therefore the user can run different configurations within the same directory. If a run with a non-empty name should use cutouts shared across runs, set ``shared_cutouts`` to `true`.
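A minimal sketch of such a ``run`` block; apart from ``shared_cutouts``, the key names and values are assumptions and ``config.default.yaml`` below remains the authoritative reference.

.. code:: yaml

    run:
      name: "co2-sensitivity"  # non-empty name stores resources, networks and results per run
      shared_cutouts: true     # reuse cutouts shared across runs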
.. literalinclude:: ../config.default.yaml
:language: yaml
@ -107,7 +107,7 @@ Specifies the temporal range to build an energy system model for as arguments to
``atlite``
==========
Define and specify the ``atlite.Cutout`` used for calculating renewable potentials and time-series. All options except for ``features`` are directly used as `cutout parameters <https://atlite.readthedocs.io/en/latest/ref_api.html#cutout>`_.
Define and specify the ``atlite.Cutout`` used for calculating renewable potentials and time-series. All options except for ``features`` are directly used as `cutout parameters <https://atlite.readthedocs.io/en/latest/ref_api.html#cutout>`_.
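For illustration, a cutout definition could look like the sketch below; all keys except ``features`` are passed straight to ``atlite.Cutout``, and the values shown are assumptions.

.. code:: yaml

    atlite:
      cutouts:
        europe-2013-era5:
          module: era5             # atlite dataset backend
          x: [-12.0, 35.0]         # longitude extent (illustrative)
          y: [33.0, 72.0]          # latitude extent (illustrative)
          time: ["2013", "2013"]   # weather year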
.. literalinclude:: ../config.default.yaml
:language: yaml
@ -194,7 +194,7 @@ Define and specify the ``atlite.Cutout`` used for calculating renewable potentia
``conventional``
=============
Define additional generator attributes for conventional carrier types. If a scalar value is given, it is applied to all generators. However, if a string starting with "data/" is given, the value is interpreted as a path to a csv file with country-specific values. The values are then read in and applied to all generators of the given carrier in the given country. Note that the value(s) overwrite the existing values in the corresponding section of the ``generators`` dataframe.
Define additional generator attributes for conventional carrier types. If a scalar value is given, it is applied to all generators. However, if a string starting with "data/" is given, the value is interpreted as a path to a csv file with country-specific values. The values are then read in and applied to all generators of the given carrier in the given country. Note that the value(s) overwrite the existing values in the corresponding section of the ``generators`` dataframe.
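A hedged example of both variants, scalar and CSV path; the attribute names and the file path are hypothetical and only meant to show the shape of the section.

.. code:: yaml

    conventional:
      nuclear:
        p_max_pu: "data/nuclear_p_max_pu.csv"  # hypothetical per-country values read from a CSV file
      OCGT:
        efficiency: 0.39                       # hypothetical scalar applied to all OCGT generators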
.. literalinclude:: ../config.default.yaml
:language: yaml

View File

@ -195,7 +195,7 @@ The included ``.nc`` files are PyPSA network files which can be imported with Py
import pypsa
filename = "elec_s_1024_ec.nc" # example
filename = "elec_s_1024_ec.nc" # example
n = pypsa.Network(filename)
Licence

View File

@ -30,7 +30,7 @@ The :ref:`tutorial` uses a smaller cutout than required for the full model (30 M
.. note::
To download cutouts yourself from the `ECMWF ERA5 <https://software.ecmwf.int/wiki/display/CKB/ERA5+data+documentation>`_ you need to `set up the CDS API <https://cds.climate.copernicus.eu/api-how-to>`_.
**Relevant Settings**

View File

@ -10,7 +10,7 @@ Release Notes
Upcoming Release
================
* Individual commits are now tested against pre-commit hooks. This includes black style formatting, sorting of package imports, Snakefile formatting and others. Installation instructions for pre-commit can be found `here <https://pre-commit.com/>`_.
* Individual commits are now tested against pre-commit hooks. This includes black style formatting, sorting of package imports, Snakefile formatting and others. Installation instructions for pre-commit can be found `here <https://pre-commit.com/>`_.
* Pre-commit CI is now part of the repository's CI.
@ -24,7 +24,7 @@ PyPSA-Eur 0.6.0 (10th September 2022)
* When transforming all transmission lines to a unified voltage level of 380kV,
the workflow now preserves the transmission capacity rather than electrical
impedance and reactance.
impedance and reactance.
* Memory resources are now specified for all rules.
@ -45,29 +45,29 @@ PyPSA-Eur 0.5.0 (27th July 2022)
* New network topology extracted from the ENTSO-E interactive map.
* Added existing renewable capacities for all countries based on IRENA
statistics (IRENASTAT) using new ``powerplantmatching`` version:
* The corresponding ``config`` entries changed, cf. ``config.default.yaml``:
* old: ``estimate_renewable_capacities_from_capacity_stats``
* new: ``estimate_renewable_capacities``
* The estimation is enabled by setting the subkey ``enable`` to ``True``.
statistics (IRENASTAT) using new ``powerplantmatching`` version:
* The corresponding ``config`` entries changed, cf. ``config.default.yaml``:
* old: ``estimate_renewable_capacities_from_capacity_stats``
* new: ``estimate_renewable_capacities``
* The estimation is enabled by setting the subkey ``enable`` to ``True``.
* Configuration of reference year for capacities can be configured (default:
``2020``)
``2020``)
* The list of renewables provided by the OPSD database can be used as a basis,
using the tag ``from_opsd: True``. This adds the renewables from the
database and fills up the missing capacities with the heuristic
distribution.
distribution.
* Uniform expansion limit of renewable build-up based on existing capacities
can be configured using ``expansion_limit`` option (default: ``false``;
limited to determined renewable potentials)
limited to determined renewable potentials)
* Distribution of country-level capacities proportional to maximum annual
energy yield for each bus region
energy yield for each bus region
* The config key ``renewable_capacities_from_OPSD`` is deprecated and was moved
under the section, ``estimate_renewable_capacities``. To enable it, set
``from_opsd`` to ``True``.
``from_opsd`` to ``True``.
* Add operational reserve margin constraint analogous to `GenX implementation
<https://genxproject.github.io/GenX/dev/core/#Reserves>`_. Can be activated
with config setting ``electricity: operational_reserve:``.
with config setting ``electricity: operational_reserve:``.
* Implement country-specific Energy Availability Factors (EAFs) for nuclear
power plants based on IAEA 2018-2020 reported country averages. These are
@ -87,7 +87,7 @@ PyPSA-Eur 0.5.0 (27th July 2022)
* Hierarchical clustering was introduced. Distance metric is calculated from
renewable potentials on hourly (feature entry ends with ``-time``) or annual
(feature entry in config end with ``-cap``) values.
* Greedy modularity clustering was introduced. Distance metric is based on electrical distance taking into account the impedance of all transmission lines of the network.
* Techno-economic parameters of technologies (e.g. costs and efficiencies) will
@ -100,7 +100,7 @@ PyPSA-Eur 0.5.0 (27th July 2022)
<https://github.com/PyPSA/pypsa-eur/pull/184>`_].
* A new section ``conventional`` was added to the config file. This section
contains configurations for conventional carriers.
contains configurations for conventional carriers.
* Add configuration option to implement arbitrary generator attributes for
conventional generation technologies.
@ -127,25 +127,25 @@ PyPSA-Eur 0.5.0 (27th July 2022)
* The inclusion of renewable carriers is now specified in the config entry
``renewable_carriers``. Before this was done by commenting/uncommenting
sub-sections in the ``renewable`` config section.
sub-sections in the ``renewable`` config section.
* Now, all carriers that should be extendable have to be listed in the config
entry ``extendable_carriers``. Before, renewable carriers were always set to
be extendable. For backwards compatibility, the workflow is still looking at
the listed carriers under the ``renewable`` key. In the future, all of them
have to be listed under ``extendable_carriers``.
have to be listed under ``extendable_carriers``.
* It is now possible to set conventional power plants as extendable by adding
them to the list of extendable ``Generator`` carriers in the config.
them to the list of extendable ``Generator`` carriers in the config.
* Listing conventional carriers in ``extendable_carriers`` but not in
``conventional_carriers``, sets the corresponding conventional power plants as
extendable without a lower capacity bound of today's capacities.
extendable without a lower capacity bound of today's capacities.
* Now, conventional carriers have an assigned capital cost by default.
* Now, conventional carriers have an assigned capital cost by default.
* The ``build_year`` and ``lifetime`` column are now defined for conventional
power plants.
power plants.
* Use updated SARAH-2 and ERA5 cutouts with slightly wider scope to east and
additional variables.
@ -155,7 +155,7 @@ PyPSA-Eur 0.5.0 (27th July 2022)
<https://snakemake.readthedocs.io/en/stable/snakefiles/rules.html#standard-resources>`_
``mem_mb`` rather than ``mem``.
* The powerplants that have been shut down by 2021 are filtered out.
* The powerplants that have been shut down by 2021 are filtered out.
* Updated historical `EIA hydro generation data <https://www.eia.gov/international/data/world>`_.
@ -179,7 +179,7 @@ PyPSA-Eur 0.5.0 (27th July 2022)
* Fix crs bug. Change crs 4236 to 4326.
* ``powerplantmatching>=0.5.1`` is now required for ``IRENASTATS``.
* ``powerplantmatching>=0.5.1`` is now required for ``IRENASTATS``.
* Update rasterio version to correctly calculate exclusion raster.
@ -251,7 +251,7 @@ PyPSA-Eur 0.4.0 (22th September 2021)
(~factor 2). A lot of the code which calculated the land-use availability is now
outsourced and does not rely on ``glaes``, ``geokit`` anymore. This facilitates
the environment building and version compatibility of ``gdal``, ``libgdal`` with
other packages [`#224 <https://github.com/PyPSA/pypsa-eur/pull/224>`_].
other packages [`#224 <https://github.com/PyPSA/pypsa-eur/pull/224>`_].
* Implemented changes to ``n.snapshot_weightings`` in new PyPSA version v0.18
(cf. `PyPSA/PyPSA/#227 <https://github.com/PyPSA/PyPSA/pull/227>`_)
@ -274,7 +274,7 @@ PyPSA-Eur 0.4.0 (22th September 2021)
used or maintained.
* The connection cost of generators in :mod:`simplify_network` are now reported
in ``resources/connection_costs_s{simpl}.csv``
in ``resources/connection_costs_s{simpl}.csv``
[`#261 <https://github.com/PyPSA/pypsa-eur/pull/261>`_].
* The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to
@ -282,9 +282,9 @@ PyPSA-Eur 0.4.0 (22th September 2021)
cutouts side-by-side.
* The flag ``keep_all_available_areas`` in the configuration for renewable
potentials was deprecated and now defaults to ``True``.
potentials was deprecated and now defaults to ``True``.
* Update dependencies in ``envs/environment.yaml``
* Update dependencies in ``envs/environment.yaml``
[`#257 <https://github.com/PyPSA/pypsa-eur/pull/257>`_]
* Continuous integration testing switches to Github Actions from Travis CI
@ -313,7 +313,7 @@ PyPSA-Eur 0.4.0 (22th September 2021)
* Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq
(from 3.1e9 t CO2-eq). The new value represents emissions related to the
electricity sector for EU+UK+Balkan. The old value was too high and used when
the emissions wildcard in ``{opts}`` was used
the emissions wildcard in ``{opts}`` was used
[`#233 <https://github.com/PyPSA/pypsa-eur/pull/233>`_].
* Add escape in :mod:`base_network` if all TYNDP links are already
@ -321,11 +321,11 @@ PyPSA-Eur 0.4.0 (22th September 2021)
[`#246 <https://github.com/PyPSA/pypsa-eur/pull/246>`_].
* In :mod:`solve_operations_network` the optimised capacities are now
fixed for all extendable links, not only HVDC links
fixed for all extendable links, not only HVDC links
[`#244 <https://github.com/PyPSA/pypsa-eur/pull/244>`_].
* The ``focus_weights`` are now also considered when pre-clustering in
the :mod:`simplify_network` rule
the :mod:`simplify_network` rule
[`#241 <https://github.com/PyPSA/pypsa-eur/pull/241>`_].
* in :mod:`build_renewable_profile` where offshore wind profiles could
@ -345,7 +345,7 @@ PyPSA-Eur 0.4.0 (22th September 2021)
load shedding generators are only added at the AC buses, excluding buses for H2
and battery stores [`#269 <https://github.com/PyPSA/pypsa-eur/pull/269>`_].
* Delete duplicated capital costs at battery discharge link
* Delete duplicated capital costs at battery discharge link
[`#240 <https://github.com/PyPSA/pypsa-eur/pull/240>`_].
* Propagate the solver log file name to the solver. Previously, the
@ -362,7 +362,7 @@ Using the ``{opts}`` wildcard for scenarios:
* An option is introduced which adds constraints such that each country or node produces on average a minimal share of its total consumption itself.
For example ``EQ0.5c`` set in the ``{opts}`` wildcard requires each country to produce on average at least 50% of its consumption. Additionally,
the option ``ATK`` requires autarky at each node and removes all means of power transmission through lines and links. ``ATKc`` only removes
cross-border transfer capacities.
cross-border transfer capacities.
[`#166 <https://github.com/PyPSA/pypsa-eur/pull/166>`_].
* Added an option to alter the capital cost (``c``) or installable potentials (``p``) of carriers by a factor via ``carrier+{c,p}factor`` in the ``{opts}`` wildcard.
@ -449,7 +449,7 @@ Other:
[`#191 <https://github.com/PyPSA/pypsa-eur/pull/191>`_].
* Raise a warning if ``tech_colors`` in the config are not defined for all carriers
[`#178 <https://github.com/PyPSA/pypsa-eur/pull/178>`_].
[`#178 <https://github.com/PyPSA/pypsa-eur/pull/178>`_].
PyPSA-Eur 0.2.0 (8th June 2020)

View File

@ -18,4 +18,4 @@ pyyaml
seaborn
memory_profiler
tables
descartes
descartes

View File

@ -155,4 +155,5 @@ formats depends on the used backend. To query the supported file types on your s
.. code:: python
import matplotlib.pyplot as plt
plt.gcf().canvas.get_supported_filetypes()

View File

@ -4,427 +4,427 @@
name: pypsa-eur
channels:
- bioconda
- http://conda.anaconda.org/gurobi
- conda-forge
- defaults
- bioconda
- http://conda.anaconda.org/gurobi
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1
- _openmp_mutex=4.5
- abseil-cpp=20210324.2
- affine=2.3.1
- alsa-lib=1.2.3.2
- altair=4.2.0
- ampl-mp=3.1.0
- amply=0.1.5
- anyio=3.6.1
- appdirs=1.4.4
- argon2-cffi=21.3.0
- argon2-cffi-bindings=21.2.0
- arrow-cpp=8.0.0
- asttokens=2.0.5
- atlite=0.2.9
- attrs=21.4.0
- aws-c-cal=0.5.11
- aws-c-common=0.6.2
- aws-c-event-stream=0.2.7
- aws-c-io=0.10.5
- aws-checksums=0.1.11
- aws-sdk-cpp=1.8.186
- babel=2.10.3
- backcall=0.2.0
- backports=1.0
- backports.functools_lru_cache=1.6.4
- beautifulsoup4=4.11.1
- bleach=5.0.1
- blinker=1.4
- blosc=1.21.1
- bokeh=2.4.3
- boost-cpp=1.74.0
- bottleneck=1.3.5
- branca=0.5.0
- brotli=1.0.9
- brotli-bin=1.0.9
- brotlipy=0.7.0
- bzip2=1.0.8
- c-ares=1.18.1
- ca-certificates=2022.6.15.1
- cachetools=5.0.0
- cairo=1.16.0
- cartopy=0.20.1
- cdsapi=0.5.1
- certifi=2022.6.15.1
- cffi=1.15.1
- cfitsio=4.0.0
- cftime=1.6.1
- charset-normalizer=2.1.0
- click=8.0.4
- click-plugins=1.1.1
- cligj=0.7.2
- cloudpickle=2.1.0
- coin-or-cbc=2.10.8
- coin-or-cgl=0.60.6
- coin-or-clp=1.17.7
- coin-or-osi=0.108.7
- coin-or-utils=2.11.6
- coincbc=2.10.8
- colorama=0.4.5
- colorcet=3.0.0
- commonmark=0.9.1
- configargparse=1.5.3
- connection_pool=0.0.3
- country_converter=0.7.4
- cryptography=37.0.4
- curl=7.83.1
- cycler=0.11.0
- cytoolz=0.12.0
- dask=2022.7.0
- dask-core=2022.7.0
- dataclasses=0.8
- datrie=0.8.2
- dbus=1.13.6
- debugpy=1.6.0
- decorator=5.1.1
- defusedxml=0.7.1
- deprecation=2.1.0
- descartes=1.1.0
- distributed=2022.7.0
- distro=1.6.0
- docutils=0.19
- dpath=2.0.6
- entrypoints=0.4
- entsoe-py=0.5.4
- et_xmlfile=1.0.1
- executing=0.8.3
- expat=2.4.8
- filelock=3.7.1
- fiona=1.8.20
- flit-core=3.7.1
- folium=0.12.1.post1
- font-ttf-dejavu-sans-mono=2.37
- font-ttf-inconsolata=3.000
- font-ttf-source-code-pro=2.038
- font-ttf-ubuntu=0.83
- fontconfig=2.14.0
- fonts-conda-ecosystem=1
- fonts-conda-forge=1
- fonttools=4.34.4
- freetype=2.10.4
- freexl=1.0.6
- fsspec=2022.5.0
- future=0.18.2
- gdal=3.3.3
- geographiclib=1.52
- geojson-rewind=1.0.2
- geopandas=0.11.0
- geopandas-base=0.11.0
- geopy=2.2.0
- geos=3.10.0
- geotiff=1.7.0
- gettext=0.19.8.1
- gflags=2.2.2
- giflib=5.2.1
- gitdb=4.0.9
- gitpython=3.1.27
- glog=0.6.0
- gmp=6.2.1
- graphite2=1.3.13
- grpc-cpp=1.45.2
- gst-plugins-base=1.18.5
- gstreamer=1.18.5
- harfbuzz=2.9.1
- hdf4=4.2.15
- hdf5=1.12.1
- heapdict=1.0.1
- icu=68.2
- idna=3.3
- importlib-metadata=4.11.4
- importlib_metadata=4.11.4
- importlib_resources=5.8.0
- iniconfig=1.1.1
- ipykernel=6.15.1
- ipython=8.4.0
- ipython_genutils=0.2.0
- ipywidgets=7.7.1
- jedi=0.18.1
- jinja2=3.1.2
- joblib=1.1.0
- jpeg=9e
- json-c=0.15
- json5=0.9.5
- jsonschema=4.7.2
- jupyter_client=7.3.4
- jupyter_core=4.10.0
- jupyter_server=1.18.1
- kealib=1.4.15
- keyutils=1.6.1
- kiwisolver=1.4.4
- krb5=1.19.3
- lcms2=2.12
- ld_impl_linux-64=2.36.1
- lerc=3.0
- libblas=3.9.0
- libbrotlicommon=1.0.9
- libbrotlidec=1.0.9
- libbrotlienc=1.0.9
- libcblas=3.9.0
- libclang=11.1.0
- libcrc32c=1.1.2
- libcurl=7.83.1
- libdap4=3.20.6
- libdeflate=1.12
- libedit=3.1.20191231
- libev=4.33
- libevent=2.1.10
- libffi=3.4.2
- libgcc-ng=12.1.0
- libgdal=3.3.3
- libgfortran-ng=12.1.0
- libgfortran5=12.1.0
- libglib=2.72.1
- libgomp=12.1.0
- libgoogle-cloud=1.40.2
- libiconv=1.16
- libkml=1.3.0
- liblapack=3.9.0
- liblapacke=3.9.0
- libllvm11=11.1.0
- libnetcdf=4.8.1
- libnghttp2=1.47.0
- libnsl=2.0.0
- libogg=1.3.4
- libopenblas=0.3.20
- libopus=1.3.1
- libpng=1.6.37
- libpq=13.5
- libprotobuf=3.20.1
- librttopo=1.1.0
- libsodium=1.0.18
- libspatialindex=1.9.3
- libspatialite=5.0.1
- libssh2=1.10.0
- libstdcxx-ng=12.1.0
- libthrift=0.16.0
- libtiff=4.4.0
- libutf8proc=2.7.0
- libuuid=2.32.1
- libvorbis=1.3.7
- libwebp=1.2.2
- libwebp-base=1.2.2
- libxcb=1.13
- libxkbcommon=1.0.3
- libxml2=2.9.12
- libxslt=1.1.33
- libzip=1.9.2
- libzlib=1.2.12
- locket=1.0.0
- lxml=4.8.0
- lz4=4.0.0
- lz4-c=1.9.3
- lzo=2.10
- mapclassify=2.4.3
- markdown=3.4.1
- markupsafe=2.1.1
- matplotlib=3.5.2
- matplotlib-base=3.5.2
- matplotlib-inline=0.1.3
- memory_profiler=0.60.0
- metis=5.1.0
- mistune=0.8.4
- msgpack-python=1.0.4
- mumps-include=5.2.1
- mumps-seq=5.2.1
- munch=2.5.0
- munkres=1.1.4
- mysql-common=8.0.29
- mysql-libs=8.0.29
- nbclassic=0.4.3
- nbclient=0.6.6
- nbconvert=6.5.0
- nbconvert-core=6.5.0
- nbconvert-pandoc=6.5.0
- nbformat=5.4.0
- ncurses=6.3
- nest-asyncio=1.5.5
- netcdf4=1.6.0
- networkx=2.8.4
- nomkl=1.0
- notebook=6.4.12
- notebook-shim=0.1.0
- nspr=4.32
- nss=3.78
- numexpr=2.8.3
- numpy=1.23.1
- openjdk=11.0.9.1
- openjpeg=2.4.0
- openpyxl=3.0.9
- openssl=1.1.1q
- orc=1.7.5
- packaging=21.3
- pandas=1.4.3
- pandoc=2.18
- pandocfilters=1.5.0
- parquet-cpp=1.5.1
- parso=0.8.3
- partd=1.2.0
- patsy=0.5.2
- pcre=8.45
- pexpect=4.8.0
- pickleshare=0.7.5
- pillow=9.2.0
- pip=22.1.2
- pixman=0.40.0
- plac=1.3.5
- pluggy=1.0.0
- ply=3.11
- poppler=21.09.0
- poppler-data=0.4.11
- postgresql=13.5
- powerplantmatching=0.5.4
- progressbar2=4.0.0
- proj=8.1.1
- prometheus_client=0.14.1
- prompt-toolkit=3.0.30
- protobuf=3.20.1
- psutil=5.9.1
- pthread-stubs=0.4
- ptyprocess=0.7.0
- pulp=2.6.0
- pure_eval=0.2.2
- py=1.11.0
- pyarrow=8.0.0
- pycountry=20.7.3
- pycparser=2.21
- pyct=0.4.6
- pyct-core=0.4.6
- pydeck=0.7.1
- pygments=2.12.0
- pympler=0.9
- pyomo=6.4.1
- pyopenssl=22.0.0
- pyparsing=3.0.9
- pyproj=3.2.1
- pypsa=0.20.0
- pyqt=5.12.3
- pyqt-impl=5.12.3
- pyqt5-sip=4.19.18
- pyqtchart=5.12
- pyqtwebengine=5.12.1
- pyrsistent=0.18.1
- pyshp=2.3.0
- pysocks=1.7.1
- pytables=3.7.0
- pytest=7.1.2
- python=3.9.13
- python-dateutil=2.8.2
- python-fastjsonschema=2.16.1
- python-tzdata=2022.1
- python-utils=3.3.3
- python_abi=3.9
- pytz=2022.1
- pytz-deprecation-shim=0.1.0.post0
- pyviz_comms=2.2.0
- pyxlsb=1.0.9
- pyyaml=6.0
- pyzmq=23.2.0
- qt=5.12.9
- rasterio=1.2.9
- ratelimiter=1.2.0
- re2=2022.06.01
- readline=8.1.2
- requests=2.28.1
- retry=0.9.2
- rich=12.5.1
- rtree=1.0.0
- s2n=1.0.10
- scikit-learn=1.1.1
- scipy=1.8.1
- scotch=6.0.9
- seaborn=0.11.2
- seaborn-base=0.11.2
- semver=2.13.0
- send2trash=1.8.0
- setuptools=63.2.0
- setuptools-scm=7.0.5
- setuptools_scm=7.0.5
- shapely=1.8.0
- six=1.16.0
- smart_open=6.0.0
- smmap=3.0.5
- snakemake-minimal=7.8.5
- snappy=1.1.9
- sniffio=1.2.0
- snuggs=1.4.7
- sortedcontainers=2.4.0
- soupsieve=2.3.1
- sqlite=3.39.1
- stack_data=0.3.0
- statsmodels=0.13.2
- stopit=1.1.2
- streamlit=1.10.0
- tabula-py=2.2.0
- tabulate=0.8.10
- tblib=1.7.0
- tenacity=8.0.1
- terminado=0.15.0
- threadpoolctl=3.1.0
- tiledb=2.3.4
- tinycss2=1.1.1
- tk=8.6.12
- toml=0.10.2
- tomli=2.0.1
- toolz=0.12.0
- toposort=1.7
- tornado=6.1
- tqdm=4.64.0
- traitlets=5.3.0
- typing-extensions=4.3.0
- typing_extensions=4.3.0
- tzcode=2022a
- tzdata=2022a
- tzlocal=4.2
- unicodedata2=14.0.0
- unidecode=1.3.4
- unixodbc=2.3.10
- urllib3=1.26.10
- validators=0.18.2
- watchdog=2.1.9
- wcwidth=0.2.5
- webencodings=0.5.1
- websocket-client=1.3.3
- wheel=0.37.1
- widgetsnbextension=3.6.1
- wrapt=1.14.1
- xarray=2022.3.0
- xerces-c=3.2.3
- xlrd=2.0.1
- xorg-fixesproto=5.0
- xorg-inputproto=2.3.2
- xorg-kbproto=1.0.7
- xorg-libice=1.0.10
- xorg-libsm=1.2.3
- xorg-libx11=1.7.2
- xorg-libxau=1.0.9
- xorg-libxdmcp=1.1.3
- xorg-libxext=1.3.4
- xorg-libxfixes=5.0.3
- xorg-libxi=1.7.10
- xorg-libxrender=0.9.10
- xorg-libxtst=1.2.3
- xorg-recordproto=1.14.2
- xorg-renderproto=0.11.1
- xorg-xextproto=7.3.0
- xorg-xproto=7.0.31
- xyzservices=2022.6.0
- xz=5.2.5
- yaml=0.2.5
- yte=1.5.1
- zeromq=4.3.4
- zict=2.2.0
- zipp=3.8.0
- zlib=1.2.12
- zstd=1.5.2
- pip:
- countrycode==0.2
- tsam==2.1.0
- vresutils==0.3.1
- _libgcc_mutex=0.1
- _openmp_mutex=4.5
- abseil-cpp=20210324.2
- affine=2.3.1
- alsa-lib=1.2.3.2
- altair=4.2.0
- ampl-mp=3.1.0
- amply=0.1.5
- anyio=3.6.1
- appdirs=1.4.4
- argon2-cffi=21.3.0
- argon2-cffi-bindings=21.2.0
- arrow-cpp=8.0.0
- asttokens=2.0.5
- atlite=0.2.9
- attrs=21.4.0
- aws-c-cal=0.5.11
- aws-c-common=0.6.2
- aws-c-event-stream=0.2.7
- aws-c-io=0.10.5
- aws-checksums=0.1.11
- aws-sdk-cpp=1.8.186
- babel=2.10.3
- backcall=0.2.0
- backports=1.0
- backports.functools_lru_cache=1.6.4
- beautifulsoup4=4.11.1
- bleach=5.0.1
- blinker=1.4
- blosc=1.21.1
- bokeh=2.4.3
- boost-cpp=1.74.0
- bottleneck=1.3.5
- branca=0.5.0
- brotli=1.0.9
- brotli-bin=1.0.9
- brotlipy=0.7.0
- bzip2=1.0.8
- c-ares=1.18.1
- ca-certificates=2022.6.15.1
- cachetools=5.0.0
- cairo=1.16.0
- cartopy=0.20.1
- cdsapi=0.5.1
- certifi=2022.6.15.1
- cffi=1.15.1
- cfitsio=4.0.0
- cftime=1.6.1
- charset-normalizer=2.1.0
- click=8.0.4
- click-plugins=1.1.1
- cligj=0.7.2
- cloudpickle=2.1.0
- coin-or-cbc=2.10.8
- coin-or-cgl=0.60.6
- coin-or-clp=1.17.7
- coin-or-osi=0.108.7
- coin-or-utils=2.11.6
- coincbc=2.10.8
- colorama=0.4.5
- colorcet=3.0.0
- commonmark=0.9.1
- configargparse=1.5.3
- connection_pool=0.0.3
- country_converter=0.7.4
- cryptography=37.0.4
- curl=7.83.1
- cycler=0.11.0
- cytoolz=0.12.0
- dask=2022.7.0
- dask-core=2022.7.0
- dataclasses=0.8
- datrie=0.8.2
- dbus=1.13.6
- debugpy=1.6.0
- decorator=5.1.1
- defusedxml=0.7.1
- deprecation=2.1.0
- descartes=1.1.0
- distributed=2022.7.0
- distro=1.6.0
- docutils=0.19
- dpath=2.0.6
- entrypoints=0.4
- entsoe-py=0.5.4
- et_xmlfile=1.0.1
- executing=0.8.3
- expat=2.4.8
- filelock=3.7.1
- fiona=1.8.20
- flit-core=3.7.1
- folium=0.12.1.post1
- font-ttf-dejavu-sans-mono=2.37
- font-ttf-inconsolata=3.000
- font-ttf-source-code-pro=2.038
- font-ttf-ubuntu=0.83
- fontconfig=2.14.0
- fonts-conda-ecosystem=1
- fonts-conda-forge=1
- fonttools=4.34.4
- freetype=2.10.4
- freexl=1.0.6
- fsspec=2022.5.0
- future=0.18.2
- gdal=3.3.3
- geographiclib=1.52
- geojson-rewind=1.0.2
- geopandas=0.11.0
- geopandas-base=0.11.0
- geopy=2.2.0
- geos=3.10.0
- geotiff=1.7.0
- gettext=0.19.8.1
- gflags=2.2.2
- giflib=5.2.1
- gitdb=4.0.9
- gitpython=3.1.27
- glog=0.6.0
- gmp=6.2.1
- graphite2=1.3.13
- grpc-cpp=1.45.2
- gst-plugins-base=1.18.5
- gstreamer=1.18.5
- harfbuzz=2.9.1
- hdf4=4.2.15
- hdf5=1.12.1
- heapdict=1.0.1
- icu=68.2
- idna=3.3
- importlib-metadata=4.11.4
- importlib_metadata=4.11.4
- importlib_resources=5.8.0
- iniconfig=1.1.1
- ipykernel=6.15.1
- ipython=8.4.0
- ipython_genutils=0.2.0
- ipywidgets=7.7.1
- jedi=0.18.1
- jinja2=3.1.2
- joblib=1.1.0
- jpeg=9e
- json-c=0.15
- json5=0.9.5
- jsonschema=4.7.2
- jupyter_client=7.3.4
- jupyter_core=4.10.0
- jupyter_server=1.18.1
- kealib=1.4.15
- keyutils=1.6.1
- kiwisolver=1.4.4
- krb5=1.19.3
- lcms2=2.12
- ld_impl_linux-64=2.36.1
- lerc=3.0
- libblas=3.9.0
- libbrotlicommon=1.0.9
- libbrotlidec=1.0.9
- libbrotlienc=1.0.9
- libcblas=3.9.0
- libclang=11.1.0
- libcrc32c=1.1.2
- libcurl=7.83.1
- libdap4=3.20.6
- libdeflate=1.12
- libedit=3.1.20191231
- libev=4.33
- libevent=2.1.10
- libffi=3.4.2
- libgcc-ng=12.1.0
- libgdal=3.3.3
- libgfortran-ng=12.1.0
- libgfortran5=12.1.0
- libglib=2.72.1
- libgomp=12.1.0
- libgoogle-cloud=1.40.2
- libiconv=1.16
- libkml=1.3.0
- liblapack=3.9.0
- liblapacke=3.9.0
- libllvm11=11.1.0
- libnetcdf=4.8.1
- libnghttp2=1.47.0
- libnsl=2.0.0
- libogg=1.3.4
- libopenblas=0.3.20
- libopus=1.3.1
- libpng=1.6.37
- libpq=13.5
- libprotobuf=3.20.1
- librttopo=1.1.0
- libsodium=1.0.18
- libspatialindex=1.9.3
- libspatialite=5.0.1
- libssh2=1.10.0
- libstdcxx-ng=12.1.0
- libthrift=0.16.0
- libtiff=4.4.0
- libutf8proc=2.7.0
- libuuid=2.32.1
- libvorbis=1.3.7
- libwebp=1.2.2
- libwebp-base=1.2.2
- libxcb=1.13
- libxkbcommon=1.0.3
- libxml2=2.9.12
- libxslt=1.1.33
- libzip=1.9.2
- libzlib=1.2.12
- locket=1.0.0
- lxml=4.8.0
- lz4=4.0.0
- lz4-c=1.9.3
- lzo=2.10
- mapclassify=2.4.3
- markdown=3.4.1
- markupsafe=2.1.1
- matplotlib=3.5.2
- matplotlib-base=3.5.2
- matplotlib-inline=0.1.3
- memory_profiler=0.60.0
- metis=5.1.0
- mistune=0.8.4
- msgpack-python=1.0.4
- mumps-include=5.2.1
- mumps-seq=5.2.1
- munch=2.5.0
- munkres=1.1.4
- mysql-common=8.0.29
- mysql-libs=8.0.29
- nbclassic=0.4.3
- nbclient=0.6.6
- nbconvert=6.5.0
- nbconvert-core=6.5.0
- nbconvert-pandoc=6.5.0
- nbformat=5.4.0
- ncurses=6.3
- nest-asyncio=1.5.5
- netcdf4=1.6.0
- networkx=2.8.4
- nomkl=1.0
- notebook=6.4.12
- notebook-shim=0.1.0
- nspr=4.32
- nss=3.78
- numexpr=2.8.3
- numpy=1.23.1
- openjdk=11.0.9.1
- openjpeg=2.4.0
- openpyxl=3.0.9
- openssl=1.1.1q
- orc=1.7.5
- packaging=21.3
- pandas=1.4.3
- pandoc=2.18
- pandocfilters=1.5.0
- parquet-cpp=1.5.1
- parso=0.8.3
- partd=1.2.0
- patsy=0.5.2
- pcre=8.45
- pexpect=4.8.0
- pickleshare=0.7.5
- pillow=9.2.0
- pip=22.1.2
- pixman=0.40.0
- plac=1.3.5
- pluggy=1.0.0
- ply=3.11
- poppler=21.09.0
- poppler-data=0.4.11
- postgresql=13.5
- powerplantmatching=0.5.4
- progressbar2=4.0.0
- proj=8.1.1
- prometheus_client=0.14.1
- prompt-toolkit=3.0.30
- protobuf=3.20.1
- psutil=5.9.1
- pthread-stubs=0.4
- ptyprocess=0.7.0
- pulp=2.6.0
- pure_eval=0.2.2
- py=1.11.0
- pyarrow=8.0.0
- pycountry=20.7.3
- pycparser=2.21
- pyct=0.4.6
- pyct-core=0.4.6
- pydeck=0.7.1
- pygments=2.12.0
- pympler=0.9
- pyomo=6.4.1
- pyopenssl=22.0.0
- pyparsing=3.0.9
- pyproj=3.2.1
- pypsa=0.20.0
- pyqt=5.12.3
- pyqt-impl=5.12.3
- pyqt5-sip=4.19.18
- pyqtchart=5.12
- pyqtwebengine=5.12.1
- pyrsistent=0.18.1
- pyshp=2.3.0
- pysocks=1.7.1
- pytables=3.7.0
- pytest=7.1.2
- python=3.9.13
- python-dateutil=2.8.2
- python-fastjsonschema=2.16.1
- python-tzdata=2022.1
- python-utils=3.3.3
- python_abi=3.9
- pytz=2022.1
- pytz-deprecation-shim=0.1.0.post0
- pyviz_comms=2.2.0
- pyxlsb=1.0.9
- pyyaml=6.0
- pyzmq=23.2.0
- qt=5.12.9
- rasterio=1.2.9
- ratelimiter=1.2.0
- re2=2022.06.01
- readline=8.1.2
- requests=2.28.1
- retry=0.9.2
- rich=12.5.1
- rtree=1.0.0
- s2n=1.0.10
- scikit-learn=1.1.1
- scipy=1.8.1
- scotch=6.0.9
- seaborn=0.11.2
- seaborn-base=0.11.2
- semver=2.13.0
- send2trash=1.8.0
- setuptools=63.2.0
- setuptools-scm=7.0.5
- setuptools_scm=7.0.5
- shapely=1.8.0
- six=1.16.0
- smart_open=6.0.0
- smmap=3.0.5
- snakemake-minimal=7.8.5
- snappy=1.1.9
- sniffio=1.2.0
- snuggs=1.4.7
- sortedcontainers=2.4.0
- soupsieve=2.3.1
- sqlite=3.39.1
- stack_data=0.3.0
- statsmodels=0.13.2
- stopit=1.1.2
- streamlit=1.10.0
- tabula-py=2.2.0
- tabulate=0.8.10
- tblib=1.7.0
- tenacity=8.0.1
- terminado=0.15.0
- threadpoolctl=3.1.0
- tiledb=2.3.4
- tinycss2=1.1.1
- tk=8.6.12
- toml=0.10.2
- tomli=2.0.1
- toolz=0.12.0
- toposort=1.7
- tornado=6.1
- tqdm=4.64.0
- traitlets=5.3.0
- typing-extensions=4.3.0
- typing_extensions=4.3.0
- tzcode=2022a
- tzdata=2022a
- tzlocal=4.2
- unicodedata2=14.0.0
- unidecode=1.3.4
- unixodbc=2.3.10
- urllib3=1.26.10
- validators=0.18.2
- watchdog=2.1.9
- wcwidth=0.2.5
- webencodings=0.5.1
- websocket-client=1.3.3
- wheel=0.37.1
- widgetsnbextension=3.6.1
- wrapt=1.14.1
- xarray=2022.3.0
- xerces-c=3.2.3
- xlrd=2.0.1
- xorg-fixesproto=5.0
- xorg-inputproto=2.3.2
- xorg-kbproto=1.0.7
- xorg-libice=1.0.10
- xorg-libsm=1.2.3
- xorg-libx11=1.7.2
- xorg-libxau=1.0.9
- xorg-libxdmcp=1.1.3
- xorg-libxext=1.3.4
- xorg-libxfixes=5.0.3
- xorg-libxi=1.7.10
- xorg-libxrender=0.9.10
- xorg-libxtst=1.2.3
- xorg-recordproto=1.14.2
- xorg-renderproto=0.11.1
- xorg-xextproto=7.3.0
- xorg-xproto=7.0.31
- xyzservices=2022.6.0
- xz=5.2.5
- yaml=0.2.5
- yte=1.5.1
- zeromq=4.3.4
- zict=2.2.0
- zipp=3.8.0
- zlib=1.2.12
- zstd=1.5.2
- pip:
- countrycode==0.2
- tsam==2.1.0
- vresutils==0.3.1

View File

@ -4,57 +4,57 @@
name: pypsa-eur
channels:
- conda-forge
- bioconda
- conda-forge
- bioconda
dependencies:
- python>=3.8
- pip
- python>=3.8
- pip
- pypsa>=0.20
- atlite>=0.2.9
- dask
- pypsa>=0.20
- atlite>=0.2.9
- dask
# Dependencies of the workflow itself
- xlrd
- openpyxl
- pycountry
- seaborn
- snakemake-minimal
- memory_profiler
- yaml
- pytables
- lxml
- powerplantmatching>=0.5.4
- numpy
- pandas
- geopandas>=0.11.0
- xarray
- netcdf4
- networkx
- scipy
- shapely<2.0 # need to address deprecations
- progressbar2
- pyomo
- matplotlib
- proj
- fiona <= 1.18.20 # Till issue https://github.com/Toblerity/Fiona/issues/1085 is not solved
- country_converter
- xlrd
- openpyxl
- pycountry
- seaborn
- snakemake-minimal
- memory_profiler
- yaml
- pytables
- lxml
- powerplantmatching>=0.5.4
- numpy
- pandas
- geopandas>=0.11.0
- xarray
- netcdf4
- networkx
- scipy
- shapely<2.0 # need to address deprecations
- progressbar2
- pyomo
- matplotlib
- proj
- fiona <= 1.18.20 # Till issue https://github.com/Toblerity/Fiona/issues/1085 is not solved
- country_converter
# Keep in conda environment when calling ipython
- ipython
- ipython
# GIS dependencies:
- cartopy
- descartes
- rasterio<=1.2.9 # 1.2.10 creates error https://github.com/PyPSA/atlite/issues/238
- cartopy
- descartes
- rasterio<=1.2.9 # 1.2.10 creates error https://github.com/PyPSA/atlite/issues/238
# PyPSA-Eur-Sec Dependencies
- geopy
- tqdm
- pytz
- tabula-py
- pyxlsb
- geopy
- tqdm
- pytz
- tabula-py
- pyxlsb
- pip:
- vresutils>=0.3.1
- tsam>=1.1.0
- pip:
- vresutils>=0.3.1
- tsam>=1.1.0

View File

@ -1,11 +1,14 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
import pandas as pd
from pathlib import Path
REGION_COLS = ['geometry', 'name', 'x', 'y', 'country']
import pandas as pd
REGION_COLS = ["geometry", "name", "x", "y", "country"]
def configure_logging(snakemake, skip_handlers=False):
"""
@ -28,21 +31,26 @@ def configure_logging(snakemake, skip_handlers=False):
import logging
kwargs = snakemake.config.get('logging', dict()).copy()
kwargs = snakemake.config.get("logging", dict()).copy()
kwargs.setdefault("level", "INFO")
if skip_handlers is False:
fallback_path = Path(__file__).parent.joinpath('..', 'logs', f"{snakemake.rule}.log")
logfile = snakemake.log.get('python', snakemake.log[0] if snakemake.log
else fallback_path)
fallback_path = Path(__file__).parent.joinpath(
"..", "logs", f"{snakemake.rule}.log"
)
logfile = snakemake.log.get(
"python", snakemake.log[0] if snakemake.log else fallback_path
)
kwargs.update(
{'handlers': [
# Prefer the 'python' log, otherwise take the first log for each
# Snakemake rule
logging.FileHandler(logfile),
logging.StreamHandler()
{
"handlers": [
# Prefer the 'python' log, otherwise take the first log for each
# Snakemake rule
logging.FileHandler(logfile),
logging.StreamHandler(),
]
})
}
)
logging.basicConfig(**kwargs)
@ -80,137 +88,182 @@ def load_network(import_name=None, custom_components=None):
if custom_components is not None:
override_components = pypsa.components.components.copy()
override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
override_component_attrs = Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
)
for k, v in custom_components.items():
override_components.loc[k] = v['component']
override_component_attrs[k] = pd.DataFrame(columns = ["type","unit","default","description","status"])
for attr, val in v['attributes'].items():
override_components.loc[k] = v["component"]
override_component_attrs[k] = pd.DataFrame(
columns=["type", "unit", "default", "description", "status"]
)
for attr, val in v["attributes"].items():
override_component_attrs[k].loc[attr] = val
return pypsa.Network(import_name=import_name,
override_components=override_components,
override_component_attrs=override_component_attrs)
return pypsa.Network(
import_name=import_name,
override_components=override_components,
override_component_attrs=override_component_attrs,
)
def pdbcast(v, h):
return pd.DataFrame(v.values.reshape((-1, 1)) * h.values,
index=v.index, columns=h.index)
return pd.DataFrame(
v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index
)
def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
import pypsa
from add_electricity import update_transmission_costs, load_costs
from add_electricity import load_costs, update_transmission_costs
n = pypsa.Network(fn)
n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load"
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier))
n.links["carrier"] = (
n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)
)
n.lines["carrier"] = "AC line"
n.transformers["carrier"] = "AC transformer"
n.lines['s_nom'] = n.lines['s_nom_min']
n.links['p_nom'] = n.links['p_nom_min']
n.lines["s_nom"] = n.lines["s_nom_min"]
n.links["p_nom"] = n.links["p_nom_min"]
if combine_hydro_ps:
n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS'
n.storage_units.loc[
n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier"
] = "hydro+PHS"
# if the carrier was not set on the heat storage units
# bus_carrier = n.storage_units.bus.map(n.buses.carrier)
# n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"
Nyears = n.snapshot_weightings.objective.sum() / 8760.
costs = load_costs(tech_costs, config['costs'], config['electricity'], Nyears)
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(tech_costs, config["costs"], config["electricity"], Nyears)
update_transmission_costs(n, costs)
return n
def update_p_nom_max(n):
# if extendable carriers (solar/onwind/...) have capacity >= 0,
# e.g. existing assets from the OPSD project are included to the network,
# the installed capacity might exceed the expansion limit.
# Hence, we update the assumptions.
n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1)
n.generators.p_nom_max = n.generators[["p_nom_min", "p_nom_max"]].max(1)
def aggregate_p_nom(n):
return pd.concat([
n.generators.groupby("carrier").p_nom_opt.sum(),
n.storage_units.groupby("carrier").p_nom_opt.sum(),
n.links.groupby("carrier").p_nom_opt.sum(),
n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean()
])
return pd.concat(
[
n.generators.groupby("carrier").p_nom_opt.sum(),
n.storage_units.groupby("carrier").p_nom_opt.sum(),
n.links.groupby("carrier").p_nom_opt.sum(),
n.loads_t.p.groupby(n.loads.carrier, axis=1).sum().mean(),
]
)
def aggregate_p(n):
return pd.concat([
n.generators_t.p.sum().groupby(n.generators.carrier).sum(),
n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(),
n.stores_t.p.sum().groupby(n.stores.carrier).sum(),
-n.loads_t.p.sum().groupby(n.loads.carrier).sum()
])
return pd.concat(
[
n.generators_t.p.sum().groupby(n.generators.carrier).sum(),
n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(),
n.stores_t.p.sum().groupby(n.stores.carrier).sum(),
-n.loads_t.p.sum().groupby(n.loads.carrier).sum(),
]
)
def aggregate_e_nom(n):
return pd.concat([
(n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(),
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum()
])
return pd.concat(
[
(n.storage_units["p_nom_opt"] * n.storage_units["max_hours"])
.groupby(n.storage_units["carrier"])
.sum(),
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum(),
]
)
def aggregate_p_curtailed(n):
return pd.concat([
((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum())
.groupby(n.generators.carrier).sum()),
((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum())
.groupby(n.storage_units.carrier).sum())
])
return pd.concat(
[
(
(
n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt)
- n.generators_t.p.sum()
)
.groupby(n.generators.carrier)
.sum()
),
(
(n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum())
.groupby(n.storage_units.carrier)
.sum()
),
]
)
def aggregate_costs(n, flatten=False, opts=None, existing_only=False):
components = dict(Link=("p_nom", "p0"),
Generator=("p_nom", "p"),
StorageUnit=("p_nom", "p"),
Store=("e_nom", "p"),
Line=("s_nom", None),
Transformer=("s_nom", None))
components = dict(
Link=("p_nom", "p0"),
Generator=("p_nom", "p"),
StorageUnit=("p_nom", "p"),
Store=("e_nom", "p"),
Line=("s_nom", None),
Transformer=("s_nom", None),
)
costs = {}
for c, (p_nom, p_attr) in zip(
n.iterate_components(components.keys(), skip_empty=False),
components.values()
n.iterate_components(components.keys(), skip_empty=False), components.values()
):
if c.df.empty: continue
if not existing_only: p_nom += "_opt"
costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
if c.df.empty:
continue
if not existing_only:
p_nom += "_opt"
costs[(c.list_name, "capital")] = (
(c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
)
if p_attr is not None:
p = c.pnl[p_attr].sum()
if c.name == 'StorageUnit':
if c.name == "StorageUnit":
p = p.loc[p > 0]
costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum()
costs[(c.list_name, "marginal")] = (
(p * c.df.marginal_cost).groupby(c.df.carrier).sum()
)
costs = pd.concat(costs)
if flatten:
assert opts is not None
conv_techs = opts['conv_techs']
conv_techs = opts["conv_techs"]
costs = costs.reset_index(level=0, drop=True)
costs = costs['capital'].add(
costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}),
fill_value=0.
costs = costs["capital"].add(
costs["marginal"].rename({t: t + " marginal" for t in conv_techs}),
fill_value=0.0,
)
return costs
def progress_retrieve(url, file):
import urllib
from progressbar import ProgressBar
pbar = ProgressBar(0, 100)
def dlProgress(count, blockSize, totalSize):
pbar.update( int(count * blockSize * 100 / totalSize) )
pbar.update(int(count * blockSize * 100 / totalSize))
urllib.request.urlretrieve(url, file, reporthook=dlProgress)
def get_aggregation_strategies(aggregation_strategies):
# default aggregation strategies that cannot be defined in .yaml format must be specified within
# the function, otherwise (when defaults are passed in the function's definition) they get lost
@ -222,7 +275,7 @@ def get_aggregation_strategies(aggregation_strategies):
bus_strategies = dict(country=_make_consense("Bus", "country"))
bus_strategies.update(aggregation_strategies.get("buses", {}))
generator_strategies = {'build_year': lambda x: 0, 'lifetime': lambda x: np.inf}
generator_strategies = {"build_year": lambda x: 0, "lifetime": lambda x: np.inf}
generator_strategies.update(aggregation_strategies.get("generators", {}))
return bus_strategies, generator_strategies
@ -244,15 +297,17 @@ def mock_snakemake(rulename, **wildcards):
keyword arguments fixing the wildcards. Only necessary if wildcards are
needed.
"""
import snakemake as sm
import os
import snakemake as sm
from packaging.version import Version, parse
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
from packaging.version import Version, parse
script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \
f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
assert (
Path.cwd().resolve() == script_dir
), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p):
@ -273,9 +328,18 @@ def mock_snakemake(rulename, **wildcards):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
job.threads, job.resources, job.log,
job.dag.workflow.config, job.rule.name, None,)
snakemake = Snakemake(
job.input,
job.output,
job.params,
job.wildcards,
job.threads,
job.resources,
job.log,
job.dag.workflow.config,
job.rule.name,
None,
)
# create log and output dir if not existent
for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True)

File diff suppressed because it is too large

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -50,14 +51,16 @@ The rule :mod:`add_extra_components` attaches additional extendable components t
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
"""
import logging
from _helpers import configure_logging
import pypsa
import pandas as pd
import numpy as np
from add_electricity import (load_costs, add_nice_carrier_names,
_add_missing_carriers_from_costs)
import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import (
_add_missing_carriers_from_costs,
add_nice_carrier_names,
load_costs,
)
idx = pd.IndexSlice
@ -65,8 +68,8 @@ logger = logging.getLogger(__name__)
def attach_storageunits(n, costs, elec_opts):
carriers = elec_opts['extendable_carriers']['StorageUnit']
max_hours = elec_opts['max_hours']
carriers = elec_opts["extendable_carriers"]["StorageUnit"]
max_hours = elec_opts["max_hours"]
_add_missing_carriers_from_costs(n, costs, carriers)
@ -77,131 +80,167 @@ def attach_storageunits(n, costs, elec_opts):
for carrier in carriers:
roundtrip_correction = 0.5 if carrier == "battery" else 1
n.madd("StorageUnit", buses_i, ' ' + carrier,
bus=buses_i,
carrier=carrier,
p_nom_extendable=True,
capital_cost=costs.at[carrier, 'capital_cost'],
marginal_cost=costs.at[carrier, 'marginal_cost'],
efficiency_store=costs.at[lookup_store[carrier], 'efficiency']**roundtrip_correction,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency']**roundtrip_correction,
max_hours=max_hours[carrier],
cyclic_state_of_charge=True
n.madd(
"StorageUnit",
buses_i,
" " + carrier,
bus=buses_i,
carrier=carrier,
p_nom_extendable=True,
capital_cost=costs.at[carrier, "capital_cost"],
marginal_cost=costs.at[carrier, "marginal_cost"],
efficiency_store=costs.at[lookup_store[carrier], "efficiency"]
** roundtrip_correction,
efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"]
** roundtrip_correction,
max_hours=max_hours[carrier],
cyclic_state_of_charge=True,
)
def attach_stores(n, costs, elec_opts):
carriers = elec_opts['extendable_carriers']['Store']
carriers = elec_opts["extendable_carriers"]["Store"]
_add_missing_carriers_from_costs(n, costs, carriers)
buses_i = n.buses.index
bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']}
bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}
if 'H2' in carriers:
if "H2" in carriers:
h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
n.madd("Store", h2_buses_i,
bus=h2_buses_i,
carrier='H2',
e_nom_extendable=True,
e_cyclic=True,
capital_cost=costs.at["hydrogen storage underground", "capital_cost"])
n.madd(
"Store",
h2_buses_i,
bus=h2_buses_i,
carrier="H2",
e_nom_extendable=True,
e_cyclic=True,
capital_cost=costs.at["hydrogen storage underground", "capital_cost"],
)
n.madd("Link", h2_buses_i + " Electrolysis",
bus0=buses_i,
bus1=h2_buses_i,
carrier='H2 electrolysis',
p_nom_extendable=True,
efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"])
n.madd(
"Link",
h2_buses_i + " Electrolysis",
bus0=buses_i,
bus1=h2_buses_i,
carrier="H2 electrolysis",
p_nom_extendable=True,
efficiency=costs.at["electrolysis", "efficiency"],
capital_cost=costs.at["electrolysis", "capital_cost"],
marginal_cost=costs.at["electrolysis", "marginal_cost"],
)
n.madd("Link", h2_buses_i + " Fuel Cell",
bus0=h2_buses_i,
bus1=buses_i,
carrier='H2 fuel cell',
p_nom_extendable=True,
efficiency=costs.at["fuel cell", "efficiency"],
#NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"])
n.madd(
"Link",
h2_buses_i + " Fuel Cell",
bus0=h2_buses_i,
bus1=buses_i,
carrier="H2 fuel cell",
p_nom_extendable=True,
efficiency=costs.at["fuel cell", "efficiency"],
# NB: fixed cost is per MWel
capital_cost=costs.at["fuel cell", "capital_cost"]
* costs.at["fuel cell", "efficiency"],
marginal_cost=costs.at["fuel cell", "marginal_cost"],
)
if 'battery' in carriers:
b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict)
if "battery" in carriers:
b_buses_i = n.madd(
"Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
)
n.madd("Store", b_buses_i,
bus=b_buses_i,
carrier='battery',
e_cyclic=True,
e_nom_extendable=True,
capital_cost=costs.at['battery storage', 'capital_cost'],
marginal_cost=costs.at["battery", "marginal_cost"])
n.madd(
"Store",
b_buses_i,
bus=b_buses_i,
carrier="battery",
e_cyclic=True,
e_nom_extendable=True,
capital_cost=costs.at["battery storage", "capital_cost"],
marginal_cost=costs.at["battery", "marginal_cost"],
)
n.madd("Link", b_buses_i + " charger",
bus0=buses_i,
bus1=b_buses_i,
carrier='battery charger',
# the efficiencies are "round trip efficiencies"
efficiency=costs.at['battery inverter', 'efficiency']**0.5,
capital_cost=costs.at['battery inverter', 'capital_cost'],
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"])
n.madd(
"Link",
b_buses_i + " charger",
bus0=buses_i,
bus1=b_buses_i,
carrier="battery charger",
# the efficiencies are "round trip efficiencies"
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
capital_cost=costs.at["battery inverter", "capital_cost"],
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
n.madd("Link", b_buses_i + " discharger",
bus0=b_buses_i,
bus1=buses_i,
carrier='battery discharger',
efficiency=costs.at['battery inverter','efficiency']**0.5,
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"])
n.madd(
"Link",
b_buses_i + " discharger",
bus0=b_buses_i,
bus1=buses_i,
carrier="battery discharger",
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
p_nom_extendable=True,
marginal_cost=costs.at["battery inverter", "marginal_cost"],
)
def attach_hydrogen_pipelines(n, costs, elec_opts):
ext_carriers = elec_opts['extendable_carriers']
as_stores = ext_carriers.get('Store', [])
ext_carriers = elec_opts["extendable_carriers"]
as_stores = ext_carriers.get("Store", [])
if 'H2 pipeline' not in ext_carriers.get('Link',[]): return
if "H2 pipeline" not in ext_carriers.get("Link", []):
return
assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen "
"storage to be modelled as Store-Link-Bus combination. See "
"`config.yaml` at `electricity: extendable_carriers: Store:`.")
assert "H2" in as_stores, (
"Attaching hydrogen pipelines requires hydrogen "
"storage to be modelled as Store-Link-Bus combination. See "
"`config.yaml` at `electricity: extendable_carriers: Store:`."
)
# determine bus pairs
attrs = ["bus0","bus1","length"]
candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\
.reset_index(drop=True)
attrs = ["bus0", "bus1", "length"]
candidates = pd.concat(
[n.lines[attrs], n.links.query('carrier=="DC"')[attrs]]
).reset_index(drop=True)
# remove bus pair duplicates regardless of order of bus0 and bus1
h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()]
h2_links = candidates[
~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated()
]
h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
# add pipelines
n.madd("Link",
h2_links.index,
bus0=h2_links.bus0.values + " H2",
bus1=h2_links.bus1.values + " H2",
p_min_pu=-1,
p_nom_extendable=True,
length=h2_links.length.values,
capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length,
efficiency=costs.at['H2 pipeline','efficiency'],
carrier="H2 pipeline")
n.madd(
"Link",
h2_links.index,
bus0=h2_links.bus0.values + " H2",
bus1=h2_links.bus1.values + " H2",
p_min_pu=-1,
p_nom_extendable=True,
length=h2_links.length.values,
capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length,
efficiency=costs.at["H2 pipeline", "efficiency"],
carrier="H2 pipeline",
)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('add_extra_components',
simpl='', clusters=5)
snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
elec_config = snakemake.config['electricity']
Nyears = n.snapshot_weightings.objective.sum() / 8760.
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], elec_config, Nyears)
elec_config = snakemake.config["electricity"]
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(
snakemake.input.tech_costs, snakemake.config["costs"], elec_config, Nyears
)
attach_storageunits(n, costs, elec_config)
attach_stores(n, costs, elec_config)

View File

@ -1,10 +1,13 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Creates the network topology from an `ENTSO-E map extract <https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA network.
Creates the network topology from an `ENTSO-E map extract
<https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA
network.
Relevant Settings
-----------------
@ -59,25 +62,24 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging
import pypsa
import yaml
import pandas as pd
import geopandas as gpd
import numpy as np
import networkx as nx
from scipy import spatial
from scipy.sparse import csgraph
from itertools import product
from shapely.geometry import Point, LineString
import shapely, shapely.prepared, shapely.wkt
import geopandas as gpd
import networkx as nx
import numpy as np
import pandas as pd
import pypsa
import shapely
import shapely.prepared
import shapely.wkt
import yaml
from _helpers import configure_logging
from scipy import spatial
from scipy.sparse import csgraph
from shapely.geometry import LineString, Point
logger = logging.getLogger(__name__)
@ -97,48 +99,73 @@ def _get_country(df):
def _find_closest_links(links, new_links, distance_upper_bound=1.5):
treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
for s in links.geometry])
querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
new_links[['x2', 'y2', 'x1', 'y1']]])
treecoords = np.asarray(
[
np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
for s in links.geometry
]
)
querycoords = np.vstack(
[new_links[["x1", "y1", "x2", "y2"]], new_links[["x2", "y2", "x1", "y1"]]]
)
tree = spatial.KDTree(treecoords)
dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound)
found_b = ind < len(links)
found_i = np.arange(len(new_links)*2)[found_b] % len(new_links)
return pd.DataFrame(dict(D=dist[found_b],
i=links.index[ind[found_b] % len(links)]),
index=new_links.index[found_i]).sort_values(by='D')\
[lambda ds: ~ds.index.duplicated(keep='first')]\
.sort_index()['i']
found_i = np.arange(len(new_links) * 2)[found_b] % len(new_links)
return (
pd.DataFrame(
dict(D=dist[found_b], i=links.index[ind[found_b] % len(links)]),
index=new_links.index[found_i],
)
.sort_values(by="D")[lambda ds: ~ds.index.duplicated(keep="first")]
.sort_index()["i"]
)
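# Editor's sketch with invented coordinates: _find_closest_links queries a
# KDTree of existing endpoint pairs in both orientations of each new link;
# query() returns an index equal to the number of tree points (here 1) when
# no neighbour lies within distance_upper_bound, which is then filtered out.
import numpy as np
from scipy import spatial
existing = np.array([[0.0, 0.0, 1.0, 1.0]])      # one link as (x1, y1, x2, y2)
new = np.array([[1.02, 1.0, 0.01, 0.0]])         # same corridor, reversed ends
tree = spatial.KDTree(existing)
dist, ind = tree.query(
    np.vstack([new, new[:, [2, 3, 0, 1]]]), distance_upper_bound=1.5
)
found = ind < len(existing)                      # only the flipped orientation matches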
def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
buses = (pd.read_csv(eg_buses, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(bus_id="str"))
.set_index("bus_id")
.drop(['station_id'], axis=1)
.rename(columns=dict(voltage='v_nom')))
buses = (
pd.read_csv(
eg_buses,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(bus_id="str"),
)
.set_index("bus_id")
.drop(["station_id"], axis=1)
.rename(columns=dict(voltage="v_nom"))
)
buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'})
buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool)
buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
# remove all buses outside of all countries including exclusive economic zones (offshore)
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
europe_shape_prepped = shapely.prepared.prep(europe_shape)
buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
buses_in_europe_b = buses[["x", "y"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull()
logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages'])))
buses_with_v_nom_to_keep_b = (
buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
)
logger.info(
"Removing buses with voltages {}".format(
pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
)
)
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
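# Editor's aside (toy shapes): shapely's prepared geometries, as used for the
# Europe-shape containment test above, make many repeated contains() checks
# against the same polygon much cheaper.
from shapely.geometry import Point, Polygon
from shapely.prepared import prep
outline = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
outline_prepped = prep(outline)
inside = [outline_prepped.contains(Point(x, y)) for x, y in [(1, 1), (5, 5)]]
# -> [True, False]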
def _load_transformers_from_eg(buses, eg_transformers):
transformers = (pd.read_csv(eg_transformers, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(transformer_id='str', bus0='str', bus1='str'))
.set_index('transformer_id'))
transformers = pd.read_csv(
eg_transformers,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(transformer_id="str", bus0="str", bus1="str"),
).set_index("transformer_id")
transformers = _remove_dangling_branches(transformers, buses)
@ -146,33 +173,40 @@ def _load_transformers_from_eg(buses, eg_transformers):
def _load_converters_from_eg(buses, eg_converters):
converters = (pd.read_csv(eg_converters, quotechar="'",
true_values=['t'], false_values=['f'],
dtype=dict(converter_id='str', bus0='str', bus1='str'))
.set_index('converter_id'))
converters = pd.read_csv(
eg_converters,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(converter_id="str", bus0="str", bus1="str"),
).set_index("converter_id")
converters = _remove_dangling_branches(converters, buses)
converters['carrier'] = 'B2B'
converters["carrier"] = "B2B"
return converters
def _load_links_from_eg(buses, eg_links):
links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'],
dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool"))
.set_index('link_id'))
links = pd.read_csv(
eg_links,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(link_id="str", bus0="str", bus1="str", under_construction="bool"),
).set_index("link_id")
links['length'] /= 1e3
links["length"] /= 1e3
# The Skagerrak Link is connected to a 132kV bus which is removed in _load_buses_from_eg.
# Connect to neighboring 380kV bus
links.loc[links.bus1=='6396', 'bus1'] = '6398'
links.loc[links.bus1 == "6396", "bus1"] = "6398"
links = _remove_dangling_branches(links, buses)
# Add DC line parameters
links['carrier'] = 'DC'
links["carrier"] = "DC"
return links
@ -181,15 +215,21 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
links_tyndp = pd.read_csv(links_tyndp)
# remove all links from the list which lie outside all of the desired countries
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
europe_shape_prepped = shapely.prepared.prep(europe_shape)
x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
x1y1_in_europe_b = links_tyndp[["x1", "y1"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
x2y2_in_europe_b = links_tyndp[["x2", "y2"]].apply(
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
)
is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b
if not is_within_covered_countries_b.all():
logger.info("TYNDP links outside of the covered area (skipping): " +
", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"]))
logger.info(
"TYNDP links outside of the covered area (skipping): "
+ ", ".join(links_tyndp.loc[~is_within_covered_countries_b, "Name"])
)
links_tyndp = links_tyndp.loc[is_within_covered_countries_b]
if links_tyndp.empty:
@ -197,25 +237,32 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
has_replaces_b = links_tyndp.replaces.notnull()
oids = dict(Bus=_get_oid(buses), Link=_get_oid(links))
keep_b = dict(Bus=pd.Series(True, index=buses.index),
Link=pd.Series(True, index=links.index))
for reps in links_tyndp.loc[has_replaces_b, 'replaces']:
for comps in reps.split(':'):
oids_to_remove = comps.split('.')
keep_b = dict(
Bus=pd.Series(True, index=buses.index), Link=pd.Series(True, index=links.index)
)
for reps in links_tyndp.loc[has_replaces_b, "replaces"]:
for comps in reps.split(":"):
oids_to_remove = comps.split(".")
c = oids_to_remove.pop(0)
keep_b[c] &= ~oids[c].isin(oids_to_remove)
buses = buses.loc[keep_b['Bus']]
links = links.loc[keep_b['Link']]
buses = buses.loc[keep_b["Bus"]]
links = links.loc[keep_b["Link"]]
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20)
links_tyndp["j"] = _find_closest_links(
links, links_tyndp, distance_upper_bound=0.20
)
# Corresponds approximately to 20km tolerances
if links_tyndp["j"].notnull().any():
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
logger.info(
"TYNDP links already in the dataset (skipping): "
+ ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])
)
links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()]
if links_tyndp.empty: return buses, links
if links_tyndp.empty:
return buses, links
tree = spatial.KDTree(buses[['x', 'y']])
tree = spatial.KDTree(buses[["x", "y"]])
_, ind0 = tree.query(links_tyndp[["x1", "y1"]])
ind0_b = ind0 < len(buses)
links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]]
@ -224,24 +271,42 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
ind1_b = ind1 < len(buses)
links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]]
links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
links_tyndp_located_b = (
links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
)
if not links_tyndp_located_b.all():
logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"]))
logger.warning(
"Did not find connected buses for TYNDP links (skipping): "
+ ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])
)
links_tyndp = links_tyndp.loc[links_tyndp_located_b]
logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"]))
links_tyndp = links_tyndp[["bus0", "bus1"]].assign(
carrier='DC',
carrier="DC",
p_nom=links_tyndp["Power (MW)"],
length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]),
length=links_tyndp["Length (given) (km)"].fillna(
links_tyndp["Length (distance*1.2) (km)"]
),
under_construction=True,
underground=False,
geometry=(links_tyndp[["x1", "y1", "x2", "y2"]]
.apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)),
tags=('"name"=>"' + links_tyndp["Name"] + '", ' +
'"ref"=>"' + links_tyndp["Ref"] + '", ' +
'"status"=>"' + links_tyndp["status"] + '"')
geometry=(
links_tyndp[["x1", "y1", "x2", "y2"]].apply(
lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1
)
),
tags=(
'"name"=>"'
+ links_tyndp["Name"]
+ '", '
+ '"ref"=>"'
+ links_tyndp["Ref"]
+ '", '
+ '"status"=>"'
+ links_tyndp["status"]
+ '"'
),
)
links_tyndp.index = "T" + links_tyndp.index.astype(str)
@ -252,13 +317,25 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
def _load_lines_from_eg(buses, eg_lines):
lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
dtype=dict(line_id='str', bus0='str', bus1='str',
underground="bool", under_construction="bool"))
.set_index('line_id')
.rename(columns=dict(voltage='v_nom', circuits='num_parallel')))
lines = (
pd.read_csv(
eg_lines,
quotechar="'",
true_values=["t"],
false_values=["f"],
dtype=dict(
line_id="str",
bus0="str",
bus1="str",
underground="bool",
under_construction="bool",
),
)
.set_index("line_id")
.rename(columns=dict(voltage="v_nom", circuits="num_parallel"))
)
lines['length'] /= 1e3
lines["length"] /= 1e3
lines = _remove_dangling_branches(lines, buses)
@ -269,18 +346,20 @@ def _apply_parameter_corrections(n, parameter_corrections):
with open(parameter_corrections) as f:
corrections = yaml.safe_load(f)
if corrections is None: return
if corrections is None:
return
for component, attrs in corrections.items():
df = n.df(component)
oid = _get_oid(df)
if attrs is None: continue
if attrs is None:
continue
for attr, repls in attrs.items():
for i, r in repls.items():
if i == 'oid':
if i == "oid":
r = oid.map(repls["oid"]).dropna()
elif i == 'index':
elif i == "index":
r = pd.Series(repls["index"])
else:
raise NotImplementedError()
@ -289,78 +368,87 @@ def _apply_parameter_corrections(n, parameter_corrections):
def _set_electrical_parameters_lines(lines, config):
v_noms = config['electricity']['voltages']
linetypes = config['lines']['types']
v_noms = config["electricity"]["voltages"]
linetypes = config["lines"]["types"]
for v_nom in v_noms:
lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom]
lines.loc[lines["v_nom"] == v_nom, "type"] = linetypes[v_nom]
lines['s_max_pu'] = config['lines']['s_max_pu']
lines["s_max_pu"] = config["lines"]["s_max_pu"]
return lines
def _set_lines_s_nom_from_linetypes(n):
n.lines['s_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines['v_nom'] * n.lines.num_parallel
n.lines["s_nom"] = (
np.sqrt(3)
* n.lines["type"].map(n.line_types.i_nom)
* n.lines["v_nom"]
* n.lines.num_parallel
)
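# Editor's note: the rating above is S = sqrt(3) * I_nom * V_nom * num_parallel.
# Hedged numerical check with invented figures (not taken from PyPSA's
# line-type table): currents in kA times voltages in kV yield MVA directly.
import numpy as np
i_nom_ka, v_nom_kv, num_parallel = 2.58, 380, 2
s_nom_mva = np.sqrt(3) * i_nom_ka * v_nom_kv * num_parallel  # ~3396 MVA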
def _set_electrical_parameters_links(links, config, links_p_nom):
if links.empty: return links
if links.empty:
return links
p_max_pu = config['links'].get('p_max_pu', 1.)
links['p_max_pu'] = p_max_pu
links['p_min_pu'] = -p_max_pu
p_max_pu = config["links"].get("p_max_pu", 1.0)
links["p_max_pu"] = p_max_pu
links["p_min_pu"] = -p_max_pu
links_p_nom = pd.read_csv(links_p_nom)
# filter links that are not in operation anymore
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
removed_b = links_p_nom.Remarks.str.contains("Shut down|Replaced", na=False)
links_p_nom = links_p_nom[~removed_b]
# find closest link for all links in links_p_nom
links_p_nom['j'] = _find_closest_links(links, links_p_nom)
links_p_nom["j"] = _find_closest_links(links, links_p_nom)
links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'})
links_p_nom = links_p_nom.groupby(["j"], as_index=False).agg({"Power (MW)": "sum"})
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
# Don't update p_nom if it's already set
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom
p_nom_unset = (
p_nom.drop(links.index[links.p_nom.notnull()], errors="ignore")
if "p_nom" in links
else p_nom
)
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
return links
def _set_electrical_parameters_converters(converters, config):
p_max_pu = config['links'].get('p_max_pu', 1.)
converters['p_max_pu'] = p_max_pu
converters['p_min_pu'] = -p_max_pu
p_max_pu = config["links"].get("p_max_pu", 1.0)
converters["p_max_pu"] = p_max_pu
converters["p_min_pu"] = -p_max_pu
converters['p_nom'] = 2000
converters["p_nom"] = 2000
# Converters are combined with links
converters['under_construction'] = False
converters['underground'] = False
converters["under_construction"] = False
converters["underground"] = False
return converters
def _set_electrical_parameters_transformers(transformers, config):
config = config['transformers']
config = config["transformers"]
## Add transformer parameters
transformers["x"] = config.get('x', 0.1)
transformers["s_nom"] = config.get('s_nom', 2000)
transformers['type'] = config.get('type', '')
transformers["x"] = config.get("x", 0.1)
transformers["s_nom"] = config.get("s_nom", 2000)
transformers["type"] = config.get("type", "")
return transformers
def _remove_dangling_branches(branches, buses):
return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)])
return pd.DataFrame(
branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]
)
def _remove_unconnected_components(network):
@ -370,46 +458,62 @@ def _remove_unconnected_components(network):
component_sizes = component.value_counts()
components_to_remove = component_sizes.iloc[1:]
logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses."
.format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum()))
logger.info(
"Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
len(components_to_remove),
components_to_remove.max(),
components_to_remove.sum(),
)
)
return network[component == component_sizes.index[0]]
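# Editor's sketch (hypothetical 4-bus toy network): one way to obtain component
# labels like the `component` series used above is scipy's connected_components;
# keeping the most frequent label mirrors the filtering of the largest component.
import numpy as np
from scipy.sparse import csgraph, csr_matrix
adj = csr_matrix(([1, 1], ([0, 1], [1, 0])), shape=(4, 4))  # buses 0-1 linked, 2 and 3 isolated
n_comp, labels = csgraph.connected_components(adj, directed=False)
keep = labels == np.bincount(labels).argmax()  # -> [True, True, False, False]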
def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
buses = n.buses
def buses_in_shape(shape):
shape = shapely.prepared.prep(shape)
return pd.Series(
np.fromiter((shape.contains(Point(x, y))
for x, y in buses.loc[:,["x", "y"]].values),
dtype=bool, count=len(buses)),
index=buses.index
np.fromiter(
(
shape.contains(Point(x, y))
for x, y in buses.loc[:, ["x", "y"]].values
),
dtype=bool,
count=len(buses),
),
index=buses.index,
)
countries = config['countries']
country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry']
countries = config["countries"]
country_shapes = gpd.read_file(country_shapes).set_index("name")["geometry"]
# reindexing necessary for supporting empty geo-dataframes
offshore_shapes = gpd.read_file(offshore_shapes)
offshore_shapes = offshore_shapes.reindex(columns=['name', 'geometry']).set_index('name')['geometry']
substation_b = buses['symbol'].str.contains('substation|converter station', case=False)
offshore_shapes = offshore_shapes.reindex(columns=["name", "geometry"]).set_index(
"name"
)["geometry"]
substation_b = buses["symbol"].str.contains(
"substation|converter station", case=False
)
def prefer_voltage(x, which):
index = x.index
if len(index) == 1:
return pd.Series(index, index)
key = (x.index[0]
if x['v_nom'].isnull().all()
else getattr(x['v_nom'], 'idx' + which)())
key = (
x.index[0]
if x["v_nom"].isnull().all()
else getattr(x["v_nom"], "idx" + which)()
)
return pd.Series(key, index)
gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False,
group_keys=False, sort=False)
bus_map_low = gb.apply(prefer_voltage, 'min')
gb = buses.loc[substation_b].groupby(
["x", "y"], as_index=False, group_keys=False, sort=False
)
bus_map_low = gb.apply(prefer_voltage, "min")
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, 'max')
bus_map_high = gb.apply(prefer_voltage, "max")
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index)
@ -420,47 +524,66 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
onshore_country_b = buses_in_shape(onshore_shape)
onshore_b |= onshore_country_b
buses.loc[onshore_country_b, 'country'] = country
buses.loc[onshore_country_b, "country"] = country
if country not in offshore_shapes.index: continue
if country not in offshore_shapes.index:
continue
offshore_country_b = buses_in_shape(offshore_shapes[country])
offshore_b |= offshore_country_b
buses.loc[offshore_country_b, 'country'] = country
buses.loc[offshore_country_b, "country"] = country
# Only accept buses as low-voltage substations (where load is attached), if
# they have at least one connection which is not under_construction
has_connections_b = pd.Series(False, index=buses.index)
for b, df in product(('bus0', 'bus1'), (n.lines, n.links)):
has_connections_b |= ~ df.groupby(b).under_construction.min()
for b, df in product(("bus0", "bus1"), (n.lines, n.links)):
has_connections_b |= ~df.groupby(b).under_construction.min()
buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b
buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction'])
buses["substation_lv"] = (
lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b
)
buses["substation_off"] = (offshore_b | (hv_b & onshore_b)) & (
~buses["under_construction"]
)
c_nan_b = buses.country.isnull()
if c_nan_b.sum() > 0:
c_tag = _get_country(buses.loc[c_nan_b])
c_tag.loc[~c_tag.isin(countries)] = np.nan
n.buses.loc[c_nan_b, 'country'] = c_tag
n.buses.loc[c_nan_b, "country"] = c_tag
c_tag_nan_b = n.buses.country.isnull()
# Nearest country in path length defines country of still homeless buses
# Work-around until commit 705119 lands in pypsa release
n.transformers['length'] = 0.
graph = n.graph(weight='length')
n.transformers.drop('length', axis=1, inplace=True)
n.transformers["length"] = 0.0
graph = n.graph(weight="length")
n.transformers.drop("length", axis=1, inplace=True)
for b in n.buses.index[c_tag_nan_b]:
df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200)))
.join(n.buses.country).dropna())
assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b)
n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country']
df = (
pd.DataFrame(
dict(
pathlength=nx.single_source_dijkstra_path_length(
graph, b, cutoff=200
)
)
)
.join(n.buses.country)
.dropna()
)
assert (
not df.empty
), "No buses with defined country within 200km of bus `{}`".format(b)
n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"]
logger.warning("{} buses are not in any country or offshore shape,"
" {} have been assigned from the tag of the entsoe map,"
" the rest from the next bus in terms of pathlength."
.format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()))
logger.warning(
"{} buses are not in any country or offshore shape,"
" {} have been assigned from the tag of the entsoe map,"
" the rest from the next bus in terms of pathlength.".format(
c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()
)
)
return buses
@ -469,11 +592,13 @@ def _replace_b2b_converter_at_country_border_by_link(n):
# Affects only the B2B converter in Lithuania at the Polish border at the moment
buscntry = n.buses.country
linkcntry = n.links.bus0.map(buscntry)
converters_i = n.links.index[(n.links.carrier == 'B2B') & (linkcntry == n.links.bus1.map(buscntry))]
converters_i = n.links.index[
(n.links.carrier == "B2B") & (linkcntry == n.links.bus1.map(buscntry))
]
def findforeignbus(G, i):
cntry = linkcntry.at[i]
for busattr in ('bus0', 'bus1'):
for busattr in ("bus0", "bus1"):
b0 = n.links.at[i, busattr]
for b1 in G[b0]:
if buscntry[b1] != cntry:
@ -486,67 +611,93 @@ def _replace_b2b_converter_at_country_border_by_link(n):
if busattr is not None:
comp, line = next(iter(G[b0][b1]))
if comp != "Line":
logger.warning("Unable to replace B2B `{}` expected a Line, but found a {}"
.format(i, comp))
logger.warning(
"Unable to replace B2B `{}` expected a Line, but found a {}".format(
i, comp
)
)
continue
n.links.at[i, busattr] = b1
n.links.at[i, 'p_nom'] = min(n.links.at[i, 'p_nom'], n.lines.at[line, 's_nom'])
n.links.at[i, 'carrier'] = 'DC'
n.links.at[i, 'underwater_fraction'] = 0.
n.links.at[i, 'length'] = n.lines.at[line, 'length']
n.links.at[i, "p_nom"] = min(
n.links.at[i, "p_nom"], n.lines.at[line, "s_nom"]
)
n.links.at[i, "carrier"] = "DC"
n.links.at[i, "underwater_fraction"] = 0.0
n.links.at[i, "length"] = n.lines.at[line, "length"]
n.remove("Line", line)
n.remove("Bus", b0)
logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}"
.format(i, b0, line, linkcntry.at[i], buscntry.at[b1]))
logger.info(
"Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format(
i, b0, line, linkcntry.at[i], buscntry.at[b1]
)
)
def _set_links_underwater_fraction(n, offshore_shapes):
if n.links.empty: return
if n.links.empty:
return
if not hasattr(n.links, 'geometry'):
n.links['underwater_fraction'] = 0.
if not hasattr(n.links, "geometry"):
n.links["underwater_fraction"] = 0.0
else:
offshore_shape = gpd.read_file(offshore_shapes).unary_union
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length
n.links["underwater_fraction"] = (
links.intersection(offshore_shape).length / links.length
)
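# Editor's toy example of the underwater fraction above: the share of a link's
# geometry lying inside the offshore polygon is the length of the intersection
# divided by the total length (coordinates invented).
from shapely.geometry import LineString, Polygon
offshore = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
link = LineString([(-2, 2), (2, 2)])  # half of this 4-unit line is inside
fraction = link.intersection(offshore).length / link.length  # -> 0.5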
def _adjust_capacities_of_under_construction_branches(n, config):
lines_mode = config['lines'].get('under_construction', 'undef')
if lines_mode == 'zero':
n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0.
n.lines.loc[n.lines.under_construction, 's_nom'] = 0.
elif lines_mode == 'remove':
lines_mode = config["lines"].get("under_construction", "undef")
if lines_mode == "zero":
n.lines.loc[n.lines.under_construction, "num_parallel"] = 0.0
n.lines.loc[n.lines.under_construction, "s_nom"] = 0.0
elif lines_mode == "remove":
n.mremove("Line", n.lines.index[n.lines.under_construction])
elif lines_mode != 'keep':
logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.")
elif lines_mode != "keep":
logger.warning(
"Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines."
)
links_mode = config['links'].get('under_construction', 'undef')
if links_mode == 'zero':
n.links.loc[n.links.under_construction, "p_nom"] = 0.
elif links_mode == 'remove':
links_mode = config["links"].get("under_construction", "undef")
if links_mode == "zero":
n.links.loc[n.links.under_construction, "p_nom"] = 0.0
elif links_mode == "remove":
n.mremove("Link", n.links.index[n.links.under_construction])
elif links_mode != 'keep':
logger.warning("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.")
elif links_mode != "keep":
logger.warning(
"Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links."
)
if lines_mode == 'remove' or links_mode == 'remove':
if lines_mode == "remove" or links_mode == "remove":
# We might need to remove further unconnected components
n = _remove_unconnected_components(n)
return n
def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
links_p_nom, links_tyndp, europe_shape, country_shapes, offshore_shapes,
parameter_corrections, config):
def base_network(
eg_buses,
eg_converters,
eg_transformers,
eg_lines,
eg_links,
links_p_nom,
links_tyndp,
europe_shape,
country_shapes,
offshore_shapes,
parameter_corrections,
config,
):
buses = _load_buses_from_eg(eg_buses, europe_shape, config['electricity'])
buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"])
links = _load_links_from_eg(buses, eg_links)
if config['links'].get('include_tyndp'):
if config["links"].get("include_tyndp"):
buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape)
converters = _load_converters_from_eg(buses, eg_converters)
@ -560,9 +711,9 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
converters = _set_electrical_parameters_converters(converters, config)
n = pypsa.Network()
n.name = 'PyPSA-Eur'
n.name = "PyPSA-Eur"
n.set_snapshots(pd.date_range(freq='h', **config['snapshots']))
n.set_snapshots(pd.date_range(freq="h", **config["snapshots"]))
n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line")
@ -586,15 +737,28 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
return n
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('base_network')
snakemake = mock_snakemake("base_network")
configure_logging(snakemake)
n = base_network(snakemake.input.eg_buses, snakemake.input.eg_converters, snakemake.input.eg_transformers, snakemake.input.eg_lines, snakemake.input.eg_links,
snakemake.input.links_p_nom, snakemake.input.links_tyndp, snakemake.input.europe_shape, snakemake.input.country_shapes, snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections, snakemake.config)
n = base_network(
snakemake.input.eg_buses,
snakemake.input.eg_converters,
snakemake.input.eg_transformers,
snakemake.input.eg_lines,
snakemake.input.eg_links,
snakemake.input.links_p_nom,
snakemake.input.links_tyndp,
snakemake.input.europe_shape,
snakemake.input.country_shapes,
snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections,
snakemake.config,
)
n.meta = snakemake.config
n.export_to_netcdf(snakemake.output[0])

View File

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Creates Voronoi shapes for each bus representing both onshore and offshore regions.
Creates Voronoi shapes for each bus representing both onshore and offshore
regions.
Relevant Settings
-----------------
@ -38,19 +40,18 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging, REGION_COLS
import pypsa
import os
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Polygon
import numpy as np
import pandas as pd
import pypsa
from _helpers import REGION_COLS, configure_logging
from scipy.spatial import Voronoi
from shapely.geometry import Polygon
logger = logging.getLogger(__name__)
@ -81,11 +82,19 @@ def voronoi_partition_pts(points, outline):
# to avoid any network positions outside all Voronoi cells, append
# the corners of a rectangle framing these points
vor = Voronoi(np.vstack((points,
[[xmin-3.*xspan, ymin-3.*yspan],
[xmin-3.*xspan, ymax+3.*yspan],
[xmax+3.*xspan, ymin-3.*yspan],
[xmax+3.*xspan, ymax+3.*yspan]])))
vor = Voronoi(
np.vstack(
(
points,
[
[xmin - 3.0 * xspan, ymin - 3.0 * yspan],
[xmin - 3.0 * xspan, ymax + 3.0 * yspan],
[xmax + 3.0 * xspan, ymin - 3.0 * yspan],
[xmax + 3.0 * xspan, ymax + 3.0 * yspan],
],
)
)
)
polygons = []
for i in range(len(points)):
@ -98,23 +107,27 @@ def voronoi_partition_pts(points, outline):
polygons.append(poly)
return np.array(polygons, dtype=object)
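# Editor's sketch (made-up points): without the four distant framing corners
# appended above, points on the convex hull own unbounded Voronoi regions
# (marked by vertex -1) that could not be intersected with the outline.
import numpy as np
from scipy.spatial import Voronoi
pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.5, 1.0]])
frame = np.array([[-10.0, -10.0], [-10.0, 10.0], [10.0, -10.0], [10.0, 10.0]])
vor = Voronoi(np.vstack((pts, frame)))
all_finite = all(
    -1 not in vor.regions[vor.point_region[i]] for i in range(len(pts))
)  # True: every original point now has a closed cell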
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_bus_regions')
snakemake = mock_snakemake("build_bus_regions")
configure_logging(snakemake)
countries = snakemake.config['countries']
countries = snakemake.config["countries"]
n = pypsa.Network(snakemake.input.base_network)
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[
"geometry"
]
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes)
offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index('name')['geometry']
offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[
"geometry"
]
onshore_regions = []
offshore_regions = []
@ -124,29 +137,42 @@ if __name__ == "__main__":
onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({
'name': onshore_locs.index,
'x': onshore_locs['x'],
'y': onshore_locs['y'],
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape),
'country': country
}))
onshore_regions.append(
gpd.GeoDataFrame(
{
"name": onshore_locs.index,
"x": onshore_locs["x"],
"y": onshore_locs["y"],
"geometry": voronoi_partition_pts(
onshore_locs.values, onshore_shape
),
"country": country,
}
)
)
if country not in offshore_shapes.index: continue
if country not in offshore_shapes.index:
continue
offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({
'name': offshore_locs.index,
'x': offshore_locs['x'],
'y': offshore_locs['y'],
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape),
'country': country
})
offshore_regions_c = gpd.GeoDataFrame(
{
"name": offshore_locs.index,
"x": offshore_locs["x"],
"y": offshore_locs["y"],
"geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape),
"country": country,
}
)
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c)
pd.concat(onshore_regions, ignore_index=True).to_file(snakemake.output.regions_onshore)
pd.concat(onshore_regions, ignore_index=True).to_file(
snakemake.output.regions_onshore
)
if offshore_regions:
pd.concat(offshore_regions, ignore_index=True).to_file(snakemake.output.regions_offshore)
pd.concat(offshore_regions, ignore_index=True).to_file(
snakemake.output.regions_offshore
)
else:
offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore)
offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore)

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -88,43 +89,42 @@ A **SARAH-2 cutout** can be used to amend the fields ``temperature``, ``influx_t
Description
-----------
"""
import logging
import atlite
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_cutout', cutout='europe-2013-era5')
snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
configure_logging(snakemake)
cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout]
cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout]
snapshots = pd.date_range(freq='h', **snakemake.config['snapshots'])
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
time = [snapshots[0], snapshots[-1]]
cutout_params['time'] = slice(*cutout_params.get('time', time))
cutout_params["time"] = slice(*cutout_params.get("time", time))
if {'x', 'y', 'bounds'}.isdisjoint(cutout_params):
if {"x", "y", "bounds"}.isdisjoint(cutout_params):
# Determine the bounds from bus regions with a buffer of two grid cells
onshore = gpd.read_file(snakemake.input.regions_onshore)
offshore = gpd.read_file(snakemake.input.regions_offshore)
regions = pd.concat([onshore, offshore])
d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2
cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d]
elif {'x', 'y'}.issubset(cutout_params):
cutout_params['x'] = slice(*cutout_params['x'])
cutout_params['y'] = slice(*cutout_params['y'])
regions = pd.concat([onshore, offshore])
d = max(cutout_params.get("dx", 0.25), cutout_params.get("dy", 0.25)) * 2
cutout_params["bounds"] = regions.total_bounds + [-d, -d, d, d]
elif {"x", "y"}.issubset(cutout_params):
cutout_params["x"] = slice(*cutout_params["x"])
cutout_params["y"] = slice(*cutout_params["y"])
logging.info(f"Preparing cutout with parameters {cutout_params}.")
features = cutout_params.pop('features', None)
features = cutout_params.pop("features", None)
cutout = atlite.Cutout(snakemake.output[0], **cutout_params)
cutout.prepare(features=features)

View File

@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
@ -60,51 +61,61 @@ Description
"""
import logging
from _helpers import configure_logging
import atlite
import country_converter as coco
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging
import country_converter as coco
cc = coco.CountryConverter()
def get_eia_annual_hydro_generation(fn, countries):
# in billion kWh/a = TWh/a
df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[u' ','--']).iloc[1:, 1:]
df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:]
df.index = df.index.str.strip()
former_countries = {
"Former Czechoslovakia": dict(
countries=["Czech Republic", "Slovakia"],
start=1980, end=1992),
countries=["Czech Republic", "Slovakia"], start=1980, end=1992
),
"Former Serbia and Montenegro": dict(
countries=["Serbia", "Montenegro"],
start=1992, end=2005),
countries=["Serbia", "Montenegro"], start=1992, end=2005
),
"Former Yugoslavia": dict(
countries=["Slovenia", "Croatia", "Bosnia and Herzegovina", "Serbia", "Montenegro", "North Macedonia"],
start=1980, end=1991),
countries=[
"Slovenia",
"Croatia",
"Bosnia and Herzegovina",
"Serbia",
"Montenegro",
"North Macedonia",
],
start=1980,
end=1991,
),
}
for k, v in former_countries.items():
period = [str(i) for i in range(v["start"], v["end"]+1)]
ratio = df.loc[v['countries']].T.dropna().sum()
period = [str(i) for i in range(v["start"], v["end"] + 1)]
ratio = df.loc[v["countries"]].T.dropna().sum()
ratio /= ratio.sum()
for country in v['countries']:
for country in v["countries"]:
df.loc[country, period] = df.loc[k, period] * ratio[country]
baltic_states = ["Latvia", "Estonia", "Lithuania"]
df.loc[baltic_states] = df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T
df.loc[baltic_states] = (
df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T
)
df.loc["Germany"] = df.filter(like='Germany', axis=0).sum()
df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.)
df = df.loc[~df.index.str.contains('Former')]
df.loc["Germany"] = df.filter(like="Germany", axis=0).sum()
df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.0)
df = df.loc[~df.index.str.contains("Former")]
df.drop(["Europe", "Germany, West", "Germany, East", "Kosovo"], inplace=True)
df.index = cc.convert(df.index, to='iso2')
df.index.name = 'countries'
df.index = cc.convert(df.index, to="iso2")
df.index.name = "countries"
df = df.T[countries] * 1e6 # in MWh/a
@ -114,28 +125,34 @@ def get_eia_annual_hydro_generation(fn, countries):
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_hydro_profile')
snakemake = mock_snakemake("build_hydro_profile")
configure_logging(snakemake)
config_hydro = snakemake.config['renewable']['hydro']
config_hydro = snakemake.config["renewable"]["hydro"]
cutout = atlite.Cutout(snakemake.input.cutout)
countries = snakemake.config['countries']
country_shapes = (gpd.read_file(snakemake.input.country_shapes)
.set_index('name')['geometry'].reindex(countries))
country_shapes.index.name = 'countries'
countries = snakemake.config["countries"]
country_shapes = (
gpd.read_file(snakemake.input.country_shapes)
.set_index("name")["geometry"]
.reindex(countries)
)
country_shapes.index.name = "countries"
fn = snakemake.input.eia_hydro_generation
eia_stats = get_eia_annual_hydro_generation(fn, countries)
inflow = cutout.runoff(shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats)
if 'clip_min_inflow' in config_hydro:
inflow = inflow.where(inflow > config_hydro['clip_min_inflow'], 0)
inflow = cutout.runoff(
shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats,
)
if "clip_min_inflow" in config_hydro:
inflow = inflow.where(inflow > config_hydro["clip_min_inflow"], 0)
inflow.to_netcdf(snakemake.output[0])

View File

@ -1,10 +1,15 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per-country load time series with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying a time-slice of a given period, the load data is exported to a ``.csv`` file.
This rule downloads the load data from `Open Power System Data Time series
<https://data.open-power-system-data.org/time_series/>`_. For all countries in
the network, the per-country load time series with suffix
``_load_actual_entsoe_transparency`` are extracted from the dataset. After
filling small gaps linearly and large gaps by copying a time-slice of a given
period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
@ -32,17 +37,15 @@ Outputs
-------
- ``resources/load.csv``:
"""
import logging
logger = logging.getLogger(__name__)
from _helpers import configure_logging
import pandas as pd
import numpy as np
logger = logging.getLogger(__name__)
import dateutil
import numpy as np
import pandas as pd
from _helpers import configure_logging
from pandas import Timedelta as Delta
@ -71,23 +74,29 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
"""
logger.info(f"Retrieving load data from '{fn}'.")
pattern = 'power_statistics' if powerstatistics else 'transparency'
pattern = f'_load_actual_entsoe_{pattern}'
rename = lambda s: s[:-len(pattern)]
pattern = "power_statistics" if powerstatistics else "transparency"
pattern = f"_load_actual_entsoe_{pattern}"
rename = lambda s: s[: -len(pattern)]
date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={'GB_UKM' : 'GB'})
.filter(items=countries)
.loc[years])
return (
pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={"GB_UKM": "GB"})
.filter(items=countries)
.loc[years]
)
def consecutive_nans(ds):
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
.transform('sum').fillna(0))
return (
ds.isnull()
.astype(int)
.groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
.transform("sum")
.fillna(0)
)
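# Editor's illustration on a toy series: the cumulative sum over the not-null
# indicator is constant within each run of NaNs, so a grouped sum of the
# is-null indicator labels every NaN with the length of its gap.
import numpy as np
import pandas as pd
s = pd.Series([1.0, np.nan, np.nan, 4.0, np.nan, 6.0])
gap_length = (
    s.isnull()
    .astype(int)
    .groupby(s.notnull().astype(int).cumsum()[s.isnull()])
    .transform("sum")
    .fillna(0)
)
# -> 0, 2, 2, 0, 1, 0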
def fill_large_gaps(ds, shift):
@ -97,140 +106,200 @@ def fill_large_gaps(ds, shift):
This function fills gaps ranging from 3 to 168 hours (one week).
"""
shift = Delta(shift)
nhours = shift / np.timedelta64(1, 'h')
nhours = shift / np.timedelta64(1, "h")
if (consecutive_nans(ds) > nhours).any():
logger.warning('There exist gaps larger than the time shift used for '
'copying time slices.')
logger.warning(
"There exist gaps larger then the time shift used for "
"copying time slices."
)
time_shift = pd.Series(ds.values, ds.index + shift)
return ds.where(ds.notnull(), time_shift.reindex_like(ds))
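# Editor's toy example of the shift-based gap filling above: values observed
# one period earlier are re-indexed onto the gap timestamps and only used
# where the original series is missing (a 2-hour shift here instead of a week).
import numpy as np
import pandas as pd
idx = pd.date_range("2013-01-01", periods=6, freq="h")
ds = pd.Series([1.0, 2.0, np.nan, np.nan, 5.0, 6.0], index=idx)
shifted = pd.Series(ds.values, ds.index + pd.Timedelta("2h"))
filled = ds.where(ds.notnull(), shifted.reindex_like(ds))
# the NaNs at 02:00 and 03:00 take the values observed at 00:00 and 01:00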
def nan_statistics(df):
def max_consecutive_nans(ds):
return (ds.isnull().astype(int)
.groupby(ds.notnull().astype(int).cumsum())
.sum().max())
return (
ds.isnull()
.astype(int)
.groupby(ds.notnull().astype(int).cumsum())
.sum()
.max()
)
consecutive = df.apply(max_consecutive_nans)
total = df.isnull().sum()
max_total_per_month = df.isnull().resample('m').sum().max()
return pd.concat([total, consecutive, max_total_per_month],
keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
max_total_per_month = df.isnull().resample("m").sum().max()
return pd.concat(
[total, consecutive, max_total_per_month],
keys=["total", "consecutive", "max_total_per_month"],
axis=1,
)
def copy_timeslice(load, cntry, start, stop, delta, fn_load=None):
start = pd.Timestamp(start)
stop = pd.Timestamp(stop)
if (start in load.index and stop in load.index):
if start-delta in load.index and stop-delta in load.index and cntry in load:
load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values
if start in load.index and stop in load.index:
if start - delta in load.index and stop - delta in load.index and cntry in load:
load.loc[start:stop, cntry] = load.loc[
start - delta : stop - delta, cntry
].values
elif fn_load is not None:
duration = pd.date_range(freq='h', start=start-delta, end=stop-delta)
duration = pd.date_range(freq="h", start=start - delta, end=stop - delta)
load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics)
load.loc[start:stop, cntry] = load_raw.loc[start-delta:stop-delta, cntry].values
load.loc[start:stop, cntry] = load_raw.loc[
start - delta : stop - delta, cntry
].values
def manual_adjustment(load, fn_load, powerstatistics):
"""
Manually adjust gaps in the load data from the OPSD time-series package.
1. For the ENTSOE power statistics load data (if powerstatistics is True)
1. For the ENTSOE power statistics load data (if powerstatistics is True)
Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
same load curve as Serbia and Albania the same as Macedonia, both scaled
by the corresponding ratio of total energy consumptions reported by
IEA Data browser [0] for the year 2013.
Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
same load curve as Serbia and Albania the same as Macedonia, both scaled
by the corresponding ratio of total energy consumptions reported by
IEA Data browser [0] for the year 2013.
2. For the ENTSOE transparency load data (if powerstatistics is False)
2. For the ENTSOE transparency load data (if powerstatistics is False)
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
same load curve as Montenegro, scaled by the corresponding ratio of total energy
consumptions reported by IEA Data browser [0] for the year 2016.
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
same load curve as Montenegro, scaled by the corresponding ratio of total energy
consumptions reported by IEA Data browser [0] for the year 2016.
[0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons
[0] https://www.iea.org/data-and-statistics?country=WORLD&fuel=Electricity%20and%20heat&indicator=TotElecCons
Parameters
----------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
powerstatistics: bool
Whether argument load comprises the electricity consumption data of
the ENTSOE power statistics or of the ENTSOE transparency map
load_fn: str
File name or url location (file format .csv)
Parameters
----------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
powerstatistics: bool
Whether argument load comprises the electricity consumption data of
the ENTSOE power statistics or of the ENTSOE transparency map
load_fn: str
File name or url location (file format .csv)
Returns
-------
load : pd.DataFrame
Manually adjusted and interpolated load time-series with UTC
timestamps x ISO-2 countries
Returns
-------
load : pd.DataFrame
Manually adjusted and interpolated load time-series with UTC
timestamps x ISO-2 countries
"""
if powerstatistics:
if 'MK' in load.columns:
if 'AL' not in load.columns or load.AL.isnull().values.all():
load['AL'] = load['MK'] * (4.1 / 7.4)
if 'RS' in load.columns:
if 'KV' not in load.columns or load.KV.isnull().values.all():
load['KV'] = load['RS'] * (4.8 / 27.)
if "MK" in load.columns:
if "AL" not in load.columns or load.AL.isnull().values.all():
load["AL"] = load["MK"] * (4.1 / 7.4)
if "RS" in load.columns:
if "KV" not in load.columns or load.KV.isnull().values.all():
load["KV"] = load["RS"] * (4.8 / 27.0)
copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
copy_timeslice(
load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)
)
copy_timeslice(
load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)
)
copy_timeslice(
load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)
)
copy_timeslice(
load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)
)
# this is a weekend, so take the weekend before
copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
copy_timeslice(
load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)
)
copy_timeslice(
load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)
)
copy_timeslice(
load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1)
)
# whole January is missing
copy_timeslice(load, 'GB', '2010-01-01 00:00', '2010-01-31 23:00', Delta(days=-365), fn_load)
copy_timeslice(
load,
"GB",
"2010-01-01 00:00",
"2010-01-31 23:00",
Delta(days=-365),
fn_load,
)
# 1 January at midnight gets special treatment
copy_timeslice(load, 'IE', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
copy_timeslice(load, 'PT', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
copy_timeslice(load, 'GB', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
copy_timeslice(
load,
"IE",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"PT",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"GB",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
else:
if 'ME' in load:
if 'AL' not in load and 'AL' in countries:
load['AL'] = load.ME * (5.7/2.9)
if 'MK' not in load and 'MK' in countries:
load['MK'] = load.ME * (6.7/2.9)
copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
if "ME" in load:
if "AL" not in load and "AL" in countries:
load["AL"] = load.ME * (5.7 / 2.9)
if "MK" not in load and "MK" in countries:
load["MK"] = load.ME * (6.7 / 2.9)
copy_timeslice(
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
)
return load
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_load_data')
snakemake = mock_snakemake("build_load_data")
configure_logging(snakemake)
powerstatistics = snakemake.config['load']['power_statistics']
interpolate_limit = snakemake.config['load']['interpolate_limit']
countries = snakemake.config['countries']
snapshots = pd.date_range(freq='h', **snakemake.config['snapshots'])
powerstatistics = snakemake.config["load"]["power_statistics"]
interpolate_limit = snakemake.config["load"]["interpolate_limit"]
countries = snakemake.config["countries"]
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
years = slice(snapshots[0], snapshots[-1])
time_shift = snakemake.config['load']['time_shift_for_large_gaps']
time_shift = snakemake.config["load"]["time_shift_for_large_gaps"]
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
if snakemake.config['load']['manual_adjustments']:
if snakemake.config["load"]["manual_adjustments"]:
load = manual_adjustment(load, snakemake.input[0], powerstatistics)
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
load = load.interpolate(method='linear', limit=interpolate_limit)
load = load.interpolate(method="linear", limit=interpolate_limit)
logger.info("Filling larger gaps by copying time-slices of period "
f"'{time_shift}'.")
logger.info(
"Filling larger gaps by copying time-slices of period " f"'{time_shift}'."
)
load = load.apply(fill_large_gaps, shift=time_shift)
assert not load.isna().any().any(), (
'Load data contains nans. Adjust the parameters '
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
'for implementing the needed load data modifications.')
"Load data contains nans. Adjust the parameters "
"`time_shift_for_large_gaps` or modify the `manual_adjustment` function "
"for implementing the needed load data modifications."
)
load.to_csv(snakemake.output[0])

View File

@ -1,9 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Rasterises the vector data of the `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas onto all cutout regions.
Rasterises the vector data of the `Natura 2000
<https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas onto all
cutout regions.
Relevant Settings
-----------------
@ -36,15 +39,14 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging
import atlite
import geopandas as gpd
import rasterio as rio
from _helpers import configure_logging
from rasterio.features import geometry_mask
from rasterio.warp import transform_bounds
@ -56,11 +58,11 @@ def determine_cutout_xXyY(cutout_name):
assert cutout.crs.to_epsg() == 4326
x, X, y, Y = cutout.extent
dx, dy = cutout.dx, cutout.dy
return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.]
return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0]
def get_transform_and_shape(bounds, res):
left, bottom = [(b // res)* res for b in bounds[:2]]
left, bottom = [(b // res) * res for b in bounds[:2]]
right, top = [(b // res + 1) * res for b in bounds[2:]]
shape = int((top - bottom) // res), int((right - left) / res)
transform = rio.Affine(res, 0, left, 0, -res, top)
@ -68,9 +70,10 @@ def get_transform_and_shape(bounds, res):
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_natura_raster')
snakemake = mock_snakemake("build_natura_raster")
configure_logging(snakemake)
cutouts = snakemake.input.cutouts
@ -83,7 +86,16 @@ if __name__ == "__main__":
raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform)
raster = raster.astype(rio.uint8)
with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8,
count=1, transform=transform, crs=3035, compress='lzw',
width=raster.shape[1], height=raster.shape[0]) as dst:
with rio.open(
snakemake.output[0],
"w",
driver="GTiff",
dtype=rio.uint8,
count=1,
transform=transform,
crs=3035,
compress="lzw",
width=raster.shape[1],
height=raster.shape[0],
) as dst:
dst.write(raster, indexes=1)

View File

@ -1,10 +1,15 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Retrieves conventional powerplant capacities and locations from `powerplantmatching <https://github.com/FRESNA/powerplantmatching>`_, assigns these to buses and creates a ``.csv`` file. It is possible to amend the powerplant database with custom entries provided in ``data/custom_powerplants.csv``.
Retrieves conventional powerplant capacities and locations from
`powerplantmatching <https://github.com/FRESNA/powerplantmatching>`_, assigns
these to buses and creates a ``.csv`` file. It is possible to amend the
powerplant database with custom entries provided in
``data/custom_powerplants.csv``.
Relevant Settings
-----------------
@ -68,16 +73,14 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity:
powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015
custom_powerplants: YearCommissioned <= 2015
"""
import logging
from _helpers import configure_logging
import pypsa
import powerplantmatching as pm
import pandas as pd
import powerplantmatching as pm
import pypsa
from _helpers import configure_logging
from powerplantmatching.export import map_country_bus
logger = logging.getLogger(__name__)
@ -86,70 +89,78 @@ logger = logging.getLogger(__name__)
def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
if not custom_ppl_query:
return ppl
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={'bus': 'str'})
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
if isinstance(custom_ppl_query, str):
add_ppls.query(custom_ppl_query, inplace=True)
return pd.concat([ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True)
return pd.concat(
[ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True
)
def replace_natural_gas_technology(df):
mapping = {'Steam Turbine': 'OCGT', "Combustion Engine": "OCGT"}
tech = df.Technology.replace(mapping).fillna('OCGT')
return df.Technology.where(df.Fueltype != 'Natural Gas', tech)
mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"}
tech = df.Technology.replace(mapping).fillna("OCGT")
return df.Technology.where(df.Fueltype != "Natural Gas", tech)
def replace_natural_gas_fueltype(df):
return df.Fueltype.where(df.Fueltype != 'Natural Gas', df.Technology)
def replace_natural_gas_fueltype(df):
return df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_powerplants')
snakemake = mock_snakemake("build_powerplants")
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.base_network)
countries = n.buses.country.unique()
ppl = (pm.powerplants(from_url=True)
.powerplant.fill_missing_decommissioning_years()
.powerplant.convert_country_to_alpha2()
.query('Fueltype not in ["Solar", "Wind"] and Country in @countries')
.assign(Technology=replace_natural_gas_technology)
.assign(Fueltype=replace_natural_gas_fueltype))
ppl = (
pm.powerplants(from_url=True)
.powerplant.fill_missing_decommissioning_years()
.powerplant.convert_country_to_alpha2()
.query('Fueltype not in ["Solar", "Wind"] and Country in @countries')
.assign(Technology=replace_natural_gas_technology)
.assign(Fueltype=replace_natural_gas_fueltype)
)
# Correct bioenergy for countries where possible
opsd = pm.data.OPSD_VRE().powerplant.convert_country_to_alpha2()
opsd = opsd.query('Country in @countries and Fueltype == "Bioenergy"')
opsd['Name'] = "Biomass"
opsd["Name"] = "Biomass"
available_countries = opsd.Country.unique()
ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")')
ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")')
ppl = pd.concat([ppl, opsd])
ppl_query = snakemake.config['electricity']['powerplants_filter']
ppl_query = snakemake.config["electricity"]["powerplants_filter"]
if isinstance(ppl_query, str):
ppl.query(ppl_query, inplace=True)
# add carriers from own powerplant files:
custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
ppl = add_custom_powerplants(ppl, snakemake.input.custom_powerplants, custom_ppl_query)
custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"]
ppl = add_custom_powerplants(
ppl, snakemake.input.custom_powerplants, custom_ppl_query
)
countries_wo_ppl = set(countries)-set(ppl.Country.unique())
countries_wo_ppl = set(countries) - set(ppl.Country.unique())
if countries_wo_ppl:
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
substations = n.buses.query('substation_lv')
substations = n.buses.query("substation_lv")
ppl = map_country_bus(ppl, substations)
bus_null_b = ppl["bus"].isnull()
if bus_null_b.any():
logging.warning(f"Couldn't find close bus for {bus_null_b.sum()} powerplants. "
"Removing them from the powerplants list.")
logging.warning(
f"Couldn't find close bus for {bus_null_b.sum()} powerplants. "
"Removing them from the powerplants list."
)
ppl = ppl[~bus_null_b]
# TODO: This has to be fixed in PPM, some powerplants are still duplicated
cumcount = ppl.groupby(['bus', 'Fueltype']).cumcount() + 1
# TODO: This has to be fixed in PPM, some powerplants are still duplicated
cumcount = ppl.groupby(["bus", "Fueltype"]).cumcount() + 1
ppl.Name = ppl.Name.where(cumcount == 1, ppl.Name + " " + cumcount.astype(str))
ppl.reset_index(drop=True).to_csv(snakemake.output[0])

View File

@ -1,15 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""Calculates for each network node the
(i) installable capacity (based on land-use), (ii) the available generation time
series (based on weather data), and (iii) the average distance from the node for
onshore wind, AC-connected offshore wind, DC-connected offshore wind and solar
PV generators. In addition for offshore wind it calculates the fraction of the
grid connection which is under water.
"""
Calculates for each network node the (i) installable capacity (based on land-
use), (ii) the available generation time series (based on weather data), and
(iii) the average distance from the node for onshore wind, AC-connected
offshore wind, DC-connected offshore wind and solar PV generators. In addition
for offshore wind it calculates the fraction of the grid connection which is
under water.
.. note:: Hydroelectric profiles are built in script :mod:`build_hydro_profiles`.
@ -177,132 +179,148 @@ node (`p_nom_max`): ``simple`` and ``conservative``:
- ``conservative`` ascertains the nodal limit by increasing capacities
proportionally to the layout until the limit of an individual grid cell is
reached.
"""
import progressbar as pgb
import geopandas as gpd
import xarray as xr
import numpy as np
import functools
import atlite
import logging
import time
import atlite
import geopandas as gpd
import numpy as np
import progressbar as pgb
import xarray as xr
from _helpers import configure_logging
from dask.distributed import Client, LocalCluster
from pypsa.geo import haversine
from shapely.geometry import LineString
import time
from dask.distributed import Client, LocalCluster
from _helpers import configure_logging
logger = logging.getLogger(__name__)
if __name__ == '__main__':
if 'snakemake' not in globals():
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_renewable_profiles', technology='solar')
snakemake = mock_snakemake("build_renewable_profiles", technology="solar")
configure_logging(snakemake)
pgb.streams.wrap_stderr()
nprocesses = int(snakemake.threads)
noprogress = not snakemake.config['atlite'].get('show_progress', False)
config = snakemake.config['renewable'][snakemake.wildcards.technology]
resource = config['resource'] # pv panel config / wind turbine config
correction_factor = config.get('correction_factor', 1.)
capacity_per_sqkm = config['capacity_per_sqkm']
p_nom_max_meth = config.get('potential', 'conservative')
noprogress = not snakemake.config["atlite"].get("show_progress", False)
config = snakemake.config["renewable"][snakemake.wildcards.technology]
resource = config["resource"] # pv panel config / wind turbine config
correction_factor = config.get("correction_factor", 1.0)
capacity_per_sqkm = config["capacity_per_sqkm"]
p_nom_max_meth = config.get("potential", "conservative")
if isinstance(config.get("corine", {}), list):
config['corine'] = {'grid_codes': config['corine']}
config["corine"] = {"grid_codes": config["corine"]}
if correction_factor != 1.:
logger.info(f'correction_factor is set as {correction_factor}')
if correction_factor != 1.0:
logger.info(f"correction_factor is set as {correction_factor}")
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
client = Client(cluster, asynchronous=True)
cutout = atlite.Cutout(snakemake.input['cutout'])
cutout = atlite.Cutout(snakemake.input["cutout"])
regions = gpd.read_file(snakemake.input.regions)
assert not regions.empty, (f"List of regions in {snakemake.input.regions} is empty, please "
"disable the corresponding renewable technology")
assert not regions.empty, (
f"List of regions in {snakemake.input.regions} is empty, please "
"disable the corresponding renewable technology"
)
# do not pull up, set_index does not work if geo dataframe is empty
regions = regions.set_index('name').rename_axis('bus')
regions = regions.set_index("name").rename_axis("bus")
buses = regions.index
res = config.get("excluder_resolution", 100)
excluder = atlite.ExclusionContainer(crs=3035, res=res)
if config['natura']:
if config["natura"]:
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
corine = config.get("corine", {})
if "grid_codes" in corine:
codes = corine["grid_codes"]
excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035)
if corine.get("distance", 0.) > 0.:
if corine.get("distance", 0.0) > 0.0:
codes = corine["distance_grid_codes"]
buffer = corine["distance"]
excluder.add_raster(snakemake.input.corine, codes=codes, buffer=buffer, crs=3035)
excluder.add_raster(
snakemake.input.corine, codes=codes, buffer=buffer, crs=3035
)
if "ship_threshold" in config:
shipping_threshold=config["ship_threshold"] * 8760 * 6 # approximation because 6 years of data which is hourly collected
shipping_threshold = (
config["ship_threshold"] * 8760 * 6
) # approximation, since the data covers 6 years of hourly observations
func = functools.partial(np.less, shipping_threshold)
excluder.add_raster(snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True)
excluder.add_raster(
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
)
if "max_depth" in config:
# lambda not supported for atlite + multiprocessing
# use named function np.greater with partially frozen argument instead
# and exclude areas where: -max_depth > grid cell depth
func = functools.partial(np.greater,-config['max_depth'])
func = functools.partial(np.greater, -config["max_depth"])
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
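# Editorial note: functools.partial freezes the first argument, so func(depth)
# evaluates np.greater(-max_depth, depth), i.e. -max_depth > depth as stated in
# the comment above; a lambda is avoided since it would not survive pickling
# when atlite hands the exclusion calculation to worker processes.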
if 'min_shore_distance' in config:
buffer = config['min_shore_distance']
if "min_shore_distance" in config:
buffer = config["min_shore_distance"]
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
if 'max_shore_distance' in config:
buffer = config['max_shore_distance']
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer, invert=True)
if "max_shore_distance" in config:
buffer = config["max_shore_distance"]
excluder.add_geometry(
snakemake.input.country_shapes, buffer=buffer, invert=True
)
kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
if noprogress:
logger.info('Calculate landuse availabilities...')
logger.info("Calculate landuse availabilities...")
start = time.time()
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
duration = time.time() - start
logger.info(f'Completed availability calculation ({duration:2.2f}s)')
logger.info(f"Completed availability calculation ({duration:2.2f}s)")
else:
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
area = cutout.grid.to_crs(3035).area / 1e6
area = xr.DataArray(area.values.reshape(cutout.shape),
[cutout.coords['y'], cutout.coords['x']])
area = xr.DataArray(
area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]]
)
potential = capacity_per_sqkm * availability.sum('bus') * area
func = getattr(cutout, resource.pop('method'))
resource['dask_kwargs'] = {"scheduler": client}
potential = capacity_per_sqkm * availability.sum("bus") * area
func = getattr(cutout, resource.pop("method"))
resource["dask_kwargs"] = {"scheduler": client}
capacity_factor = correction_factor * func(capacity_factor=True, **resource)
layout = capacity_factor * area * capacity_per_sqkm
profile, capacities = func(matrix=availability.stack(spatial=['y','x']),
layout=layout, index=buses,
per_unit=True, return_capacity=True, **resource)
profile, capacities = func(
matrix=availability.stack(spatial=["y", "x"]),
layout=layout,
index=buses,
per_unit=True,
return_capacity=True,
**resource,
)
logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')")
if p_nom_max_meth == 'simple':
if p_nom_max_meth == "simple":
p_nom_max = capacity_per_sqkm * availability @ area
elif p_nom_max_meth == 'conservative':
max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y'])
elif p_nom_max_meth == "conservative":
max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"])
p_nom_max = capacities / max_cap_factor
else:
raise AssertionError('Config key `potential` should be one of "simple" '
f'(default) or "conservative", not "{p_nom_max_meth}"')
raise AssertionError(
'Config key `potential` should be one of "simple" '
f'(default) or "conservative", not "{p_nom_max_meth}"'
)
logger.info("Calculate average distances.")
layoutmatrix = (layout * availability).stack(spatial=["y", "x"])
logger.info('Calculate average distances.')
layoutmatrix = (layout * availability).stack(spatial=['y','x'])
coords = cutout.grid[['x', 'y']]
bus_coords = regions[['x', 'y']]
coords = cutout.grid[["x", "y"]]
bus_coords = regions[["x", "y"]]
average_distance = []
centre_of_mass = []
@ -311,39 +329,45 @@ if __name__ == '__main__':
nz_b = row != 0
row = row[nz_b]
co = coords[nz_b]
distances = haversine(bus_coords.loc[bus], co)
distances = haversine(bus_coords.loc[bus], co)
average_distance.append((distances * (row / row.sum())).sum())
centre_of_mass.append(co.values.T @ (row / row.sum()))
average_distance = xr.DataArray(average_distance, [buses])
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])])
ds = xr.merge([(correction_factor * profile).rename('profile'),
capacities.rename('weight'),
p_nom_max.rename('p_nom_max'),
potential.rename('potential'),
average_distance.rename('average_distance')])
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])])
ds = xr.merge(
[
(correction_factor * profile).rename("profile"),
capacities.rename("weight"),
p_nom_max.rename("p_nom_max"),
potential.rename("potential"),
average_distance.rename("average_distance"),
]
)
if snakemake.wildcards.technology.startswith("offwind"):
logger.info('Calculate underwater fraction of connections.')
offshore_shape = gpd.read_file(snakemake.input['offshore_shapes']).unary_union
logger.info("Calculate underwater fraction of connections.")
offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).unary_union
underwater_fraction = []
for bus in buses:
p = centre_of_mass.sel(bus=bus).data
line = LineString([p, regions.loc[bus, ['x', 'y']]])
frac = line.intersection(offshore_shape).length/line.length
line = LineString([p, regions.loc[bus, ["x", "y"]]])
frac = line.intersection(offshore_shape).length / line.length
underwater_fraction.append(frac)
ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses])
ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses])
# select only buses with some capacity and minimal capacity factor
ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) &
(ds['p_nom_max'] > config.get('min_p_nom_max', 0.))))
ds = ds.sel(
bus=(
(ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0))
& (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0))
)
)
if 'clip_p_max_pu' in config:
min_p_max_pu = config['clip_p_max_pu']
ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0)
if "clip_p_max_pu" in config:
min_p_max_pu = config["clip_p_max_pu"]
ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0)
ds.to_netcdf(snakemake.output.profile)
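For orientation, a minimal sketch (not part of the diff) of inspecting the resulting profile file; the path is hypothetical, the variable names match the ``xr.merge`` call above:

import xarray as xr

ds = xr.open_dataset("resources/profile_solar.nc")  # hypothetical output path
print(ds["profile"].mean("time"))  # mean per-unit availability per bus
print(ds["p_nom_max"])  # installable capacity per bus
print(ds["average_distance"])  # mean distance of eligible cells to the bus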

View File

@ -1,9 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 <https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics>`_ areas.
Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 <
https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics>
`_ areas.
Relevant Settings
-----------------
@ -64,22 +67,20 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging
import numpy as np
from operator import attrgetter
from functools import reduce
from itertools import takewhile
from operator import attrgetter
import pandas as pd
import geopandas as gpd
import numpy as np
import pandas as pd
import pycountry as pyc
from _helpers import configure_logging
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import unary_union
import pycountry as pyc
logger = logging.getLogger(__name__)
@ -94,40 +95,58 @@ def _get_country(target, **keys):
def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
if isinstance(polys, MultiPolygon):
polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
polys = sorted(polys.geoms, key=attrgetter("area"), reverse=True)
mainpoly = polys[0]
mainlength = np.sqrt(mainpoly.area/(2.*np.pi))
mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi))
if mainpoly.area > minarea:
polys = MultiPolygon([p
for p in takewhile(lambda p: p.area > minarea, polys)
if not filterremote or (mainpoly.distance(p) < mainlength)])
polys = MultiPolygon(
[
p
for p in takewhile(lambda p: p.area > minarea, polys)
if not filterremote or (mainpoly.distance(p) < mainlength)
]
)
else:
polys = mainpoly
return polys.simplify(tolerance=tolerance)
def countries(naturalearth, country_list):
if 'RS' in country_list: country_list.append('KV')
if "RS" in country_list:
country_list.append("KV")
df = gpd.read_file(naturalearth)
# Names are a hassle in naturalearth, try several fields
fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3'))
df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
fieldnames = (
df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
)
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
df = df.loc[df.name.isin(country_list) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))]
s = df.set_index('name')['geometry'].map(_simplify_polys)
if 'RS' in country_list: s['RS'] = s['RS'].union(s.pop('KV'))
df = df.loc[
df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
]
s = df.set_index("name")["geometry"].map(_simplify_polys)
if "RS" in country_list:
s["RS"] = s["RS"].union(s.pop("KV"))
return s
def eez(country_shapes, eez, country_list):
df = gpd.read_file(eez)
df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in country_list])]
df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c))
s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False))
s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3})
df = df.loc[
df["ISO_3digit"].isin(
[_get_country("alpha_3", alpha_2=c) for c in country_list]
)
]
df["name"] = df["ISO_3digit"].map(lambda c: _get_country("alpha_2", alpha_3=c))
s = df.set_index("name").geometry.map(
lambda s: _simplify_polys(s, filterremote=False)
)
s = gpd.GeoSeries(
{k: v for k, v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3}
)
s = s.to_frame("geometry")
s.index.name = "name"
return s
@ -140,84 +159,121 @@ def country_cover(country_shapes, eez_shapes=None):
europe_shape = unary_union(shapes)
if isinstance(europe_shape, MultiPolygon):
europe_shape = max(europe_shape, key=attrgetter('area'))
europe_shape = max(europe_shape, key=attrgetter("area"))
return Polygon(shell=europe_shape.exterior)
def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
df = gpd.read_file(nuts3)
df = df.loc[df['STAT_LEVL_'] == 3]
df['geometry'] = df['geometry'].map(_simplify_polys)
df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id')
df = df.loc[df["STAT_LEVL_"] == 3]
df["geometry"] = df["geometry"].map(_simplify_polys)
df = df.rename(columns={"NUTS_ID": "id"})[["id", "geometry"]].set_index("id")
pop = pd.read_table(nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python')
pop = (pop
.set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS']
.applymap(lambda x: pd.to_numeric(x, errors='coerce'))
.fillna(method='bfill', axis=1))['2014']
pop = pd.read_table(nuts3pop, na_values=[":"], delimiter=" ?\t", engine="python")
pop = (
pop.set_index(
pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(","))
)
.loc["THS"]
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
.fillna(method="bfill", axis=1)
)["2014"]
gdp = pd.read_table(nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python')
gdp = (gdp
.set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB']
.applymap(lambda x: pd.to_numeric(x, errors='coerce'))
.fillna(method='bfill', axis=1))['2014']
gdp = pd.read_table(nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python")
gdp = (
gdp.set_index(
pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(","))
)
.loc["EUR_HAB"]
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
.fillna(method="bfill", axis=1)
)["2014"]
cantons = pd.read_csv(ch_cantons)
cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS']
cantons = cantons.str.pad(5, side='right', fillchar='0')
cantons = cantons.set_index(cantons["HASC"].str[3:])["NUTS"]
cantons = cantons.str.pad(5, side="right", fillchar="0")
swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0)
swiss.columns = swiss.columns.to_series().map(cantons)
swiss_pop = pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':])
swiss_pop = pd.to_numeric(swiss.loc["Residents in 1000", "CH040":])
pop = pd.concat([pop, swiss_pop])
swiss_gdp = pd.to_numeric(swiss.loc['Gross domestic product per capita in Swiss francs', 'CH040':])
swiss_gdp = pd.to_numeric(
swiss.loc["Gross domestic product per capita in Swiss francs", "CH040":]
)
gdp = pd.concat([gdp, swiss_gdp])
df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp)))
df['country'] = df.index.to_series().str[:2].replace(dict(UK='GB', EL='GR'))
df["country"] = df.index.to_series().str[:2].replace(dict(UK="GB", EL="GR"))
excludenuts = pd.Index(('FRA10', 'FRA20', 'FRA30', 'FRA40', 'FRA50',
'PT200', 'PT300',
'ES707', 'ES703', 'ES704','ES705', 'ES706', 'ES708', 'ES709',
'FI2', 'FR9'))
excludecountry = pd.Index(('MT', 'TR', 'LI', 'IS', 'CY', 'KV'))
excludenuts = pd.Index(
(
"FRA10",
"FRA20",
"FRA30",
"FRA40",
"FRA50",
"PT200",
"PT300",
"ES707",
"ES703",
"ES704",
"ES705",
"ES706",
"ES708",
"ES709",
"FI2",
"FR9",
)
)
excludecountry = pd.Index(("MT", "TR", "LI", "IS", "CY", "KV"))
df = df.loc[df.index.difference(excludenuts)]
df = df.loc[~df.country.isin(excludecountry)]
manual = gpd.GeoDataFrame(
[['BA1', 'BA', 3871.],
['RS1', 'RS', 7210.],
['AL1', 'AL', 2893.]],
columns=['NUTS_ID', 'country', 'pop']
).set_index('NUTS_ID')
manual['geometry'] = manual['country'].map(country_shapes)
[["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
columns=["NUTS_ID", "country", "pop"],
).set_index("NUTS_ID")
manual["geometry"] = manual["country"].map(country_shapes)
manual = manual.dropna()
df = pd.concat([df, manual], sort=False)
df.loc['ME000', 'pop'] = 650.
df.loc["ME000", "pop"] = 650.0
return df
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_shapes')
snakemake = mock_snakemake("build_shapes")
configure_logging(snakemake)
country_shapes = countries(snakemake.input.naturalearth, snakemake.config['countries'])
country_shapes = countries(
snakemake.input.naturalearth, snakemake.config["countries"]
)
country_shapes.reset_index().to_file(snakemake.output.country_shapes)
offshore_shapes = eez(country_shapes, snakemake.input.eez, snakemake.config['countries'])
offshore_shapes = eez(
country_shapes, snakemake.input.eez, snakemake.config["countries"]
)
offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes)
europe_shape = gpd.GeoDataFrame(geometry=[country_cover(country_shapes, offshore_shapes.geometry)])
europe_shape = gpd.GeoDataFrame(
geometry=[country_cover(country_shapes, offshore_shapes.geometry)]
)
europe_shape.reset_index().to_file(snakemake.output.europe_shape)
nuts3_shapes = nuts3(country_shapes, snakemake.input.nuts3, snakemake.input.nuts3pop,
snakemake.input.nuts3gdp, snakemake.input.ch_cantons, snakemake.input.ch_popgdp)
nuts3_shapes = nuts3(
country_shapes,
snakemake.input.nuts3,
snakemake.input.nuts3pop,
snakemake.input.nuts3gdp,
snakemake.input.ch_cantons,
snakemake.input.ch_popgdp,
)
nuts3_shapes.reset_index().to_file(snakemake.output.nuts3_shapes)

View File

@ -1,9 +1,14 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Transforms the global ship density data from https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-Traffic-Density to the size of the considered cutout. The global ship density raster is later used for the exclusion when calculating the offshore potentials.
Transforms the global ship density data from
https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-
Traffic-Density to the size of the considered cutout. The global ship density
raster is later used for the exclusion when calculating the offshore
potentials.
Relevant Settings
-----------------
@ -30,23 +35,23 @@ Outputs
Description
-----------
"""
import logging
import os
import zipfile
import xarray as xr
from _helpers import configure_logging
from build_natura_raster import determine_cutout_xXyY
import zipfile
import xarray as xr
import os
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_ship_raster')
snakemake = mock_snakemake("build_ship_raster")
configure_logging(snakemake)
cutouts = snakemake.input.cutouts
@ -55,7 +60,9 @@ if __name__ == "__main__":
with zipfile.ZipFile(snakemake.input.ship_density) as zip_f:
zip_f.extract("shipdensity_global.tif")
with xr.open_rasterio("shipdensity_global.tif") as ship_density:
ship_density = ship_density.drop(["band"]).sel(x=slice(min(xs),max(Xs)), y=slice(max(Ys),min(ys)))
ship_density = ship_density.drop(["band"]).sel(
x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys))
)
ship_density.to_netcdf(snakemake.output[0])
os.remove("shipdensity_global.tif")
os.remove("shipdensity_global.tif")

View File

@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Creates networks clustered to ``{cluster}`` number of zones with aggregated buses, generators and transmission corridors.
Creates networks clustered to ``{cluster}`` number of zones with aggregated
buses, generators and transmission corridors.
Relevant Settings
-----------------
@ -118,28 +120,28 @@ Exemplary unsolved network clustered to 37 nodes:
.. image:: ../img/elec_s_37.png
:scale: 40 %
:align: center
"""
import logging
from _helpers import configure_logging, update_p_nom_max, get_aggregation_strategies
import pypsa
import pandas as pd
import numpy as np
import geopandas as gpd
import pyomo.environ as po
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from functools import reduce
from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_hac,
busmap_by_greedy_modularity, get_clustering_from_busmap)
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyomo.environ as po
import pypsa
import seaborn as sns
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
from pypsa.networkclustering import (
busmap_by_greedy_modularity,
busmap_by_hac,
busmap_by_kmeans,
get_clustering_from_busmap,
)
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
warnings.filterwarnings(action="ignore", category=UserWarning)
from add_electricity import load_costs
@ -148,19 +150,21 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def normed(x): return (x/x.sum()).fillna(0.)
def normed(x):
return (x / x.sum()).fillna(0.0)
def weighting_for_country(n, x):
conv_carriers = {'OCGT','CCGT','PHS', 'hydro'}
gen = (n
.generators.loc[n.generators.carrier.isin(conv_carriers)]
.groupby('bus').p_nom.sum()
.reindex(n.buses.index, fill_value=0.) +
n
.storage_units.loc[n.storage_units.carrier.isin(conv_carriers)]
.groupby('bus').p_nom.sum()
.reindex(n.buses.index, fill_value=0.))
conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"}
gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby(
"bus"
).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[
n.storage_units.carrier.isin(conv_carriers)
].groupby(
"bus"
).p_nom.sum().reindex(
n.buses.index, fill_value=0.0
)
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
b_i = x.index
@ -168,34 +172,41 @@ def weighting_for_country(n, x):
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
return (w * (100. / w.max())).clip(lower=1.).astype(int)
return (w * (100.0 / w.max())).clip(lower=1.0).astype(int)
def get_feature_for_hac(n, buses_i=None, feature=None):
if buses_i is None:
buses_i = n.buses.index
if feature is None:
feature = "solar+onwind-time"
carriers = feature.split('-')[0].split('+')
carriers = feature.split("-")[0].split("+")
if "offwind" in carriers:
carriers.remove("offwind")
carriers = np.append(carriers, network.generators.carrier.filter(like='offwind').unique())
carriers = np.append(
carriers, network.generators.carrier.filter(like="offwind").unique()
)
if feature.split('-')[1] == 'cap':
if feature.split("-")[1] == "cap":
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = n.generators_t.p_max_pu[gen_i].mean().rename(index = n.generators.loc[gen_i].bus)
attach = (
n.generators_t.p_max_pu[gen_i]
.mean()
.rename(index=n.generators.loc[gen_i].bus)
)
feature_data[carrier] = attach
if feature.split('-')[1] == 'time':
if feature.split("-")[1] == "time":
feature_data = pd.DataFrame(columns=buses_i)
for carrier in carriers:
gen_i = n.generators.query("carrier == @carrier").index
attach = n.generators_t.p_max_pu[gen_i].rename(columns = n.generators.loc[gen_i].bus)
attach = n.generators_t.p_max_pu[gen_i].rename(
columns=n.generators.loc[gen_i].bus
)
feature_data = pd.concat([feature_data, attach], axis=0)[buses_i]
feature_data = feature_data.T
@ -208,80 +219,114 @@ def get_feature_for_hac(n, buses_i=None, feature=None):
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
"""Determine the number of clusters per country"""
"""
Determine the number of clusters per country.
"""
L = (n.loads_t.p_set.mean()
.groupby(n.loads.bus).sum()
.groupby([n.buses.country, n.buses.sub_network]).sum()
.pipe(normed))
L = (
n.loads_t.p_set.mean()
.groupby(n.loads.bus)
.sum()
.groupby([n.buses.country, n.buses.sub_network])
.sum()
.pipe(normed)
)
N = n.buses.groupby(['country', 'sub_network']).size()
N = n.buses.groupby(["country", "sub_network"]).size()
assert n_clusters >= len(N) and n_clusters <= N.sum(), \
f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
assert (
n_clusters >= len(N) and n_clusters <= N.sum()
), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
if focus_weights is not None:
total_focus = sum(list(focus_weights.values()))
assert total_focus <= 1.0, "The sum of focus weights must be less than or equal to 1."
assert (
total_focus <= 1.0
), "The sum of focus weights must be less than or equal to 1."
for country, weight in focus_weights.items():
L[country] = weight / len(L[country])
remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')]
remainder = [
c not in focus_weights.keys() for c in L.index.get_level_values("country")
]
L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)
logger.warning('Using custom focus weights for determining number of clusters.')
logger.warning("Using custom focus weights for determining number of clusters.")
assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
assert np.isclose(
L.sum(), 1.0, rtol=1e-3
), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
m = po.ConcreteModel()
def n_bounds(model, *n_id):
return (1, N[n_id])
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index),
sense=po.minimize)
m.objective = po.Objective(
expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index),
sense=po.minimize,
)
opt = po.SolverFactory(solver_name)
if not opt.has_capability('quadratic_objective'):
logger.warning(f'The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`.')
opt = po.SolverFactory('ipopt')
if not opt.has_capability("quadratic_objective"):
logger.warning(
f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`."
)
opt = po.SolverFactory("ipopt")
results = opt.solve(m)
assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"
assert (
results["Solver"][0]["Status"] == "ok"
), f"Solver returned non-optimally: {results}"
return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", feature=None, **algorithm_kwds):
def busmap_for_n_clusters(
n,
n_clusters,
solver_name,
focus_weights=None,
algorithm="kmeans",
feature=None,
**algorithm_kwds,
):
if algorithm == "kmeans":
algorithm_kwds.setdefault('n_init', 1000)
algorithm_kwds.setdefault('max_iter', 30000)
algorithm_kwds.setdefault('tol', 1e-6)
algorithm_kwds.setdefault('random_state', 0)
algorithm_kwds.setdefault("n_init", 1000)
algorithm_kwds.setdefault("max_iter", 30000)
algorithm_kwds.setdefault("tol", 1e-6)
algorithm_kwds.setdefault("random_state", 0)
def fix_country_assignment_for_hac(n):
from scipy.sparse import csgraph
# overwrite country of nodes that are disconnected from their country-topology
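# (editorial) i.e. a bus with no line connection to the rest of its country is
# reassigned to the country of a neighbouring bus found below, presumably so
# that the per-country HAC clustering operates on connected components only.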
for country in n.buses.country.unique():
m = n[n.buses.country ==country].copy()
m = n[n.buses.country == country].copy()
_, labels = csgraph.connected_components(m.adjacency_matrix(), directed=False)
_, labels = csgraph.connected_components(
m.adjacency_matrix(), directed=False
)
component = pd.Series(labels, index=m.buses.index)
component_sizes = component.value_counts()
if len(component_sizes)>1:
disconnected_bus = component[component==component_sizes.index[-1]].index[0]
if len(component_sizes) > 1:
disconnected_bus = component[
component == component_sizes.index[-1]
].index[0]
neighbor_bus = (
n.lines.query("bus0 == @disconnected_bus or bus1 == @disconnected_bus")
.iloc[0][['bus0', 'bus1']]
)
new_country = list(set(n.buses.loc[neighbor_bus].country)-set([country]))[0]
neighbor_bus = n.lines.query(
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
).iloc[0][["bus0", "bus1"]]
new_country = list(
set(n.buses.loc[neighbor_bus].country) - set([country])
)[0]
logger.info(
f"overwriting country `{country}` of bus `{disconnected_bus}` "
@ -296,75 +341,107 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
n = fix_country_assignment_for_hac(n)
if (algorithm != "hac") and (feature is not None):
logger.warning(f"Keyword argument feature is only valid for algorithm `hac`. "
f"Given feature `{feature}` will be ignored.")
logger.warning(
f"Keyword argument feature is only valid for algorithm `hac`. "
f"Given feature `{feature}` will be ignored."
)
n.determine_network_topology()
n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name)
n_clusters = distribute_clusters(
n, n_clusters, focus_weights=focus_weights, solver_name=solver_name
)
def busmap_for_country(x):
prefix = x.name[0] + x.name[1] + ' '
prefix = x.name[0] + x.name[1] + " "
logger.debug(f"Determining busmap for country {prefix[:-1]}")
if len(x) == 1:
return pd.Series(prefix + '0', index=x.index)
return pd.Series(prefix + "0", index=x.index)
weight = weighting_for_country(n, x)
if algorithm == "kmeans":
return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds)
return prefix + busmap_by_kmeans(
n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds
)
elif algorithm == "hac":
return prefix + busmap_by_hac(n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index])
return prefix + busmap_by_hac(
n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]
)
elif algorithm == "modularity":
return prefix + busmap_by_greedy_modularity(n, n_clusters[x.name], buses_i=x.index)
return prefix + busmap_by_greedy_modularity(
n, n_clusters[x.name], buses_i=x.index
)
else:
raise ValueError(f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}.")
raise ValueError(
f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
)
return (n.buses.groupby(['country', 'sub_network'], group_keys=False)
.apply(busmap_for_country).squeeze().rename('busmap'))
return (
n.buses.groupby(["country", "sub_network"], group_keys=False)
.apply(busmap_for_country)
.squeeze()
.rename("busmap")
)
def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None,
line_length_factor=1.25, aggregation_strategies=dict(), solver_name="cbc",
algorithm="hac", feature=None, extended_link_costs=0, focus_weights=None):
def clustering_for_n_clusters(
n,
n_clusters,
custom_busmap=False,
aggregate_carriers=None,
line_length_factor=1.25,
aggregation_strategies=dict(),
solver_name="cbc",
algorithm="hac",
feature=None,
extended_link_costs=0,
focus_weights=None,
):
bus_strategies, generator_strategies = get_aggregation_strategies(aggregation_strategies)
bus_strategies, generator_strategies = get_aggregation_strategies(
aggregation_strategies
)
if not isinstance(custom_busmap, pd.Series):
busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm, feature)
busmap = busmap_for_n_clusters(
n, n_clusters, solver_name, focus_weights, algorithm, feature
)
else:
busmap = custom_busmap
clustering = get_clustering_from_busmap(
n, busmap,
n,
busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=aggregate_carriers,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=line_length_factor,
generator_strategies=generator_strategies,
scale_link_capital_costs=False)
scale_link_capital_costs=False,
)
if not n.links.empty:
nc = clustering.network
nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length')
.div(nc.links.length).dropna())
nc.links['capital_cost'] = (nc.links['capital_cost']
.add((nc.links.length - n.links.length)
.clip(lower=0).mul(extended_link_costs),
fill_value=0))
nc.links["underwater_fraction"] = (
n.links.eval("underwater_fraction * length").div(nc.links.length).dropna()
)
nc.links["capital_cost"] = nc.links["capital_cost"].add(
(nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs),
fill_value=0,
)
return clustering
def cluster_regions(busmaps, input=None, output=None):
busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
for which in ('regions_onshore', 'regions_offshore'):
for which in ("regions_onshore", "regions_offshore"):
regions = gpd.read_file(getattr(input, which))
regions = regions.reindex(columns=["name", "geometry"]).set_index('name')
regions = regions.reindex(columns=["name", "geometry"]).set_index("name")
regions_c = regions.dissolve(busmap)
regions_c.index.name = 'name'
regions_c.index.name = "name"
regions_c = regions_c.reset_index()
regions_c.to_file(getattr(output, which))
@ -375,78 +452,110 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None):
cr = sns.color_palette("hls", len(cs))
n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
if fn is not None:
plt.savefig(fn, bbox_inches='tight')
plt.savefig(fn, bbox_inches="tight")
del cs, cr
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('cluster_network', simpl='', clusters='5')
snakemake = mock_snakemake("cluster_network", simpl="", clusters="5")
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
focus_weights = snakemake.config.get('focus_weights', None)
focus_weights = snakemake.config.get("focus_weights", None)
renewable_carriers = pd.Index([tech
for tech in n.generators.carrier.unique()
if tech in snakemake.config['renewable']])
renewable_carriers = pd.Index(
[
tech
for tech in n.generators.carrier.unique()
if tech in snakemake.config["renewable"]
]
)
if snakemake.wildcards.clusters.endswith('m'):
if snakemake.wildcards.clusters.endswith("m"):
n_clusters = int(snakemake.wildcards.clusters[:-1])
aggregate_carriers = snakemake.config["electricity"].get("conventional_carriers")
elif snakemake.wildcards.clusters == 'all':
aggregate_carriers = snakemake.config["electricity"].get(
"conventional_carriers"
)
elif snakemake.wildcards.clusters == "all":
n_clusters = len(n.buses)
aggregate_carriers = None # All
aggregate_carriers = None # All
else:
n_clusters = int(snakemake.wildcards.clusters)
aggregate_carriers = None # All
aggregate_carriers = None # All
if n_clusters == len(n.buses):
# Fast-path if no clustering is necessary
busmap = n.buses.index.to_series()
linemap = n.lines.index.to_series()
clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O'))
clustering = pypsa.networkclustering.Clustering(
n, busmap, linemap, linemap, pd.Series(dtype="O")
)
else:
line_length_factor = snakemake.config['lines']['length_factor']
Nyears = n.snapshot_weightings.objective.sum()/8760
line_length_factor = snakemake.config["lines"]["length_factor"]
Nyears = n.snapshot_weightings.objective.sum() / 8760
hvac_overhead_cost = (load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
.at['HVAC overhead', 'capital_cost'])
hvac_overhead_cost = load_costs(
snakemake.input.tech_costs,
snakemake.config["costs"],
snakemake.config["electricity"],
Nyears,
).at["HVAC overhead", "capital_cost"]
def consense(x):
v = x.iat[0]
assert ((x == v).all() or x.isnull().all()), (
"The `potential` configuration option must agree for all renewable carriers, for now!"
)
assert (
x == v
).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
return v
aggregation_strategies = snakemake.config["clustering"].get("aggregation_strategies", {})
aggregation_strategies = snakemake.config["clustering"].get(
"aggregation_strategies", {}
)
# translate str entries of aggregation_strategies to pd.Series functions:
aggregation_strategies = {
p: {k: getattr(pd.Series, v) for k,v in aggregation_strategies[p].items()}
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
for p in aggregation_strategies.keys()
}
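# e.g. (illustration) {"generators": {"p_nom_max": "sum"}} becomes
# {"generators": {"p_nom_max": pd.Series.sum}} through the getattr call above.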
custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
if custom_busmap:
custom_busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True)
custom_busmap = pd.read_csv(
snakemake.input.custom_busmap, index_col=0, squeeze=True
)
custom_busmap.index = custom_busmap.index.astype(str)
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
cluster_config = snakemake.config.get('clustering', {}).get('cluster_network', {})
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers,
line_length_factor, aggregation_strategies,
snakemake.config['solving']['solver']['name'],
cluster_config.get("algorithm", "hac"),
cluster_config.get("feature", "solar+onwind-time"),
hvac_overhead_cost, focus_weights)
cluster_config = snakemake.config.get("clustering", {}).get(
"cluster_network", {}
)
clustering = clustering_for_n_clusters(
n,
n_clusters,
custom_busmap,
aggregate_carriers,
line_length_factor,
aggregation_strategies,
snakemake.config["solving"]["solver"]["name"],
cluster_config.get("algorithm", "hac"),
cluster_config.get("feature", "solar+onwind-time"),
hvac_overhead_cost,
focus_weights,
)
update_p_nom_max(clustering.network)
clustering.network.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
clustering.network.meta = dict(
snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
)
clustering.network.export_to_netcdf(snakemake.output.network)
for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative
for attr in (
"busmap",
"linemap",
): # also available: linemap_positive, linemap_negative
getattr(clustering, attr).to_csv(snakemake.output[attr])
cluster_regions((clustering.busmap,), snakemake.input, snakemake.output)

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -51,23 +52,21 @@ the line volume/cost cap field can be set to one of the following:
* ``lcall`` for all line cost caps
Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.
"""
import logging
from _helpers import configure_logging
import os
import pypsa
import pandas as pd
import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"}
opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
def _add_indexed_rows(df, raw_index):
@ -79,105 +78,149 @@ def _add_indexed_rows(df, raw_index):
def assign_carriers(n):
if "carrier" not in n.loads:
n.loads["carrier"] = "electricity"
for carrier in ["transport","heat","urban heat"]:
n.loads.loc[n.loads.index.str.contains(carrier),"carrier"] = carrier
for carrier in ["transport", "heat", "urban heat"]:
n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier
n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True)
n.storage_units["carrier"].replace(
{"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True
)
if "carrier" not in n.lines:
n.lines["carrier"] = "AC"
n.lines["carrier"].replace({"AC": "lines"}, inplace=True)
if n.links.empty: n.links["carrier"] = pd.Series(dtype=str)
if n.links.empty:
n.links["carrier"] = pd.Series(dtype=str)
n.links["carrier"].replace({"DC": "lines"}, inplace=True)
if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "":
n.stores.loc["EU gas Store","carrier"] = "gas Store"
if (
"EU gas store" in n.stores.index
and n.stores.loc["EU gas Store", "carrier"] == ""
):
n.stores.loc["EU gas Store", "carrier"] = "gas Store"
def calculate_costs(n, label, costs):
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
capital_costs = c.df.capital_cost*c.df[opt_name.get(c.name,"p") + "_nom_opt"]
for c in n.iterate_components(
n.branch_components | n.controllable_one_port_components ^ {"Load"}
):
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([[c.list_name],["capital"],list(capital_costs_grouped.index)])
raw_index = tuple(
[[c.list_name], ["capital"], list(capital_costs_grouped.index)]
)
costs = _add_indexed_rows(costs, raw_index)
costs.loc[idx[raw_index],label] = capital_costs_grouped.values
costs.loc[idx[raw_index], label] = capital_costs_grouped.values
if c.name == "Link":
p = c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()
p = c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
elif c.name == "Line":
continue
elif c.name == "StorageUnit":
p_all = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0)
p_all[p_all < 0.] = 0.
p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
p_all[p_all < 0.0] = 0.0
p = p_all.sum()
else:
p = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum()
p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
marginal_costs = p*c.df.marginal_cost
marginal_costs = p * c.df.marginal_cost
marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()
costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])))
costs = costs.reindex(
costs.index.union(
pd.MultiIndex.from_product(
[[c.list_name], ["marginal"], marginal_costs_grouped.index]
)
)
)
costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values
costs.loc[
idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label
] = marginal_costs_grouped.values
return costs
def calculate_curtailment(n, label, curtailment):
avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
def calculate_curtailment(n, label, curtailment):
avail = (
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
.sum()
.groupby(n.generators.carrier)
.sum()
)
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
curtailment[label] = (((avail - used)/avail)*100).round(3)
curtailment[label] = (((avail - used) / avail) * 100).round(3)
return curtailment
def calculate_energy(n, label, energy):
for c in n.iterate_components(n.one_port_components | n.branch_components):
for c in n.iterate_components(n.one_port_components|n.branch_components):
if c.name in {'Generator', 'Load', 'ShuntImpedance'}:
c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
elif c.name in {'StorageUnit', 'Store'}:
c_energies = c.pnl.p.multiply(n.snapshot_weightings.stores,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
if c.name in {"Generator", "Load", "ShuntImpedance"}:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier)
.sum()
)
elif c.name in {"StorageUnit", "Store"}:
c_energies = (
c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
.sum()
.multiply(c.df.sign)
.groupby(c.df.carrier)
.sum()
)
else:
c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings.generators,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()).groupby(c.df.carrier).sum()
c_energies = (
(
-c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum()
- c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
)
.groupby(c.df.carrier)
.sum()
)
energy = include_in_summary(energy, [c.list_name], label, c_energies)
return energy
def include_in_summary(summary, multiindexprefix, label, item):
def include_in_summary(summary, multiindexprefix, label, item):
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([multiindexprefix,list(item.index)])
raw_index = tuple([multiindexprefix, list(item.index)])
summary = _add_indexed_rows(summary, raw_index)
summary.loc[idx[raw_index], label] = item.values
return summary
def calculate_capacity(n,label,capacity):
def calculate_capacity(n, label, capacity):
for c in n.iterate_components(n.one_port_components):
if 'p_nom_opt' in c.df.columns:
c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
if "p_nom_opt" in c.df.columns:
c_capacities = (
abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
)
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
elif 'e_nom_opt' in c.df.columns:
c_capacities = abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
elif "e_nom_opt" in c.df.columns:
c_capacities = (
abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
)
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.passive_branch_components):
c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum()
c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum()
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
for c in n.iterate_components(n.controllable_branch_components):
@ -186,8 +229,12 @@ def calculate_capacity(n,label,capacity):
return capacity
def calculate_supply(n, label, supply):
"""calculate the max dispatch of each component at the buses where the loads are attached"""
"""
calculate the max dispatch of each component at the buses where the loads
are attached.
"""
load_types = n.buses.carrier.unique()
@ -195,7 +242,7 @@ def calculate_supply(n, label, supply):
buses = n.buses.query("carrier == @i").index
bus_map = pd.Series(False,index=n.buses.index)
bus_map = pd.Series(False, index=n.buses.index)
bus_map.loc[buses] = True
@ -206,35 +253,49 @@ def calculate_supply(n, label, supply):
if len(items) == 0 or c.pnl.p.empty:
continue
s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
s = (
c.pnl.p[items]
.max()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([[i],[c.list_name],list(s.index)])
raw_index = tuple([[i], [c.list_name], list(s.index)])
supply = _add_indexed_rows(supply, raw_index)
supply.loc[idx[raw_index],label] = s.values
supply.loc[idx[raw_index], label] = s.values
for c in n.iterate_components(n.branch_components):
for end in ["0","1"]:
for end in ["0", "1"]:
items = c.df.index[c.df["bus" + end].map(bus_map)]
if len(items) == 0 or c.pnl["p"+end].empty:
if len(items) == 0 or c.pnl["p" + end].empty:
continue
#lots of sign compensation for direction and to do maximums
s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum()
# lots of sign compensation for direction and to do maximums
s = (-1) ** (1 - int(end)) * (
(-1) ** int(end) * c.pnl["p" + end][items]
).max().groupby(c.df.loc[items, "carrier"]).sum()
supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values
supply = supply.reindex(
supply.index.union(
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
)
)
supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values
return supply
def calculate_supply_energy(n, label, supply_energy):
"""calculate the total dispatch of each component at the buses where the loads are attached"""
"""
calculate the total dispatch of each component at the buses where the loads
are attached.
"""
load_types = n.buses.carrier.unique()
@ -242,7 +303,7 @@ def calculate_supply_energy(n, label, supply_energy):
buses = n.buses.query("carrier == @i").index
bus_map = pd.Series(False,index=n.buses.index)
bus_map = pd.Series(False, index=n.buses.index)
bus_map.loc[buses] = True
@ -253,55 +314,83 @@ def calculate_supply_energy(n, label, supply_energy):
if len(items) == 0 or c.pnl.p.empty:
continue
s = c.pnl.p[items].sum().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
s = (
c.pnl.p[items]
.sum()
.multiply(c.df.loc[items, "sign"])
.groupby(c.df.loc[items, "carrier"])
.sum()
)
# Index tuple(s) indicating the newly to-be-added row(s)
raw_index = tuple([[i],[c.list_name],list(s.index)])
raw_index = tuple([[i], [c.list_name], list(s.index)])
supply_energy = _add_indexed_rows(supply_energy, raw_index)
supply_energy.loc[idx[raw_index],label] = s.values
supply_energy.loc[idx[raw_index], label] = s.values
for c in n.iterate_components(n.branch_components):
for end in ["0","1"]:
for end in ["0", "1"]:
items = c.df.index[c.df["bus" + end].map(bus_map)]
if len(items) == 0 or c.pnl['p' + end].empty:
if len(items) == 0 or c.pnl["p" + end].empty:
continue
s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum()
s = (-1) * c.pnl["p" + end][items].sum().groupby(
c.df.loc[items, "carrier"]
).sum()
supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values
supply_energy = supply_energy.reindex(
supply_energy.index.union(
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
)
)
supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values
return supply_energy
def calculate_metrics(n,label,metrics):
def calculate_metrics(n, label, metrics):
metrics = metrics.reindex(
metrics.index.union(
pd.Index(
[
"line_volume",
"line_volume_limit",
"line_volume_AC",
"line_volume_DC",
"line_volume_shadow",
"co2_shadow",
]
)
)
)
metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])))
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
n.links.carrier == "DC"
].sum()
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
metrics.at["line_volume", label] = metrics.loc[
["line_volume_AC", "line_volume_DC"], label
].sum()
metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum()
metrics.at["line_volume",label] = metrics.loc[["line_volume_AC","line_volume_DC"],label].sum()
if hasattr(n, "line_volume_limit"):
metrics.at["line_volume_limit", label] = n.line_volume_limit
if hasattr(n,"line_volume_limit"):
metrics.at["line_volume_limit",label] = n.line_volume_limit
if hasattr(n,"line_volume_limit_dual"):
metrics.at["line_volume_shadow",label] = n.line_volume_limit_dual
if hasattr(n, "line_volume_limit_dual"):
metrics.at["line_volume_shadow", label] = n.line_volume_limit_dual
if "CO2Limit" in n.global_constraints.index:
metrics.at["co2_shadow",label] = n.global_constraints.at["CO2Limit","mu"]
metrics.at["co2_shadow", label] = n.global_constraints.at["CO2Limit", "mu"]
return metrics
def calculate_prices(n,label,prices):
bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity")
def calculate_prices(n, label, prices):
bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace(
"", "electricity"
)
prices = prices.reindex(prices.index.union(bus_type.value_counts().index))
@ -311,19 +400,37 @@ def calculate_prices(n,label,prices):
return prices
def calculate_weighted_prices(n,label,weighted_prices):
def calculate_weighted_prices(n, label, weighted_prices):
logger.warning("Weighted prices don't include storage units as loads")
weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"]))
weighted_prices = weighted_prices.reindex(
pd.Index(
[
"electricity",
"heat",
"space heat",
"urban heat",
"space urban heat",
"gas",
"H2",
]
)
)
link_loads = {"electricity" : ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
"heat" : ["water tanks charger"],
"urban heat" : ["water tanks charger"],
"space heat" : [],
"space urban heat" : [],
"gas" : ["OCGT","gas boiler","CHP electric","CHP heat"],
"H2" : ["Sabatier", "H2 Fuel Cell"]}
link_loads = {
"electricity": [
"heat pump",
"resistive heater",
"battery charger",
"H2 Electrolysis",
],
"heat": ["water tanks charger"],
"urban heat": ["water tanks charger"],
"space heat": [],
"space urban heat": [],
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
"H2": ["Sabatier", "H2 Fuel Cell"],
}
for carrier in link_loads:
@ -332,64 +439,77 @@ def calculate_weighted_prices(n,label,weighted_prices):
elif carrier[:5] == "space":
suffix = carrier[5:]
else:
suffix = " " + carrier
suffix = " " + carrier
buses = n.buses.index[n.buses.index.str[2:] == suffix]
if buses.empty:
continue
if carrier in ["H2","gas"]:
load = pd.DataFrame(index=n.snapshots,columns=buses,data=0.)
if carrier in ["H2", "gas"]:
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
elif carrier[:5] == "space":
load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix)
load = heat_demand_df[buses.str[:2]].rename(
columns=lambda i: str(i) + suffix
)
else:
load = n.loads_t.p_set[buses]
for tech in link_loads[carrier]:
names = n.links.index[n.links.index.to_series().str[-len(tech):] == tech]
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
if names.empty:
continue
load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1)
load += (
n.links_t.p0[names]
.groupby(n.links.loc[names, "bus0"], axis=1)
.sum(axis=1)
)
# Add H2 Store when charging
if carrier == "H2":
stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
stores[stores > 0.] = 0.
stores = (
n.stores_t.p[buses + " Store"]
.groupby(n.stores.loc[buses + " Store", "bus"], axis=1)
.sum(axis=1)
)
stores[stores > 0.0] = 0.0
load += -stores
weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum()
weighted_prices.loc[carrier, label] = (
load * n.buses_t.marginal_price[buses]
).sum().sum() / load.sum().sum()
if carrier[:5] == "space":
print(load*n.buses_t.marginal_price[buses])
print(load * n.buses_t.marginal_price[buses])
return weighted_prices
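A minimal sketch (toy numbers) of the load-weighted price computed above: marginal prices are averaged over buses and snapshots using the served load as weights.
import pandas as pd
load = pd.DataFrame({"bus1": [10.0, 20.0], "bus2": [5.0, 5.0]})
price = pd.DataFrame({"bus1": [50.0, 80.0], "bus2": [40.0, 40.0]})
weighted = (load * price).sum().sum() / load.sum().sum()
print(weighted)  # (500 + 1600 + 200 + 200) / 40 = 62.5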
outputs = ["costs",
"curtailment",
"energy",
"capacity",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"metrics",
]
outputs = [
"costs",
"curtailment",
"energy",
"capacity",
"supply",
"supply_energy",
"prices",
"weighted_prices",
"metrics",
]
def make_summaries(networks_dict, paths, config, country='all'):
columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"])
def make_summaries(networks_dict, paths, config, country="all"):
columns = pd.MultiIndex.from_tuples(
networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"]
)
dfs = {}
for output in outputs:
dfs[output] = pd.DataFrame(columns=columns,dtype=float)
dfs[output] = pd.DataFrame(columns=columns, dtype=float)
for label, filename in networks_dict.items():
print(label, filename)
@ -403,11 +523,11 @@ def make_summaries(networks_dict, paths, config, country='all'):
logger.warning("Skipping {filename}".format(filename=filename))
continue
if country != 'all':
if country != "all":
n = n[n.buses.country == country]
Nyears = n.snapshot_weightings.objective.sum() / 8760.
costs = load_costs(paths[0], config['costs'], config['electricity'], Nyears)
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(paths[0], config["costs"], config["electricity"], Nyears)
update_transmission_costs(n, costs)
assign_carriers(n)
@ -425,13 +545,20 @@ def to_csv(dfs, dir):
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('make_summary', simpl='',
clusters='5', ll='copt', opts='Co2L-24H', country='all')
network_dir = os.path.join('..', 'results', 'networks')
snakemake = mock_snakemake(
"make_summary",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-24H",
country="all",
)
network_dir = os.path.join("..", "results", "networks")
else:
network_dir = os.path.join('results', 'networks')
network_dir = os.path.join("results", "networks")
configure_logging(snakemake)
config = snakemake.config
@ -448,14 +575,18 @@ if __name__ == "__main__":
else:
ll = [wildcards.ll]
networks_dict = {(simpl,clusters,l,opts) :
os.path.join(network_dir, f'elec_s{simpl}_'
f'{clusters}_ec_l{l}_{opts}.nc')
for simpl in expand_from_wildcard("simpl", config)
for clusters in expand_from_wildcard("clusters", config)
for l in ll
for opts in expand_from_wildcard("opts", config)}
networks_dict = {
(simpl, clusters, l, opts): os.path.join(
network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc"
)
for simpl in expand_from_wildcard("simpl", config)
for clusters in expand_from_wildcard("clusters", config)
for l in ll
for opts in expand_from_wildcard("opts", config)
}
dfs = make_summaries(networks_dict, snakemake.input, config, country=wildcards.country)
dfs = make_summaries(
networks_dict, snakemake.input, config, country=wildcards.country
)
to_csv(dfs, snakemake.output[0])

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,20 +17,24 @@ Outputs
Description
-----------
"""
import logging
from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs, configure_logging)
import pandas as pd
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import Circle, Ellipse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _helpers import (
aggregate_costs,
aggregate_p,
configure_logging,
load_network_for_plots,
)
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse
to_rgba = mpl.colors.colorConverter.to_rgba
logger = logging.getLogger(__name__)
@ -37,240 +42,352 @@ logger = logging.getLogger(__name__)
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
fig = ax.get_figure()
def axes2pt():
return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi)
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (
72.0 / fig.dpi
)
ellipses = []
if not dont_resize_actively:
def update_width_height(event):
dist = axes2pt()
for e, radius in ellipses: e.width, e.height = 2. * radius * dist
fig.canvas.mpl_connect('resize_event', update_width_height)
ax.callbacks.connect('xlim_changed', update_width_height)
ax.callbacks.connect('ylim_changed', update_width_height)
for e, radius in ellipses:
e.width, e.height = 2.0 * radius * dist
def legend_circle_handler(legend, orig_handle, xdescent, ydescent,
width, height, fontsize):
w, h = 2. * orig_handle.get_radius() * axes2pt()
e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w)
fig.canvas.mpl_connect("resize_event", update_width_height)
ax.callbacks.connect("xlim_changed", update_width_height)
ax.callbacks.connect("ylim_changed", update_width_height)
def legend_circle_handler(
legend, orig_handle, xdescent, ydescent, width, height, fontsize
):
w, h = 2.0 * orig_handle.get_radius() * axes2pt()
e = Ellipse(
xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent),
width=w,
height=w,
)
ellipses.append((e, orig_handle.get_radius()))
return e
return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
def make_legend_circles_for(sizes, scale=1.0, **kw):
return [Circle((0,0), radius=(s/scale)**0.5, **kw) for s in sizes]
return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes]
def set_plot_style():
plt.style.use(['classic', 'seaborn-white',
{'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6',
'hatch.color': 'white',
'patch.linewidth': 0.5,
'font.size': 12,
'legend.fontsize': 'medium',
'lines.linewidth': 1.5,
'pdf.fonttype': 42,
}])
plt.style.use(
[
"classic",
"seaborn-white",
{
"axes.grid": False,
"grid.linestyle": "--",
"grid.color": "0.6",
"hatch.color": "white",
"patch.linewidth": 0.5,
"font.size": 12,
"legend.fontsize": "medium",
"lines.linewidth": 1.5,
"pdf.fonttype": 42,
},
]
)
def plot_map(n, opts, ax=None, attribute='p_nom'):
def plot_map(n, opts, ax=None, attribute="p_nom"):
if ax is None:
ax = plt.gca()
## DATA
line_colors = {'cur': "purple",
'exp': mpl.colors.rgb2hex(to_rgba("red", 0.7), True)}
tech_colors = opts['tech_colors']
line_colors = {
"cur": "purple",
"exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True),
}
tech_colors = opts["tech_colors"]
if attribute == 'p_nom':
if attribute == "p_nom":
# bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum()
bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(),
n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum()))
bus_sizes = pd.concat(
(
n.generators.query('carrier != "load"')
.groupby(["bus", "carrier"])
.p_nom_opt.sum(),
n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(),
)
)
line_widths_exp = n.lines.s_nom_opt
line_widths_cur = n.lines.s_nom_min
link_widths_exp = n.links.p_nom_opt
link_widths_cur = n.links.p_nom_min
else:
raise 'plotting of {} has not been implemented yet'.format(attribute)
raise "plotting of {} has not been implemented yet".format(attribute)
line_colors_with_alpha = \
((line_widths_cur / n.lines.s_nom > 1e-3)
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
link_colors_with_alpha = \
((link_widths_cur / n.links.p_nom > 1e-3)
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map(
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
)
## FORMAT
linewidth_factor = opts['map'][attribute]['linewidth_factor']
bus_size_factor = opts['map'][attribute]['bus_size_factor']
linewidth_factor = opts["map"][attribute]["linewidth_factor"]
bus_size_factor = opts["map"][attribute]["bus_size_factor"]
## PLOT
n.plot(line_widths=line_widths_exp/linewidth_factor,
link_widths=link_widths_exp/linewidth_factor,
line_colors=line_colors['exp'],
link_colors=line_colors['exp'],
bus_sizes=bus_sizes/bus_size_factor,
bus_colors=tech_colors,
boundaries=map_boundaries,
color_geomap=True, geomap=True,
ax=ax)
n.plot(line_widths=line_widths_cur/linewidth_factor,
link_widths=link_widths_cur/linewidth_factor,
line_colors=line_colors_with_alpha,
link_colors=link_colors_with_alpha,
bus_sizes=0,
boundaries=map_boundaries,
color_geomap=True, geomap=True,
ax=ax)
ax.set_aspect('equal')
ax.axis('off')
n.plot(
line_widths=line_widths_exp / linewidth_factor,
link_widths=link_widths_exp / linewidth_factor,
line_colors=line_colors["exp"],
link_colors=line_colors["exp"],
bus_sizes=bus_sizes / bus_size_factor,
bus_colors=tech_colors,
boundaries=map_boundaries,
color_geomap=True,
geomap=True,
ax=ax,
)
n.plot(
line_widths=line_widths_cur / linewidth_factor,
link_widths=link_widths_cur / linewidth_factor,
line_colors=line_colors_with_alpha,
link_colors=link_colors_with_alpha,
bus_sizes=0,
boundaries=map_boundaries,
color_geomap=True,
geomap=True,
ax=ax,
)
ax.set_aspect("equal")
ax.axis("off")
# Rasterize basemap
# TODO : Check if this also works with cartopy
for c in ax.collections[:2]: c.set_rasterized(True)
for c in ax.collections[:2]:
c.set_rasterized(True)
# LEGEND
handles = []
labels = []
for s in (10, 1):
handles.append(plt.Line2D([0],[0],color=line_colors['exp'],
linewidth=s*1e3/linewidth_factor))
handles.append(
plt.Line2D(
[0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("{} GW".format(s))
l1_1 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.24, 1.01),
frameon=False,
labelspacing=0.8, handletextpad=1.5,
title='Transmission Exp./Exist. ')
l1_1 = ax.legend(
handles,
labels,
loc="upper left",
bbox_to_anchor=(0.24, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=1.5,
title="Transmission Exp./Exist. ",
)
ax.add_artist(l1_1)
handles = []
labels = []
for s in (10, 5):
handles.append(plt.Line2D([0],[0],color=line_colors['cur'],
linewidth=s*1e3/linewidth_factor))
handles.append(
plt.Line2D(
[0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor
)
)
labels.append("/")
l1_2 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.26, 1.01),
frameon=False,
labelspacing=0.8, handletextpad=0.5,
title=' ')
l1_2 = ax.legend(
handles,
labels,
loc="upper left",
bbox_to_anchor=(0.26, 1.01),
frameon=False,
labelspacing=0.8,
handletextpad=0.5,
title=" ",
)
ax.add_artist(l1_2)
handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w")
handles = make_legend_circles_for(
[10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w"
)
labels = ["{} GW".format(s) for s in (10, 5, 3)]
l2 = ax.legend(handles, labels,
loc="upper left", bbox_to_anchor=(0.01, 1.01),
frameon=False, labelspacing=1.0,
title='Generation',
handler_map=make_handler_map_to_scale_circles_as_in(ax))
l2 = ax.legend(
handles,
labels,
loc="upper left",
bbox_to_anchor=(0.01, 1.01),
frameon=False,
labelspacing=1.0,
title="Generation",
handler_map=make_handler_map_to_scale_circles_as_in(ax),
)
ax.add_artist(l2)
techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']))
techs = (bus_sizes.index.levels[1]).intersection(
pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"])
)
handles = []
labels = []
for t in techs:
handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0))
labels.append(opts['nice_names'].get(t, t))
l3 = ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.), # bbox_to_anchor=(0.72, -0.05),
handletextpad=0., columnspacing=0.5, ncol=4, title='Technology')
handles.append(
plt.Line2D(
[0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0
)
)
labels.append(opts["nice_names"].get(t, t))
l3 = ax.legend(
handles,
labels,
loc="upper center",
bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05),
handletextpad=0.0,
columnspacing=0.5,
ncol=4,
title="Technology",
)
return fig
def plot_total_energy_pie(n, opts, ax=None):
if ax is None: ax = plt.gca()
if ax is None:
ax = plt.gca()
ax.set_title('Energy per technology', fontdict=dict(fontsize="medium"))
ax.set_title("Energy per technology", fontdict=dict(fontsize="medium"))
e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0]
e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0]
patches, texts, autotexts = ax.pie(e_primary,
patches, texts, autotexts = ax.pie(
e_primary,
startangle=90,
labels = e_primary.rename(opts['nice_names']).index,
autopct='%.0f%%',
labels=e_primary.rename(opts["nice_names"]).index,
autopct="%.0f%%",
shadow=False,
colors = [opts['tech_colors'][tech] for tech in e_primary.index])
colors=[opts["tech_colors"][tech] for tech in e_primary.index],
)
for t1, t2, i in zip(texts, autotexts, e_primary.index):
if e_primary.at[i] < 0.04 * e_primary.sum():
t1.remove()
t2.remove()
def plot_total_cost_bar(n, opts, ax=None):
if ax is None: ax = plt.gca()
if ax is None:
ax = plt.gca()
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts['tech_colors']
tech_colors = opts["tech_colors"]
def split_costs(n):
costs = aggregate_costs(n).reset_index(level=0, drop=True)
costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True)
return (costs['capital'].add(costs['marginal'], fill_value=0.),
costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal'])
costs_ex = aggregate_costs(n, existing_only=True).reset_index(
level=0, drop=True
)
return (
costs["capital"].add(costs["marginal"], fill_value=0.0),
costs_ex["capital"],
costs["capital"] - costs_ex["capital"],
costs["marginal"],
)
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')),
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac',
'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna()
bottom = np.array([0., 0.])
costs_graph = pd.DataFrame(
dict(a=costs.drop("load", errors="ignore")),
index=[
"AC-AC",
"AC line",
"onwind",
"offwind-ac",
"offwind-dc",
"solar",
"OCGT",
"CCGT",
"battery",
"H2",
],
).dropna()
bottom = np.array([0.0, 0.0])
texts = []
for i,ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind])/total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind],
width=0.7, zorder=-1)
for i, ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind]) / total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1)
bottom_sub = bottom
bottom = bottom+data
bottom = bottom + data
if ind in opts['conv_techs'] + ['AC line']:
if ind in opts["conv_techs"] + ["AC line"]:
for c in [costs_cap_ex, costs_marg]:
if ind in c:
data_sub = np.asarray([c.loc[ind]])/total_load
ax.bar([0.5], data_sub, linewidth=0,
bottom=bottom_sub, color=tech_colors[ind],
width=0.7, zorder=-1, alpha=0.8)
data_sub = np.asarray([c.loc[ind]]) / total_load
ax.bar(
[0.5],
data_sub,
linewidth=0,
bottom=bottom_sub,
color=tech_colors[ind],
width=0.7,
zorder=-1,
alpha=0.8,
)
bottom_sub += data_sub
if abs(data[-1]) < 5:
continue
text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind))
text = ax.text(
1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind)
)
texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get('costs_max', 80)])
ax.set_ylim([0, opts.get("costs_max", 80)])
ax.set_xlim([0, 1])
ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted')
ax.grid(True, axis="y", color="k", linestyle="dotted")
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_network', simpl='',
clusters='5', ll='copt', opts='Co2L-24H',
attr='p_nom', ext="pdf")
snakemake = mock_snakemake(
"plot_network",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-24H",
attr="p_nom",
ext="pdf",
)
configure_logging(snakemake)
set_plot_style()
config, wildcards = snakemake.config, snakemake.wildcards
map_figsize = config["plotting"]['map']['figsize']
map_boundaries = config["plotting"]['map']['boundaries']
map_figsize = config["plotting"]["map"]["figsize"]
map_boundaries = config["plotting"]["map"]["boundaries"]
n = load_network_for_plots(snakemake.input.network, snakemake.input.tech_costs, config)
n = load_network_for_plots(
snakemake.input.network, snakemake.input.tech_costs, config
)
scenario_opts = wildcards.opts.split('-')
scenario_opts = wildcards.opts.split("-")
fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()})
fig, ax = plt.subplots(
figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}
)
plot_map(n, config["plotting"], ax=ax, attribute=wildcards.attr)
fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches='tight')
fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight")
ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
plot_total_energy_pie(n, config["plotting"], ax=ax1)
@ -281,9 +398,12 @@ if __name__ == "__main__":
ll = wildcards.ll
ll_type = ll[0]
ll_factor = ll[1:]
lbl = dict(c='line cost', v='line volume')[ll_type]
amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal'
fig.suptitle('Expansion to {amount} {label} at {clusters} clusters'
.format(amount=amnt, label=lbl, clusters=wildcards.clusters))
lbl = dict(c="line cost", v="line volume")[ll_type]
amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal"
fig.suptitle(
"Expansion to {amount} {label} at {clusters} clusters".format(
amount=amnt, label=lbl, clusters=wildcards.clusters
)
)
fig.savefig(snakemake.output.ext, transparent=True, bbox_inches='tight')
fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight")

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,14 +17,13 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging
import pypsa
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
import pypsa
from _helpers import configure_logging
logger = logging.getLogger(__name__)
@ -31,11 +31,13 @@ logger = logging.getLogger(__name__)
def cum_p_nom_max(net, tech, country=None):
carrier_b = net.generators.carrier == tech
generators = pd.DataFrame(dict(
p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'],
p_max_pu=net.generators_t.p_max_pu.loc[:,carrier_b].mean(),
country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country)
)).sort_values("p_max_pu", ascending=False)
generators = pd.DataFrame(
dict(
p_nom_max=net.generators.loc[carrier_b, "p_nom_max"],
p_max_pu=net.generators_t.p_max_pu.loc[:, carrier_b].mean(),
country=net.generators.loc[carrier_b, "bus"].map(net.buses.country),
)
).sort_values("p_max_pu", ascending=False)
if country is not None:
generators = generators.loc[generators.country == country]
@ -46,22 +48,28 @@ def cum_p_nom_max(net, tech, country=None):
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_p_nom_max', simpl='',
techs='solar,onwind,offwind-dc', ext='png',
clusts= '5,full', country= 'all')
snakemake = mock_snakemake(
"plot_p_nom_max",
simpl="",
techs="solar,onwind,offwind-dc",
ext="png",
clusts="5,full",
country="all",
)
configure_logging(snakemake)
plot_kwds = dict(drawstyle="steps-post")
clusters = snakemake.wildcards.clusts.split(',')
techs = snakemake.wildcards.techs.split(',')
clusters = snakemake.wildcards.clusts.split(",")
techs = snakemake.wildcards.techs.split(",")
country = snakemake.wildcards.country
if country == 'all':
if country == "all":
country = None
else:
plot_kwds['marker'] = 'x'
plot_kwds["marker"] = "x"
fig, axes = plt.subplots(1, len(techs))
@ -69,8 +77,9 @@ if __name__ == "__main__":
net = pypsa.Network(snakemake.input[j])
for i, tech in enumerate(techs):
cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max",
label=cluster, ax=axes[i], **plot_kwds)
cum_p_nom_max(net, tech, country).plot(
x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds
)
for i, tech in enumerate(techs):
ax = axes[i]
@ -79,4 +88,4 @@ if __name__ == "__main__":
plt.legend(title="Cluster level")
fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight')
fig.savefig(snakemake.output[0], transparent=True, bbox_inches="tight")

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,15 +17,14 @@ Outputs
Description
-----------
"""
import os
import logging
from _helpers import configure_logging
import os
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
@ -52,22 +52,37 @@ def rename_techs(label):
return label
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"])
preferred_order = pd.Index(
[
"transmission lines",
"hydroelectricity",
"hydro reservoir",
"run of river",
"pumped hydro storage",
"onshore wind",
"offshore wind ac",
"offshore wind dc",
"solar PV",
"solar thermal",
"OCGT",
"hydrogen storage",
"battery storage",
]
)
def plot_costs(infn, config, fn=None):
## For now ignore the simpl header
cost_df = pd.read_csv(infn,index_col=list(range(3)),header=[1,2,3])
cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3])
df = cost_df.groupby(cost_df.index.get_level_values(2)).sum()
#convert to billions
df = df/1e9
# convert to billions
df = df / 1e9
df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[df.max(axis=1) < config['plotting']['costs_threshold']]
to_drop = df.index[df.max(axis=1) < config["plotting"]["costs_threshold"]]
print("dropping")
@ -77,22 +92,28 @@ def plot_costs(infn, config, fn=None):
print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
new_index = (preferred_order & df.index).append(
df.index.difference(preferred_order)
)
new_columns = df.sum().sort_values().index
fig, ax = plt.subplots()
fig.set_size_inches((12,8))
fig.set_size_inches((12, 8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[config["plotting"]["tech_colors"][i] for i in new_index],
)
handles,labels = ax.get_legend_handles_labels()
handles, labels = ax.get_legend_handles_labels()
handles.reverse()
labels.reverse()
ax.set_ylim([0,config['plotting']['costs_max']])
ax.set_ylim([0, config["plotting"]["costs_max"]])
ax.set_ylabel("System Cost [EUR billion per year]")
@ -100,8 +121,7 @@ def plot_costs(infn, config, fn=None):
ax.grid(axis="y")
ax.legend(handles,labels,ncol=4,loc="upper left")
ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout()
@ -110,17 +130,16 @@ def plot_costs(infn, config, fn=None):
def plot_energy(infn, config, fn=None):
energy_df = pd.read_csv(infn, index_col=list(range(2)),header=[1,2,3])
energy_df = pd.read_csv(infn, index_col=list(range(2)), header=[1, 2, 3])
df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
#convert MWh to TWh
df = df/1e6
# convert MWh to TWh
df = df / 1e6
df = df.groupby(df.index.map(rename_techs)).sum()
to_drop = df.index[df.abs().max(axis=1) < config['plotting']['energy_threshold']]
to_drop = df.index[df.abs().max(axis=1) < config["plotting"]["energy_threshold"]]
print("dropping")
@ -130,22 +149,28 @@ def plot_energy(infn, config, fn=None):
print(df.sum())
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
new_index = (preferred_order & df.index).append(
df.index.difference(preferred_order)
)
new_columns = df.columns.sort_values()
fig, ax = plt.subplots()
fig.set_size_inches((12,8))
fig.set_size_inches((12, 8))
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])
df.loc[new_index, new_columns].T.plot(
kind="bar",
ax=ax,
stacked=True,
color=[config["plotting"]["tech_colors"][i] for i in new_index],
)
handles,labels = ax.get_legend_handles_labels()
handles, labels = ax.get_legend_handles_labels()
handles.reverse()
labels.reverse()
ax.set_ylim([config['plotting']['energy_min'], config['plotting']['energy_max']])
ax.set_ylim([config["plotting"]["energy_min"], config["plotting"]["energy_max"]])
ax.set_ylabel("Energy [TWh/a]")
@ -153,8 +178,7 @@ def plot_energy(infn, config, fn=None):
ax.grid(axis="y")
ax.legend(handles,labels,ncol=4,loc="upper left")
ax.legend(handles, labels, ncol=4, loc="upper left")
fig.tight_layout()
@ -163,11 +187,20 @@ def plot_energy(infn, config, fn=None):
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('plot_summary', summary='energy',
simpl='', clusters=5, ll='copt', opts='Co2L-24H',
attr='', ext='png', country='all')
snakemake = mock_snakemake(
"plot_summary",
summary="energy",
simpl="",
clusters=5,
ll="copt",
opts="Co2L-24H",
attr="",
ext="png",
country="all",
)
configure_logging(snakemake)
config = snakemake.config
@ -178,4 +211,6 @@ if __name__ == "__main__":
except KeyError:
raise RuntimeError(f"plotting function for {summary} has not been defined")
func(os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0])
func(
os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0]
)

View File

@ -1,11 +1,13 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Extracts capacities of HVDC links from `Wikipedia <https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_.
Extracts capacities of HVDC links from `Wikipedia
<https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_.
Relevant Settings
-----------------
@ -33,13 +35,12 @@ Description
-----------
*None*
"""
import logging
from _helpers import configure_logging
import pandas as pd
from _helpers import configure_logging
logger = logging.getLogger(__name__)
@ -49,29 +50,45 @@ def multiply(s):
def extract_coordinates(s):
regex = (r"(\d{1,2})°(\d{1,2})(\d{1,2})″(N|S) "
r"(\d{1,2})°(\d{1,2})(\d{1,2})″(E|W)")
regex = (
r"(\d{1,2})°(\d{1,2})(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})(\d{1,2})″(E|W)"
)
e = s.str.extract(regex, expand=True)
lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.})
lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.})
lat = (
e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0
) * e[3].map({"N": +1.0, "S": -1.0})
lon = (
e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0
) * e[7].map({"E": +1.0, "W": -1.0})
return lon, lat
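A minimal sketch (made-up coordinate) of the degree/minute/second to decimal conversion performed by extract_coordinates above; the hemisphere letter only flips the sign.
deg, minute, sec, hemi = 55.0, 41.0, 13.0, "N"
lat = (deg + (minute + sec / 60.0) / 60.0) * (1.0 if hemi == "N" else -1.0)
print(round(lat, 4))  # 55.6869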
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake #rule must be enabled in config
snakemake = mock_snakemake('prepare_links_p_nom', simpl='')
if "snakemake" not in globals():
from _helpers import mock_snakemake # rule must be enabled in config
snakemake = mock_snakemake("prepare_links_p_nom", simpl="")
configure_logging(snakemake)
links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0]
links_p_nom = pd.read_html(
"https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol"
)[0]
mw = "Power (MW)"
m_b = links_p_nom[mw].str.contains('x').fillna(False)
m_b = links_p_nom[mw].str.contains("x").fillna(False)
links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply)
links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split("x").pipe(multiply)
links_p_nom[mw] = (
links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
)
links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1'])
links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2'])
links_p_nom["x1"], links_p_nom["y1"] = extract_coordinates(
links_p_nom["Converterstation 1"]
)
links_p_nom["x2"], links_p_nom["y2"] = extract_coordinates(
links_p_nom["Converterstation 2"]
)
links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(snakemake.output[0], index=False)
links_p_nom.dropna(subset=["x1", "y1", "x2", "y2"]).to_csv(
snakemake.output[0], index=False
)

View File

@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such as
Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such
as.
- adding an annual **limit** of carbon-dioxide emissions,
- adding an exogenous **price** per tonne emissions of carbon-dioxide (or other kinds),
@ -53,17 +55,15 @@ Description
The rule :mod:`prepare_all_networks` runs
for all ``scenario`` s in the configuration file
the rule :mod:`prepare_network`.
"""
import logging
from _helpers import configure_logging
import re
import pypsa
import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs
idx = pd.IndexSlice
@ -71,65 +71,84 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__)
def add_co2limit(n, co2limit, Nyears=1.):
n.add("GlobalConstraint", "CO2Limit",
carrier_attribute="co2_emissions", sense="<=",
constant=co2limit * Nyears)
def add_co2limit(n, co2limit, Nyears=1.0):
n.add(
"GlobalConstraint",
"CO2Limit",
carrier_attribute="co2_emissions",
sense="<=",
constant=co2limit * Nyears,
)
def add_gaslimit(n, gaslimit, Nyears=1.):
def add_gaslimit(n, gaslimit, Nyears=1.0):
sel = n.carriers.index.intersection(["OCGT", "CCGT", "CHP"])
n.carriers.loc[sel, "gas_usage"] = 1.
n.carriers.loc[sel, "gas_usage"] = 1.0
n.add("GlobalConstraint", "GasLimit",
carrier_attribute="gas_usage", sense="<=",
constant=gaslimit * Nyears)
n.add(
"GlobalConstraint",
"GasLimit",
carrier_attribute="gas_usage",
sense="<=",
constant=gaslimit * Nyears,
)
def add_emission_prices(n, emission_prices={'co2': 0.}, exclude_co2=False):
if exclude_co2: emission_prices.pop('co2')
ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') *
n.carriers.filter(like='_emissions')).sum(axis=1)
def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
if exclude_co2:
emission_prices.pop("co2")
ep = (
pd.Series(emission_prices).rename(lambda x: x + "_emissions")
* n.carriers.filter(like="_emissions")
).sum(axis=1)
gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
n.generators['marginal_cost'] += gen_ep
n.generators["marginal_cost"] += gen_ep
su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
n.storage_units['marginal_cost'] += su_ep
n.storage_units["marginal_cost"] += su_ep
def set_line_s_max_pu(n, s_max_pu = 0.7):
n.lines['s_max_pu'] = s_max_pu
def set_line_s_max_pu(n, s_max_pu=0.7):
n.lines["s_max_pu"] = s_max_pu
logger.info(f"N-1 security margin of lines set to {s_max_pu}")
def set_transmission_limit(n, ll_type, factor, costs, Nyears=1):
links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series()
links_dc_b = n.links.carrier == "DC" if not n.links.empty else pd.Series()
_lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) *
n.lines.num_parallel * n.lines.bus0.map(n.buses.v_nom))
lines_s_nom = n.lines.s_nom.where(n.lines.type == '', _lines_s_nom)
_lines_s_nom = (
np.sqrt(3)
* n.lines.type.map(n.line_types.i_nom)
* n.lines.num_parallel
* n.lines.bus0.map(n.buses.v_nom)
)
lines_s_nom = n.lines.s_nom.where(n.lines.type == "", _lines_s_nom)
col = 'capital_cost' if ll_type == 'c' else 'length'
ref = (lines_s_nom @ n.lines[col] +
n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col])
col = "capital_cost" if ll_type == "c" else "length"
ref = (
lines_s_nom @ n.lines[col]
+ n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]
)
update_transmission_costs(n, costs)
if factor == 'opt' or float(factor) > 1.0:
n.lines['s_nom_min'] = lines_s_nom
n.lines['s_nom_extendable'] = True
if factor == "opt" or float(factor) > 1.0:
n.lines["s_nom_min"] = lines_s_nom
n.lines["s_nom_extendable"] = True
n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom']
n.links.loc[links_dc_b, 'p_nom_extendable'] = True
n.links.loc[links_dc_b, "p_nom_min"] = n.links.loc[links_dc_b, "p_nom"]
n.links.loc[links_dc_b, "p_nom_extendable"] = True
if factor != 'opt':
con_type = 'expansion_cost' if ll_type == 'c' else 'volume_expansion'
if factor != "opt":
con_type = "expansion_cost" if ll_type == "c" else "volume_expansion"
rhs = float(factor) * ref
n.add('GlobalConstraint', f'l{ll_type}_limit',
type=f'transmission_{con_type}_limit',
sense='<=', constant=rhs, carrier_attribute='AC, DC')
n.add(
"GlobalConstraint",
f"l{ll_type}_limit",
type=f"transmission_{con_type}_limit",
sense="<=",
constant=rhs,
carrier_attribute="AC, DC",
)
return n
@ -143,7 +162,7 @@ def average_every_nhours(n, offset):
m.snapshot_weightings = snapshot_weightings
for c in n.iterate_components():
pnl = getattr(m, c.list_name+"_t")
pnl = getattr(m, c.list_name + "_t")
for k, df in c.pnl.items():
if not df.empty:
pnl[k] = df.resample(offset).mean()
@ -156,23 +175,29 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
try:
import tsam.timeseriesaggregation as tsam
except:
raise ModuleNotFoundError("Optional dependency 'tsam' not found."
"Install via 'pip install tsam'")
raise ModuleNotFoundError(
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
)
p_max_pu_norm = n.generators_t.p_max_pu.max()
p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm
load_norm = n.loads_t.p_set.max()
load = n.loads_t.p_set / load_norm
inflow_norm = n.storage_units_t.inflow.max()
inflow = n.storage_units_t.inflow / inflow_norm
raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False)
agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
noTypicalPeriods=1, noSegments=int(segments),
segmentation=True, solver=solver_name)
agg = tsam.TimeSeriesAggregation(
raw,
hoursPerPeriod=len(raw),
noTypicalPeriods=1,
noSegments=int(segments),
segmentation=True,
solver=solver_name,
)
segmented = agg.createTypicalPeriods()
@ -180,9 +205,11 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets]
n.set_snapshots(pd.DatetimeIndex(snapshots, name='name'))
n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64")
n.set_snapshots(pd.DatetimeIndex(snapshots, name="name"))
n.snapshot_weightings = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)
segmented.index = snapshots
n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm
n.loads_t.p_set = segmented[n.loads_t.p_set.columns] * load_norm
@ -190,49 +217,57 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
return n
def enforce_autarky(n, only_crossborder=False):
if only_crossborder:
lines_rm = n.lines.loc[
n.lines.bus0.map(n.buses.country) !=
n.lines.bus1.map(n.buses.country)
].index
n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country)
].index
links_rm = n.links.loc[
n.links.bus0.map(n.buses.country) !=
n.links.bus1.map(n.buses.country)
].index
n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country)
].index
else:
lines_rm = n.lines.index
links_rm = n.links.loc[n.links.carrier=="DC"].index
links_rm = n.links.loc[n.links.carrier == "DC"].index
n.mremove("Line", lines_rm)
n.mremove("Link", links_rm)
def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf):
n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('prepare_network', simpl='',
clusters='40', ll='v0.3', opts='Co2L-24H')
snakemake = mock_snakemake(
"prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H"
)
configure_logging(snakemake)
opts = snakemake.wildcards.opts.split('-')
opts = snakemake.wildcards.opts.split("-")
n = pypsa.Network(snakemake.input[0])
Nyears = n.snapshot_weightings.objective.sum() / 8760.
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
costs = load_costs(
snakemake.input.tech_costs,
snakemake.config["costs"],
snakemake.config["electricity"],
Nyears,
)
set_line_s_max_pu(n, snakemake.config['lines']['s_max_pu'])
set_line_s_max_pu(n, snakemake.config["lines"]["s_max_pu"])
for o in opts:
m = re.match(r'^\d+h$', o, re.IGNORECASE)
m = re.match(r"^\d+h$", o, re.IGNORECASE)
if m is not None:
n = average_every_nhours(n, m.group(0))
break
for o in opts:
m = re.match(r'^\d+seg$', o, re.IGNORECASE)
m = re.match(r"^\d+seg$", o, re.IGNORECASE)
if m is not None:
solver_name = snakemake.config["solving"]["solver"]["name"]
n = apply_time_segmentation(n, m.group(0)[:-3], solver_name)
@ -242,11 +277,11 @@ if __name__ == "__main__":
if "Co2L" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
co2limit = float(m[0]) * snakemake.config['electricity']['co2base']
co2limit = float(m[0]) * snakemake.config["electricity"]["co2base"]
add_co2limit(n, co2limit, Nyears)
logger.info("Setting CO2 limit according to wildcard value.")
else:
add_co2limit(n, snakemake.config['electricity']['co2limit'], Nyears)
add_co2limit(n, snakemake.config["electricity"]["co2limit"], Nyears)
logger.info("Setting CO2 limit according to config value.")
break
@ -277,24 +312,27 @@ if __name__ == "__main__":
comps = {"Generator", "Link", "StorageUnit", "Store"}
for c in n.iterate_components(comps):
sel = c.df.carrier.str.contains(carrier)
c.df.loc[sel,attr] *= factor
c.df.loc[sel, attr] *= factor
for o in opts:
if 'Ep' in o:
if "Ep" in o:
m = re.findall("[0-9]*\.?[0-9]+$", o)
if len(m) > 0:
logger.info("Setting emission prices according to wildcard value.")
add_emission_prices(n, dict(co2=float(m[0])))
else:
logger.info("Setting emission prices according to config value.")
add_emission_prices(n, snakemake.config['costs']['emission_prices'])
add_emission_prices(n, snakemake.config["costs"]["emission_prices"])
break
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
set_transmission_limit(n, ll_type, factor, costs, Nyears)
set_line_nom_max(n, s_nom_max_set=snakemake.config["lines"].get("s_nom_max,", np.inf),
p_nom_max_set=snakemake.config["links"].get("p_nom_max,", np.inf))
set_line_nom_max(
n,
s_nom_max_set=snakemake.config["lines"].get("s_nom_max", np.inf),
p_nom_max_set=snakemake.config["links"].get("p_nom_max", np.inf),
)
if "ATK" in opts:
enforce_autarky(n)

View File

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright 2019-2022 Fabian Hofmann (TUB, FIAS)
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
@ -33,24 +34,27 @@ The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/35179
"""
import logging
from _helpers import progress_retrieve, configure_logging
import tarfile
from pathlib import Path
from _helpers import configure_logging, progress_retrieve
logger = logging.getLogger(__name__)
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('retrieve_databundle')
rootpath = '..'
else:
rootpath = '.'
configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102)
if snakemake.config['tutorial']:
snakemake = mock_snakemake("retrieve_databundle")
rootpath = ".."
else:
rootpath = "."
configure_logging(
snakemake
) # TODO Make logging compatible with progressbar (see PR #102)
if snakemake.config["tutorial"]:
url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz"
else:
url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"

View File

@ -1,12 +1,13 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
# coding: utf-8
"""
Lifts electrical transmission network to a single 380 kV voltage layer,
removes dead-ends of the network,
and reduces multi-hop HVDC connections to a single link.
Lifts electrical transmission network to a single 380 kV voltage layer, removes
dead-ends of the network, and reduces multi-hop HVDC connections to a single
link.
Relevant Settings
-----------------
@ -85,21 +86,23 @@ The rule :mod:`simplify_network` does up to four things:
"""
import logging
from _helpers import configure_logging, update_p_nom_max, get_aggregation_strategies
from cluster_network import clustering_for_n_clusters, cluster_regions
from add_electricity import load_costs
import pandas as pd
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import connected_components, dijkstra
from functools import reduce
import numpy as np
import pandas as pd
import pypsa
import scipy as sp
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
from add_electricity import load_costs
from cluster_network import cluster_regions, clustering_for_n_clusters
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport, get_clustering_from_busmap
from pypsa.networkclustering import (
aggregategenerators,
aggregateoneport,
busmap_by_stubs,
get_clustering_from_busmap,
)
from scipy.sparse.csgraph import connected_components, dijkstra
logger = logging.getLogger(__name__)
@ -117,26 +120,26 @@ def simplify_network_to_380(n):
"""
logger.info("Mapping all network lines onto a single 380kV layer")
n.buses['v_nom'] = 380.
n.buses["v_nom"] = 380.0
linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique()
n.lines['type'] = linetype_380
(linetype_380,) = n.lines.loc[n.lines.v_nom == 380.0, "type"].unique()
n.lines["type"] = linetype_380
n.lines["v_nom"] = 380
n.lines["i_nom"] = n.line_types.i_nom[linetype_380]
n.lines['num_parallel'] = n.lines.eval("s_nom / (sqrt(3) * v_nom * i_nom)")
n.lines["num_parallel"] = n.lines.eval("s_nom / (sqrt(3) * v_nom * i_nom)")
trafo_map = pd.Series(n.transformers.bus1.values, n.transformers.bus0.values)
trafo_map = trafo_map[~trafo_map.index.duplicated(keep='first')]
trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")]
several_trafo_b = trafo_map.isin(trafo_map.index)
trafo_map[several_trafo_b] = trafo_map[several_trafo_b].map(trafo_map)
missing_buses_i = n.buses.index.difference(trafo_map.index)
missing = pd.Series(missing_buses_i, missing_buses_i)
trafo_map = pd.concat([trafo_map, missing])
for c in n.one_port_components|n.branch_components:
for c in n.one_port_components | n.branch_components:
df = n.df(c)
for col in df.columns:
if col.startswith('bus'):
if col.startswith("bus"):
df[col] = df[col].map(trafo_map)
n.mremove("Transformer", n.transformers.index)
@ -146,22 +149,30 @@ def simplify_network_to_380(n):
def _prepare_connection_costs_per_link(n, costs, config):
if n.links.empty: return {}
if n.links.empty:
return {}
connection_costs_per_link = {}
for tech in config['renewable']:
if tech.startswith('offwind'):
for tech in config["renewable"]:
if tech.startswith("offwind"):
connection_costs_per_link[tech] = (
n.links.length * config['lines']['length_factor'] *
(n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
(1. - n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])
n.links.length
* config["lines"]["length_factor"]
* (
n.links.underwater_fraction
* costs.at[tech + "-connection-submarine", "capital_cost"]
+ (1.0 - n.links.underwater_fraction)
* costs.at[tech + "-connection-underground", "capital_cost"]
)
)
return connection_costs_per_link
def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link=None, buses=None):
def _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link=None, buses=None
):
if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
@ -171,12 +182,21 @@ def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_
connection_costs_to_bus = pd.DataFrame(index=buses)
for tech in connection_costs_per_link:
adj = n.adjacency_matrix(weights=pd.concat(dict(Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0., n.lines.index))))
adj = n.adjacency_matrix(
weights=pd.concat(
dict(
Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0.0, n.lines.index),
)
)
)
costs_between_buses = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses))
connection_costs_to_bus[tech] = costs_between_buses[np.arange(len(buses)),
n.buses.index.get_indexer(busmap.loc[buses])]
costs_between_buses = dijkstra(
adj, directed=False, indices=n.buses.index.get_indexer(buses)
)
connection_costs_to_bus[tech] = costs_between_buses[
np.arange(len(buses)), n.buses.index.get_indexer(busmap.loc[buses])
]
return connection_costs_to_bus
@ -185,20 +205,34 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, out
connection_costs = {}
for tech in connection_costs_to_bus:
tech_b = n.generators.carrier == tech
costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0]
costs = (
n.generators.loc[tech_b, "bus"]
.map(connection_costs_to_bus[tech])
.loc[lambda s: s > 0]
)
if not costs.empty:
n.generators.loc[costs.index, "capital_cost"] += costs
logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
.format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
logger.info(
"Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format(
tech,
", ".join(
"{:.0f} Eur/MW/a for `{}`".format(d, b)
for b, d in costs.iteritems()
),
)
)
connection_costs[tech] = costs
pd.DataFrame(connection_costs).to_csv(output.connection_costs)
def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
aggregate_one_ports={"Load", "StorageUnit"},
aggregation_strategies=dict()):
def _aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
output,
aggregate_one_ports={"Load", "StorageUnit"},
aggregation_strategies=dict(),
):
def replace_components(n, c, df, pnl):
n.mremove(c, n.df(c).index)
@ -236,8 +270,10 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
return n, n.buses.index.to_series()
# Determine connected link components, ignore all links but DC
adjacency_matrix = n.adjacency_matrix(branch_components=['Link'],
weights=dict(Link=(n.links.carrier == 'DC').astype(float)))
adjacency_matrix = n.adjacency_matrix(
branch_components=["Link"],
weights=dict(Link=(n.links.carrier == "DC").astype(float)),
)
_, labels = connected_components(adjacency_matrix, directed=False)
labels = pd.Series(labels, n.buses.index)
@ -248,22 +284,23 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
nodes = frozenset(nodes)
seen = set()
supernodes = {m for m in nodes
if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
for u in supernodes:
for m, ls in G.adj[u].items():
if m not in nodes or m in seen: continue
if m not in nodes or m in seen:
continue
buses = [u, m]
links = [list(ls)] #[name for name in ls]]
links = [list(ls)] # [name for name in ls]]
while m not in (supernodes | seen):
seen.add(m)
for m2, ls in G.adj[m].items():
if m2 in seen or m2 == u: continue
if m2 in seen or m2 == u:
continue
buses.append(m2)
links.append(list(ls)) # [name for name in ls])
links.append(list(ls)) # [name for name in ls])
break
else:
# stub
@ -276,83 +313,123 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
busmap = n.buses.index.to_series()
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link))
connection_costs_to_bus = pd.DataFrame(
0.0, index=n.buses.index, columns=list(connection_costs_per_link)
)
for lbl in labels.value_counts().loc[lambda s: s > 2].index:
for b, buses, links in split_links(labels.index[labels == lbl]):
if len(buses) <= 2: continue
if len(buses) <= 2:
continue
logger.debug('nodes = {}'.format(labels.index[labels == lbl]))
logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links))
logger.debug("nodes = {}".format(labels.index[labels == lbl]))
logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links))
m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']],
n.buses.loc[buses[1:-1], ['x', 'y']])
m = sp.spatial.distance_matrix(
n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]]
)
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses)
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link, buses
)
all_links = [i for _, i in sum(links, [])]
p_max_pu = config['links'].get('p_max_pu', 1.)
lengths = n.links.loc[all_links, 'length']
name = lengths.idxmax() + '+{}'.format(len(links) - 1)
p_max_pu = config["links"].get("p_max_pu", 1.0)
lengths = n.links.loc[all_links, "length"]
name = lengths.idxmax() + "+{}".format(len(links) - 1)
params = dict(
carrier='DC',
bus0=b[0], bus1=b[1],
length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links),
p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links),
underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']),
carrier="DC",
bus0=b[0],
bus1=b[1],
length=sum(
n.links.loc[[i for _, i in l], "length"].mean() for l in links
),
p_nom=min(n.links.loc[[i for _, i in l], "p_nom"].sum() for l in links),
underwater_fraction=sum(
lengths
/ lengths.sum()
* n.links.loc[all_links, "underwater_fraction"]
),
p_max_pu=p_max_pu,
p_min_pu=-p_max_pu,
underground=False,
under_construction=False
under_construction=False,
)
logger.info("Joining the links {} connecting the buses {} to simple link {}".format(", ".join(all_links), ", ".join(buses), name))
logger.info(
"Joining the links {} connecting the buses {} to simple link {}".format(
", ".join(all_links), ", ".join(buses), name
)
)
n.mremove("Link", all_links)
static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static]
for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default)
for attr, default in static_attrs.default.iteritems():
params.setdefault(attr, default)
n.links.loc[name] = pd.Series(params)
# n.add("Link", **params)
logger.debug("Collecting all components using the busmap")
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
aggregation_strategies=aggregation_strategies)
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
output,
aggregation_strategies=aggregation_strategies,
)
return n, busmap
def remove_stubs(n, costs, config, output, aggregation_strategies=dict()):
logger.info("Removing stubs")
busmap = busmap_by_stubs(n) # ['country'])
busmap = busmap_by_stubs(n) # ['country'])
connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config)
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
aggregation_strategies=aggregation_strategies)
_aggregate_and_move_components(
n,
busmap,
connection_costs_to_bus,
output,
aggregation_strategies=aggregation_strategies,
)
return n, busmap
def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
# can be used to aggregate a selection of buses to their electrically closest neighbors
# if no buses are given, nodes that are not substations or lack an offshore connection are aggregated
if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus))
logger.info(
"Aggregating buses that are no substations or have no valid offshore connection"
)
buses_i = list(set(n.buses.index) - set(n.generators.bus) - set(n.loads.bus))
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3),
'Link': n.links.length/n.links.p_nom.clip(1e-3)})
weight = pd.concat(
{
"Line": n.lines.length / n.lines.s_nom.clip(1e-3),
"Link": n.links.length / n.links.p_nom.clip(1e-3),
}
)
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
adj = n.adjacency_matrix(branch_components=["Line", "Link"], weights=weight)
bus_indexer = n.buses.index.get_indexer(buses_i)
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index)
dist = pd.DataFrame(
dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index
)
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
dist[
buses_i
] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
for c in n.buses.country.unique():
incountry_b = n.buses.country == c
@ -361,49 +438,68 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1)
bus_strategies, generator_strategies = get_aggregation_strategies(aggregation_strategies)
bus_strategies, generator_strategies = get_aggregation_strategies(
aggregation_strategies
)
clustering = get_clustering_from_busmap(n, busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies=generator_strategies,
scale_link_capital_costs=False)
clustering = get_clustering_from_busmap(
n,
busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies=generator_strategies,
scale_link_capital_costs=False,
)
return clustering.network, busmap
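A minimal sketch (toy numbers) of the "electrically closest" notion used in aggregate_to_substations: branches are weighted by length divided by capacity, so a short, strong line counts as a small electrical distance in the Dijkstra search.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra
length = np.array([10.0, 50.0])    # bus0-bus1 short, bus0-bus2 long
s_nom = np.array([1000.0, 200.0])  # bus0-bus1 strong, bus0-bus2 weak
w = length / s_nom                 # [0.01, 0.25]
adj = csr_matrix((w, ([0, 0], [1, 2])), shape=(3, 3))
dist = dijkstra(adj, directed=False, indices=[1, 2])
print(dist)  # bus 1 is electrically much closer to bus 0 than bus 2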
def cluster(n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict()):
def cluster(
n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict()
):
logger.info(f"Clustering to {n_clusters} buses")
focus_weights = config.get('focus_weights', None)
focus_weights = config.get("focus_weights", None)
renewable_carriers = pd.Index([tech
for tech in n.generators.carrier.unique()
if tech.split('-', 2)[0] in config['renewable']])
renewable_carriers = pd.Index(
[
tech
for tech in n.generators.carrier.unique()
if tech.split("-", 2)[0] in config["renewable"]
]
)
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False,
aggregation_strategies=aggregation_strategies,
solver_name=config['solving']['solver']['name'],
algorithm=algorithm, feature=feature,
focus_weights=focus_weights)
clustering = clustering_for_n_clusters(
n,
n_clusters,
custom_busmap=False,
aggregation_strategies=aggregation_strategies,
solver_name=config["solving"]["solver"]["name"],
algorithm=algorithm,
feature=feature,
focus_weights=focus_weights,
)
return clustering.network, clustering.busmap
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('simplify_network', simpl='f')
snakemake = mock_snakemake("simplify_network", simpl="f")
configure_logging(snakemake)
n = pypsa.Network(snakemake.input.network)
aggregation_strategies = snakemake.config["clustering"].get("aggregation_strategies", {})
aggregation_strategies = snakemake.config["clustering"].get(
"aggregation_strategies", {}
)
# translate str entries of aggregation_strategies to pd.Series functions:
aggregation_strategies = {
p: {k: getattr(pd.Series, v) for k,v in aggregation_strategies[p].items()}
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
for p in aggregation_strategies.keys()
}
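# for illustration, a hypothetical config entry {"generators": {"p_nom_max": "sum"}}
# is translated here into {"generators": {"p_nom_max": pd.Series.sum}}, i.e. the method
# names are replaced by the corresponding pandas aggregation functions via getattr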
@ -411,44 +507,78 @@ if __name__ == "__main__":
Nyears = n.snapshot_weightings.objective.sum() / 8760
technology_costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
technology_costs = load_costs(
snakemake.input.tech_costs,
snakemake.config["costs"],
snakemake.config["electricity"],
Nyears,
)
n, simplify_links_map = simplify_links(n, technology_costs, snakemake.config, snakemake.output,
aggregation_strategies)
n, simplify_links_map = simplify_links(
n, technology_costs, snakemake.config, snakemake.output, aggregation_strategies
)
n, stub_map = remove_stubs(n, technology_costs, snakemake.config, snakemake.output,
aggregation_strategies=aggregation_strategies)
n, stub_map = remove_stubs(
n,
technology_costs,
snakemake.config,
snakemake.output,
aggregation_strategies=aggregation_strategies,
)
busmaps = [trafo_map, simplify_links_map, stub_map]
cluster_config = snakemake.config.get('clustering', {}).get('simplify_network', {})
if cluster_config.get('clustering', {}).get('simplify_network', {}).get('to_substations', False):
cluster_config = snakemake.config.get("clustering", {}).get("simplify_network", {})
# cluster_config already refers to config["clustering"]["simplify_network"]
if cluster_config.get("to_substations", False):
n, substation_map = aggregate_to_substations(n, aggregation_strategies)
busmaps.append(substation_map)
# treatment of outliers (nodes without a profile for the considered carrier):
# all nodes that have no profile of the given carrier are aggregated to their closest neighbor
if (
snakemake.config.get("clustering", {}).get("cluster_network", {}).get("algorithm", "hac") == "hac" or
cluster_config.get("algorithm", "hac") == "hac"
snakemake.config.get("clustering", {})
.get("cluster_network", {})
.get("algorithm", "hac")
== "hac"
or cluster_config.get("algorithm", "hac") == "hac"
):
carriers = cluster_config.get("feature", "solar+onwind-time").split('-')[0].split('+')
carriers = (
cluster_config.get("feature", "solar+onwind-time").split("-")[0].split("+")
)
for carrier in carriers:
buses_i = list(set(n.buses.index)-set(n.generators.query("carrier == @carrier").bus))
logger.info(f'clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}.')
buses_i = list(
set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus)
)
logger.info(
f"clustering preparaton (hac): aggregating {len(buses_i)} buses of type {carrier}."
)
n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i)
busmaps.append(busmap_hac)
if snakemake.wildcards.simpl:
n, cluster_map = cluster(n, int(snakemake.wildcards.simpl), snakemake.config,
cluster_config.get('algorithm', 'hac'),
cluster_config.get('feature', None),
aggregation_strategies)
n, cluster_map = cluster(
n,
int(snakemake.wildcards.simpl),
snakemake.config,
cluster_config.get("algorithm", "hac"),
cluster_config.get("feature", None),
aggregation_strategies,
)
busmaps.append(cluster_map)
# some entries in n.buses are not updated by the previous functions and can therefore be wrong;
# as they are not needed and are lost when clustering (for example with the simpl wildcard), we remove them for consistency:
buses_c = {'symbol', 'tags', 'under_construction', 'substation_lv', 'substation_off'}.intersection(n.buses.columns)
buses_c = {
"symbol",
"tags",
"under_construction",
"substation_lv",
"substation_off",
}.intersection(n.buses.columns)
n.buses = n.buses.drop(buses_c, axis=1)
update_p_nom_max(n)

View File

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Solves linear optimal power flow for a network iteratively while updating reactances.
Solves linear optimal power flow for a network iteratively while updating
reactances.
Relevant Settings
-----------------
@ -73,104 +75,123 @@ Details (and errors made through this heuristic) are discussed in the paper
The rule :mod:`solve_all_networks` runs
for all ``scenario`` s in the configuration file
the rule :mod:`solve_network`.
"""
import logging
from _helpers import configure_logging
import re
from pathlib import Path
import numpy as np
import pandas as pd
import re
import pypsa
from pypsa.linopf import (get_var, define_constraints, define_variables,
linexpr, join_exprs, network_lopf, ilopf)
from _helpers import configure_logging
from pypsa.descriptors import get_switchable_as_dense as get_as_dense
from pathlib import Path
from pypsa.linopf import (
define_constraints,
define_variables,
get_var,
ilopf,
join_exprs,
linexpr,
network_lopf,
)
from vresutils.benchmark import memory_logger
logger = logging.getLogger(__name__)
def prepare_network(n, solve_opts):
if 'clip_p_max_pu' in solve_opts:
if "clip_p_max_pu" in solve_opts:
for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow):
df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True)
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
load_shedding = solve_opts.get('load_shedding')
load_shedding = solve_opts.get("load_shedding")
if load_shedding:
n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding")
buses_i = n.buses.query("carrier == 'AC'").index
if not np.isscalar(load_shedding): load_shedding = 1e2 # Eur/kWh
if not np.isscalar(load_shedding):
load_shedding = 1e2 # Eur/kWh
# intersection of macroeconomic and survey-based
# willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full)
n.madd("Generator", buses_i, " load",
bus=buses_i,
carrier='load',
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=load_shedding,
p_nom=1e9 # kW
)
n.madd(
"Generator",
buses_i,
" load",
bus=buses_i,
carrier="load",
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=load_shedding,
p_nom=1e9, # kW
)
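# unit sketch: with sign = 1e-3 the dispatch p of these generators is counted in kW at
# the bus, so the fallback marginal_cost of 1e2 corresponds to 100 EUR/kWh (1e5 EUR/MWh)
# and p_nom = 1e9 kW equals 1000 GW, i.e. shedding is always feasible but very expensive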
if solve_opts.get('noisy_costs'):
if solve_opts.get("noisy_costs"):
for t in n.iterate_components(n.one_port_components):
#if 'capital_cost' in t.df:
# if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if 'marginal_cost' in t.df:
t.df['marginal_cost'] += (1e-2 + 2e-3 *
(np.random.random(len(t.df)) - 0.5))
if "marginal_cost" in t.df:
t.df["marginal_cost"] += 1e-2 + 2e-3 * (
np.random.random(len(t.df)) - 0.5
)
for t in n.iterate_components(['Line', 'Link']):
t.df['capital_cost'] += (1e-1 +
2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length']
for t in n.iterate_components(["Line", "Link"]):
t.df["capital_cost"] += (
1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
) * t.df["length"]
if solve_opts.get('nhours'):
nhours = solve_opts['nhours']
if solve_opts.get("nhours"):
nhours = solve_opts["nhours"]
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760. / nhours
n.snapshot_weightings[:] = 8760.0 / nhours
return n
def add_CCL_constraints(n, config):
agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits')
agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits")
try:
agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits,
index_col=list(range(2)))
agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2)))
except IOError:
logger.exception("Need to specify the path to a .csv file containing "
"aggregate capacity limits per country in "
"config['electricity']['agg_p_nom_limit'].")
logger.info("Adding per carrier generation capacity constraints for "
"individual countries")
logger.exception(
"Need to specify the path to a .csv file containing "
"aggregate capacity limits per country in "
"config['electricity']['agg_p_nom_limit']."
)
logger.info(
"Adding per carrier generation capacity constraints for " "individual countries"
)
gen_country = n.generators.bus.map(n.buses.country)
# cc means country and carrier
p_nom_per_cc = (pd.DataFrame(
{'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))),
'country': gen_country, 'carrier': n.generators.carrier})
.dropna(subset=['p_nom'])
.groupby(['country', 'carrier']).p_nom
.apply(join_exprs))
minimum = agg_p_nom_minmax['min'].dropna()
p_nom_per_cc = (
pd.DataFrame(
{
"p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))),
"country": gen_country,
"carrier": n.generators.carrier,
}
)
.dropna(subset=["p_nom"])
.groupby(["country", "carrier"])
.p_nom.apply(join_exprs)
)
minimum = agg_p_nom_minmax["min"].dropna()
if not minimum.empty:
minconstraint = define_constraints(n, p_nom_per_cc[minimum.index],
'>=', minimum, 'agg_p_nom', 'min')
maximum = agg_p_nom_minmax['max'].dropna()
minconstraint = define_constraints(
n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min"
)
maximum = agg_p_nom_minmax["max"].dropna()
if not maximum.empty:
maxconstraint = define_constraints(n, p_nom_per_cc[maximum.index],
'<=', maximum, 'agg_p_nom', 'max')
maxconstraint = define_constraints(
n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max"
)
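For reference, the file behind agg_p_nom_limits is read with its first two columns as a (country, carrier) index and optional min/max columns giving capacity limits in MW; a hedged sketch of such a CSV (header names and values are purely illustrative):
country,carrier,min,max
DE,onwind,20000,90000
FR,nuclear,,63000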
def add_EQ_constraints(n, o, scaling=1e-1):
float_regex = r"[0-9]*\.?[0-9]+"
level = float(re.findall(float_regex, o)[0])
if o[-1] == 'c':
if o[-1] == "c":
ggrouper = n.generators.bus.map(n.buses.country)
lgrouper = n.loads.bus.map(n.buses.country)
sgrouper = n.storage_units.bus.map(n.buses.country)
@ -178,135 +199,167 @@ def add_EQ_constraints(n, o, scaling=1e-1):
ggrouper = n.generators.bus
lgrouper = n.loads.bus
sgrouper = n.storage_units.bus
load = n.snapshot_weightings.generators @ \
n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
inflow = n.snapshot_weightings.stores @ \
n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
inflow = inflow.reindex(load.index).fillna(0.)
rhs = scaling * ( level * load - inflow )
lhs_gen = linexpr((n.snapshot_weightings.generators * scaling,
get_var(n, "Generator", "p").T)
).T.groupby(ggrouper, axis=1).apply(join_exprs)
lhs_spill = linexpr((-n.snapshot_weightings.stores * scaling,
get_var(n, "StorageUnit", "spill").T)
).T.groupby(sgrouper, axis=1).apply(join_exprs)
load = (
n.snapshot_weightings.generators
@ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
)
inflow = (
n.snapshot_weightings.stores
@ n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
)
inflow = inflow.reindex(load.index).fillna(0.0)
rhs = scaling * (level * load - inflow)
lhs_gen = (
linexpr(
(n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T)
)
.T.groupby(ggrouper, axis=1)
.apply(join_exprs)
)
lhs_spill = (
linexpr(
(
-n.snapshot_weightings.stores * scaling,
get_var(n, "StorageUnit", "spill").T,
)
)
.T.groupby(sgrouper, axis=1)
.apply(join_exprs)
)
lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("")
lhs = lhs_gen + lhs_spill
define_constraints(n, lhs, ">=", rhs, "equity", "min")
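A small parsing sketch for the option string handled above, assuming a hypothetical wildcard value "EQ0.7c": the number sets the minimum share of its annual load each region must cover locally, and a trailing "c" groups by country instead of by bus.
import re
float_regex = r"[0-9]*\.?[0-9]+"
level = float(re.findall(float_regex, "EQ0.7c")[0])  # -> 0.7, i.e. a 70% equity level
grouped_by_country = "EQ0.7c".endswith("c")  # -> True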
def add_BAU_constraints(n, config):
mincaps = pd.Series(config['electricity']['BAU_mincapacities'])
lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom')))
.groupby(n.generators.carrier).apply(join_exprs))
define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps')
mincaps = pd.Series(config["electricity"]["BAU_mincapacities"])
lhs = (
linexpr((1, get_var(n, "Generator", "p_nom")))
.groupby(n.generators.carrier)
.apply(join_exprs)
)
define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps")
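A minimal sketch of the expected config structure (carrier names and MW values are illustrative); for each listed carrier, the summed capacity of its extendable generators must reach at least the given value:
# config["electricity"]["BAU_mincapacities"] = {"OCGT": 100000, "nuclear": 10000}  # MW per carrier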
def add_SAFE_constraints(n, config):
peakdemand = (1. + config['electricity']['SAFE_reservemargin']) *\
n.loads_t.p_set.sum(axis=1).max()
conv_techs = config['plotting']['conv_techs']
exist_conv_caps = n.generators.query('~p_nom_extendable & carrier in @conv_techs')\
.p_nom.sum()
ext_gens_i = n.generators.query('carrier in @conv_techs & p_nom_extendable').index
lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[ext_gens_i])).sum()
peakdemand = (
1.0 + config["electricity"]["SAFE_reservemargin"]
) * n.loads_t.p_set.sum(axis=1).max()
conv_techs = config["plotting"]["conv_techs"]
exist_conv_caps = n.generators.query(
"~p_nom_extendable & carrier in @conv_techs"
).p_nom.sum()
ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum()
rhs = peakdemand - exist_conv_caps
define_constraints(n, lhs, '>=', rhs, 'Safe', 'mintotalcap')
define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap")
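A worked example of the resulting requirement, with illustrative numbers:
# peak load 100 GW, SAFE_reservemargin = 0.1, 60 GW of existing conventional capacity
# -> extendable conventional generators must provide at least 1.1 * 100 - 60 = 50 GW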
def add_operational_reserve_margin_constraint(n, config):
reserve_config = config["electricity"]["operational_reserve"]
EPSILON_LOAD = reserve_config["epsilon_load"]
EPSILON_VRES = reserve_config["epsilon_vres"]
CONTINGENCY = reserve_config["contingency"]
# Reserve Variables
reserve = get_var(n, 'Generator', 'r')
# Reserve Variables
reserve = get_var(n, "Generator", "r")
lhs = linexpr((1, reserve)).sum(1)
# Share of extendable renewable capacities
ext_i = n.generators.query('p_nom_extendable').index
ext_i = n.generators.query("p_nom_extendable").index
vres_i = n.generators_t.p_max_pu.columns
if not ext_i.empty and not vres_i.empty:
capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)]
renewable_capacity_variables = get_var(n, 'Generator', 'p_nom')[vres_i.intersection(ext_i)]
lhs += linexpr((-EPSILON_VRES * capacity_factor, renewable_capacity_variables)).sum(1)
renewable_capacity_variables = get_var(n, "Generator", "p_nom")[
vres_i.intersection(ext_i)
]
lhs += linexpr(
(-EPSILON_VRES * capacity_factor, renewable_capacity_variables)
).sum(1)
# Total demand at t
demand = n.loads_t.p.sum(1)
# VRES potential of non extendable generators
capacity_factor = n.generators_t.p_max_pu[vres_i.difference(ext_i)]
renewable_capacity = n.generators.p_nom[vres_i.difference(ext_i)]
potential = (capacity_factor * renewable_capacity).sum(1)
# Right-hand-side
rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY
define_constraints(n, lhs, '>=', rhs, "Reserve margin")
define_constraints(n, lhs, ">=", rhs, "Reserve margin")
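Spelled out, the constraint defined above reads for every snapshot t (notation illustrative):
#   sum_g r[g, t]  >=  epsilon_load * demand[t]
#                      + epsilon_vres * sum_{g in VRES} cf[g, t] * p_nom[g]
#                      + contingency
# the p_nom term of extendable VRES generators stays on the left-hand side in the code
# above because their capacity is itself an optimisation variable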
def update_capacity_constraint(n):
gen_i = n.generators.index
ext_i = n.generators.query('p_nom_extendable').index
fix_i = n.generators.query('not p_nom_extendable').index
ext_i = n.generators.query("p_nom_extendable").index
fix_i = n.generators.query("not p_nom_extendable").index
dispatch = get_var(n, "Generator", "p")
reserve = get_var(n, "Generator", "r")
dispatch = get_var(n, 'Generator', 'p')
reserve = get_var(n, 'Generator', 'r')
capacity_fixed = n.generators.p_nom[fix_i]
p_max_pu = get_as_dense(n, 'Generator', 'p_max_pu')
p_max_pu = get_as_dense(n, "Generator", "p_max_pu")
lhs = linexpr((1, dispatch), (1, reserve))
if not ext_i.empty:
capacity_variable = get_var(n, 'Generator', 'p_nom')
lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(columns=gen_i, fill_value='')
capacity_variable = get_var(n, "Generator", "p_nom")
lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(
columns=gen_i, fill_value=""
)
rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0)
define_constraints(n, lhs, '<=', rhs, 'Generators', 'updated_capacity_constraint')
define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint")
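In effect, the rewritten capacity constraint ties dispatch plus reserve provision to available capacity:
#   p[g, t] + r[g, t] <= p_max_pu[g, t] * p_nom[g]   for every generator g and snapshot t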
def add_operational_reserve_margin(n, sns, config):
"""
Build reserve margin constraints based on the formulation given in
https://genxproject.github.io/GenX/dev/core/#Reserves.
"""
define_variables(n, 0, np.inf, 'Generator', 'r', axes=[sns, n.generators.index])
define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index])
add_operational_reserve_margin_constraint(n, config)
update_capacity_constraint(n)
def add_battery_constraints(n):
nodes = n.buses.index[n.buses.carrier == "battery"]
if nodes.empty or ('Link', 'p_nom') not in n.variables.index:
if nodes.empty or ("Link", "p_nom") not in n.variables.index:
return
link_p_nom = get_var(n, "Link", "p_nom")
lhs = linexpr((1,link_p_nom[nodes + " charger"]),
(-n.links.loc[nodes + " discharger", "efficiency"].values,
link_p_nom[nodes + " discharger"].values))
define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio')
lhs = linexpr(
(1, link_p_nom[nodes + " charger"]),
(
-n.links.loc[nodes + " discharger", "efficiency"].values,
link_p_nom[nodes + " discharger"].values,
),
)
define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")
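Per battery node, the charger-ratio constraint above fixes (sketch):
#   p_nom[node + " charger"] == efficiency[node + " discharger"] * p_nom[node + " discharger"]
# so that the charging and discharging power ratings seen from the grid coincide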
def extra_functionality(n, snapshots):
"""
Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``.
If you want to enforce additional custom constraints, this is a good location to add them.
The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network.
Collects supplementary constraints which will be passed to
``pypsa.linopf.network_lopf``.
If you want to enforce additional custom constraints, this is a good
location to add them. The arguments ``opts`` and
``snakemake.config`` are expected to be attached to the network.
"""
opts = n.opts
config = n.config
if 'BAU' in opts and n.generators.p_nom_extendable.any():
if "BAU" in opts and n.generators.p_nom_extendable.any():
add_BAU_constraints(n, config)
if 'SAFE' in opts and n.generators.p_nom_extendable.any():
if "SAFE" in opts and n.generators.p_nom_extendable.any():
add_SAFE_constraints(n, config)
if 'CCL' in opts and n.generators.p_nom_extendable.any():
if "CCL" in opts and n.generators.p_nom_extendable.any():
add_CCL_constraints(n, config)
reserve = config["electricity"].get("operational_reserve", {})
if reserve.get("activate"):
@ -317,54 +370,71 @@ def extra_functionality(n, snapshots):
add_battery_constraints(n)
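A minimal sketch of a custom constraint that could be appended in extra_functionality, assuming a hypothetical opts flag "coal_cap" and an illustrative 20 GW limit; it only uses the linopf helpers already imported in this file:
if "coal_cap" in opts:
    coal_ext_i = n.generators.query("carrier == 'coal' and p_nom_extendable").index
    if not coal_ext_i.empty:
        lhs = linexpr((1, get_var(n, "Generator", "p_nom")[coal_ext_i])).sum()
        define_constraints(n, lhs, "<=", 20000, "Generator", "coal_cap")  # MW, illustrative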
def solve_network(n, config, opts='', **kwargs):
solver_options = config['solving']['solver'].copy()
solver_name = solver_options.pop('name')
cf_solving = config['solving']['options']
track_iterations = cf_solving.get('track_iterations', False)
min_iterations = cf_solving.get('min_iterations', 4)
max_iterations = cf_solving.get('max_iterations', 6)
def solve_network(n, config, opts="", **kwargs):
solver_options = config["solving"]["solver"].copy()
solver_name = solver_options.pop("name")
cf_solving = config["solving"]["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
max_iterations = cf_solving.get("max_iterations", 6)
# add to network for extra_functionality
n.config = config
n.opts = opts
skip_iterations = cf_solving.get('skip_iterations', False)
skip_iterations = cf_solving.get("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")
if skip_iterations:
network_lopf(n, solver_name=solver_name, solver_options=solver_options,
extra_functionality=extra_functionality, **kwargs)
network_lopf(
n,
solver_name=solver_name,
solver_options=solver_options,
extra_functionality=extra_functionality,
**kwargs
)
else:
ilopf(n, solver_name=solver_name, solver_options=solver_options,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality, **kwargs)
ilopf(
n,
solver_name=solver_name,
solver_options=solver_options,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality,
**kwargs
)
return n
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('solve_network', simpl='',
clusters='5', ll='copt', opts='Co2L-BAU-CCL-24H')
snakemake = mock_snakemake(
"solve_network", simpl="", clusters="5", ll="copt", opts="Co2L-BAU-CCL-24H"
)
configure_logging(snakemake)
tmpdir = snakemake.config['solving'].get('tmpdir')
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.opts.split('-')
solve_opts = snakemake.config['solving']['options']
opts = snakemake.wildcards.opts.split("-")
solve_opts = snakemake.config["solving"]["options"]
fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
n = pypsa.Network(snakemake.input[0])
n = prepare_network(n, solve_opts)
n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
solver_logfile=snakemake.log.solver)
n = solve_network(
n,
snakemake.config,
opts,
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver,
)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

View File

@ -1,10 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Solves linear optimal dispatch in hourly resolution
using the capacities of previous capacity expansion in rule :mod:`solve_network`.
Solves linear optimal dispatch in hourly resolution using the capacities of
previous capacity expansion in rule :mod:`solve_network`.
Relevant Settings
-----------------
@ -42,65 +43,80 @@ Outputs
Description
-----------
"""
import logging
from _helpers import configure_logging
import pypsa
import numpy as np
from pathlib import Path
import numpy as np
import pypsa
from _helpers import configure_logging
from solve_network import prepare_network, solve_network
from vresutils.benchmark import memory_logger
from solve_network import solve_network, prepare_network
logger = logging.getLogger(__name__)
def set_parameters_from_optimized(n, n_optim):
lines_typed_i = n.lines.index[n.lines.type != '']
n.lines.loc[lines_typed_i, 'num_parallel'] = \
n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.)
n.lines.loc[lines_typed_i, 's_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel)
lines_untyped_i = n.lines.index[n.lines.type == '']
for attr in ('s_nom', 'r', 'x'):
n.lines.loc[lines_untyped_i, attr] = \
n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
n.lines['s_nom_extendable'] = False
def set_parameters_from_optimized(n, n_optim):
lines_typed_i = n.lines.index[n.lines.type != ""]
n.lines.loc[lines_typed_i, "num_parallel"] = n_optim.lines["num_parallel"].reindex(
lines_typed_i, fill_value=0.0
)
n.lines.loc[lines_typed_i, "s_nom"] = (
np.sqrt(3)
* n.lines["type"].map(n.line_types.i_nom)
* n.lines.bus0.map(n.buses.v_nom)
* n.lines.num_parallel
)
lines_untyped_i = n.lines.index[n.lines.type == ""]
for attr in ("s_nom", "r", "x"):
n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex(
lines_untyped_i, fill_value=0.0
)
n.lines["s_nom_extendable"] = False
links_dc_i = n.links.index[n.links.p_nom_extendable]
n.links.loc[links_dc_i, 'p_nom'] = \
n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
n.links.loc[links_dc_i, 'p_nom_extendable'] = False
n.links.loc[links_dc_i, "p_nom"] = n_optim.links["p_nom_opt"].reindex(
links_dc_i, fill_value=0.0
)
n.links.loc[links_dc_i, "p_nom_extendable"] = False
gen_extend_i = n.generators.index[n.generators.p_nom_extendable]
n.generators.loc[gen_extend_i, 'p_nom'] = \
n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False
n.generators.loc[gen_extend_i, "p_nom"] = n_optim.generators["p_nom_opt"].reindex(
gen_extend_i, fill_value=0.0
)
n.generators.loc[gen_extend_i, "p_nom_extendable"] = False
stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \
n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.)
n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False
n.storage_units.loc[stor_units_extend_i, "p_nom"] = n_optim.storage_units[
"p_nom_opt"
].reindex(stor_units_extend_i, fill_value=0.0)
n.storage_units.loc[stor_units_extend_i, "p_nom_extendable"] = False
stor_extend_i = n.stores.index[n.stores.e_nom_extendable]
n.stores.loc[stor_extend_i, 'e_nom'] = \
n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.)
n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False
n.stores.loc[stor_extend_i, "e_nom"] = n_optim.stores["e_nom_opt"].reindex(
stor_extend_i, fill_value=0.0
)
n.stores.loc[stor_extend_i, "e_nom_extendable"] = False
return n
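For typed lines, the rating recomputed above follows the three-phase apparent-power relation (with i_nom per circuit in kA and v_nom in kV, giving s_nom in MVA):
#   s_nom = sqrt(3) * i_nom * v_nom * num_parallel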
if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('solve_operations_network',
simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H')
snakemake = mock_snakemake(
"solve_operations_network",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-BAU-24H",
)
configure_logging(snakemake)
tmpdir = snakemake.config['solving'].get('tmpdir')
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
Path(tmpdir).mkdir(parents=True, exist_ok=True)
@ -109,14 +125,19 @@ if __name__ == "__main__":
n = set_parameters_from_optimized(n, n_optim)
del n_optim
opts = snakemake.wildcards.opts.split('-')
snakemake.config['solving']['options']['skip_iterations'] = False
opts = snakemake.wildcards.opts.split("-")
snakemake.config["solving"]["options"]["skip_iterations"] = False
fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
n = prepare_network(n, snakemake.config['solving']['options'])
n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
solver_logfile=snakemake.log.solver)
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
n = prepare_network(n, snakemake.config["solving"]["options"])
n = solve_network(
n,
snakemake.config,
opts,
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver,
)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])

View File

@ -8,7 +8,7 @@ logging:
level: INFO
format: '%(levelname)s:%(name)s:%(message)s'
run:
name: ""
scenario:
@ -72,8 +72,7 @@ renewable:
corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 31, 32]
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
@ -124,8 +123,7 @@ renewable:
# sector: The economic potential of photovoltaics and concentrating solar
# power." Applied Energy 135 (2014): 704-720.
correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true
excluder_resolution: 200
potential: simple # or conservative
@ -153,7 +151,7 @@ transformers:
type: ''
load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false
@ -232,7 +230,7 @@ solving:
plotting:
map:
figsize: [7, 7]
boundaries: [-10.2, 29, 35, 72]
p_nom:
bus_size_factor: 5.e+4
linewidth_factor: 3.e+3
@ -251,50 +249,50 @@ plotting:
AC_carriers: ["AC line", "AC transformer"]
link_carriers: ["DC line", "Converter AC-DC"]
tech_colors:
"onwind" : "#235ebc"
"onshore wind" : "#235ebc"
'offwind' : "#6895dd"
'offwind-ac' : "#6895dd"
'offshore wind' : "#6895dd"
'offshore wind ac' : "#6895dd"
'offwind-dc' : "#74c6f2"
'offshore wind dc' : "#74c6f2"
"hydro" : "#08ad97"
"hydro+PHS" : "#08ad97"
"PHS" : "#08ad97"
"hydro reservoir" : "#08ad97"
'hydroelectricity' : '#08ad97'
"ror" : "#4adbc8"
"run of river" : "#4adbc8"
'solar' : "#f9d002"
'solar PV' : "#f9d002"
'solar thermal' : '#ffef60'
'biomass' : '#0c6013'
'solid biomass' : '#06540d'
'biogas' : '#23932d'
'waste' : '#68896b'
'geothermal' : '#ba91b1'
"OCGT" : "#d35050"
"gas" : "#d35050"
"natural gas" : "#d35050"
"CCGT" : "#b20101"
"nuclear" : "#ff9000"
"coal" : "#707070"
"lignite" : "#9e5a01"
"oil" : "#262626"
"H2" : "#ea048a"
"hydrogen storage" : "#ea048a"
"battery" : "#b8ea04"
"Electric load" : "#f9d002"
"electricity" : "#f9d002"
"lines" : "#70af1d"
"transmission lines" : "#70af1d"
"AC-AC" : "#70af1d"
"AC line" : "#70af1d"
"links" : "#8a1caf"
"HVDC links" : "#8a1caf"
"DC-DC" : "#8a1caf"
"DC link" : "#8a1caf"
"onwind": "#235ebc"
"onshore wind": "#235ebc"
'offwind': "#6895dd"
'offwind-ac': "#6895dd"
'offshore wind': "#6895dd"
'offshore wind ac': "#6895dd"
'offwind-dc': "#74c6f2"
'offshore wind dc': "#74c6f2"
"hydro": "#08ad97"
"hydro+PHS": "#08ad97"
"PHS": "#08ad97"
"hydro reservoir": "#08ad97"
'hydroelectricity': '#08ad97'
"ror": "#4adbc8"
"run of river": "#4adbc8"
'solar': "#f9d002"
'solar PV': "#f9d002"
'solar thermal': '#ffef60'
'biomass': '#0c6013'
'solid biomass': '#06540d'
'biogas': '#23932d'
'waste': '#68896b'
'geothermal': '#ba91b1'
"OCGT": "#d35050"
"gas": "#d35050"
"natural gas": "#d35050"
"CCGT": "#b20101"
"nuclear": "#ff9000"
"coal": "#707070"
"lignite": "#9e5a01"
"oil": "#262626"
"H2": "#ea048a"
"hydrogen storage": "#ea048a"
"battery": "#b8ea04"
"Electric load": "#f9d002"
"electricity": "#f9d002"
"lines": "#70af1d"
"transmission lines": "#70af1d"
"AC-AC": "#70af1d"
"AC line": "#70af1d"
"links": "#8a1caf"
"HVDC links": "#8a1caf"
"DC-DC": "#8a1caf"
"DC link": "#8a1caf"
nice_names:
OCGT: "Open-Cycle Gas"
CCGT: "Combined-Cycle Gas"