Merge pull request #416 from PyPSA/misc/precommit-ci-2

Add pre-commit CI

Commit 378d1ef82b

.github/workflows/ci.yaml (vendored), 10 lines changed
@@ -55,9 +55,19 @@ jobs:
        run: |
          echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc

      - name: Add solver to environment
        run: |
          echo -e "- glpk\n- ipopt" >> envs/environment.yaml

      - name: Add solver to environment
        run: |
          echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml
        if: ${{ matrix.label }} == 'windows-latest'

      - name: Add solver to environment
        run: |
          echo -e "- glpk\n- ipopt" >> envs/environment.yaml
        if: ${{ matrix.label }} != 'windows-latest'

      - name: Setup Mambaforge
        uses: conda-incubator/setup-miniconda@v2
.pre-commit-config.yaml (new file), 83 lines
@@ -0,0 +1,83 @@
exclude: "^LICENSES"

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
  rev: v4.3.0
  hooks:
  - id: check-merge-conflict
  - id: end-of-file-fixer
  - id: fix-encoding-pragma
  - id: mixed-line-ending
  - id: trailing-whitespace
  - id: check-added-large-files
    args: ["--maxkb=2000"]

# Sort package imports alphabetically
- repo: https://github.com/PyCQA/isort
  rev: 5.10.1
  hooks:
  - id: isort
    args: ["--profile", "black", "--filter-files"]

# Convert relative imports to absolute imports
- repo: https://github.com/MarcoGorelli/absolufy-imports
  rev: v0.3.1
  hooks:
  - id: absolufy-imports

# Find common spelling mistakes in comments and docstrings
- repo: https://github.com/codespell-project/codespell
  rev: v2.2.1
  hooks:
  - id: codespell
    args: ['--ignore-regex="(\b[A-Z]+\b)"', '--ignore-words-list=fom'] # Ignore capital case words, e.g. country codes
    types_or: [python, rst, markdown]
    files: ^(scripts|doc)/

# Make docstrings PEP 257 compliant
- repo: https://github.com/myint/docformatter
  rev: v1.5.0
  hooks:
  - id: docformatter
    args: ["--in-place", "--make-summary-multi-line", "--pre-summary-newline"]

- repo: https://github.com/keewis/blackdoc
  rev: v0.3.5
  hooks:
  - id: blackdoc

# Formatting with "black" coding style
- repo: https://github.com/psf/black
  rev: 22.8.0
  hooks:
  # Format Python files
  - id: black
  # Format Jupyter Python notebooks
  - id: black-jupyter

# Remove output from Jupyter notebooks
- repo: https://github.com/aflc/pre-commit-jupyter
  rev: v1.2.1
  hooks:
  - id: jupyter-notebook-cleanup
    args: ["--remove-kernel-metadata"]

# Do YAML formatting (before the linter checks it for misses)
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
  rev: v2.4.0
  hooks:
  - id: pretty-format-yaml
    args: [--autofix, --indent, "2", --preserve-quotes]

# Format Snakemake rule / workflow files
- repo: https://github.com/snakemake/snakefmt
  rev: 0.4.4
  hooks:
  - id: snakefmt

# For cleaning jupyter notebooks
- repo: https://github.com/aflc/pre-commit-jupyter
  rev: v1.2.1
  hooks:
  - id: jupyter-notebook-cleanup
    exclude: examples/solve-on-remote.ipynb
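To give a feel for what the formatting hooks above change in practice, here is a small illustrative Python module; the module contents are invented for illustration, only the hook behaviour reflects the configuration:

.. code:: python

    # -*- coding: utf-8 -*-
    # ^ inserted by the fix-encoding-pragma hook
    import os  # isort (profile "black"): standard-library imports, alphabetized
    import sys
    from pathlib import Path

    import pandas as pd  # third-party imports land in their own block


    def summary(df):  # black: two blank lines before top-level definitions
        return df.describe()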
Snakefile, 702 lines changed
@@ -6,277 +6,425 @@ from os.path import normpath, exists
from shutil import copyfile, move

from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider

HTTP = HTTPRemoteProvider()

if not exists("config.yaml"):
    copyfile("config.default.yaml", "config.yaml")


configfile: "config.yaml"


run = config.get("run", {})
RDIR = run["name"] + "/" if run.get("name") else ""
CDIR = RDIR if not run.get("shared_cutouts") else ""

COSTS = "resources/" + RDIR + "costs.csv"
ATLITE_NPROCESSES = config['atlite'].get('nprocesses', 4)
ATLITE_NPROCESSES = config["atlite"].get("nprocesses", 4)
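For orientation, a minimal sketch of how the run-directory variables above resolve; the ``run:`` values below are hypothetical and not taken from this PR:

.. code:: python

    # Sketch with assumed config values, mirroring the lines above.
    config = {"run": {"name": "my-run", "shared_cutouts": True}}

    run = config.get("run", {})
    RDIR = run["name"] + "/" if run.get("name") else ""  # -> "my-run/"
    CDIR = RDIR if not run.get("shared_cutouts") else ""  # -> "" (cutouts shared across runs)
    COSTS = "resources/" + RDIR + "costs.csv"  # -> "resources/my-run/costs.csv"
    print(RDIR, CDIR, COSTS)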
wildcard_constraints:
    simpl="[a-zA-Z0-9]*|all",
    clusters="[0-9]+m?|all",
    ll="(v|c)([0-9\.]+|opt|all)|all",
    opts="[-+a-zA-Z0-9\.]*"
    opts="[-+a-zA-Z0-9\.]*",


rule cluster_all_networks:
    input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config['scenario'])
    input:
        expand("networks/" + RDIR + "elec_s{simpl}_{clusters}.nc", **config["scenario"]),


rule extra_components_all_networks:
    input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config['scenario'])
    input:
        expand(
            "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", **config["scenario"]
        ),


rule prepare_all_networks:
    input: expand("networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])
    input:
        expand(
            "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
            **config["scenario"]
        ),


rule solve_all_networks:
    input: expand("results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc", **config['scenario'])
    input:
        expand(
            "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
            **config["scenario"]
        ),
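As a rough illustration of how the ``expand(...)`` target lists above are built, here is a simplified stand-in for Snakemake's ``expand`` with assumed scenario values:

.. code:: python

    # Sketch only: scenario values are assumed, and Snakemake's expand() is
    # approximated with a plain product/format loop.
    from itertools import product

    RDIR = ""  # assumed: no named run
    scenario = {"simpl": [""], "clusters": [6, 37]}  # hypothetical config["scenario"] subset

    pattern = "networks/" + RDIR + "elec_s{simpl}_{clusters}.nc"
    targets = [
        pattern.format(simpl=s, clusters=c)
        for s, c in product(scenario["simpl"], scenario["clusters"])
    ]
    print(targets)  # -> ['networks/elec_s_6.nc', 'networks/elec_s_37.nc']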
if config['enable'].get('prepare_links_p_nom', False):
if config["enable"].get("prepare_links_p_nom", False):

    rule prepare_links_p_nom:
        output: 'data/links_p_nom.csv'
        log: "logs/" + RDIR + "prepare_links_p_nom.log"
        output:
            "data/links_p_nom.csv",
        log:
            "logs/" + RDIR + "prepare_links_p_nom.log",
        threads: 1
        resources: mem_mb=500
        script: 'scripts/prepare_links_p_nom.py'
        resources:
            mem_mb=500,
        script:
            "scripts/prepare_links_p_nom.py"
datafiles = ['ch_cantons.csv', 'je-e-21.03.02.xls',
|
||||
'eez/World_EEZ_v8_2014.shp',
|
||||
'hydro_capacities.csv', 'naturalearth/ne_10m_admin_0_countries.shp',
|
||||
'NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp', 'nama_10r_3popgdp.tsv.gz',
|
||||
'nama_10r_3gdp.tsv.gz', 'corine/g250_clc06_V18_5.tif']
|
||||
datafiles = [
|
||||
"ch_cantons.csv",
|
||||
"je-e-21.03.02.xls",
|
||||
"eez/World_EEZ_v8_2014.shp",
|
||||
"hydro_capacities.csv",
|
||||
"naturalearth/ne_10m_admin_0_countries.shp",
|
||||
"NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
|
||||
"nama_10r_3popgdp.tsv.gz",
|
||||
"nama_10r_3gdp.tsv.gz",
|
||||
"corine/g250_clc06_V18_5.tif",
|
||||
]
|
||||
|
||||
|
||||
if not config.get('tutorial', False):
|
||||
if not config.get("tutorial", False):
|
||||
datafiles.extend(["natura/Natura2000_end2015.shp", "GEBCO_2014_2D.nc"])
|
||||
|
||||
|
||||
if config['enable'].get('retrieve_databundle', True):
|
||||
if config["enable"].get("retrieve_databundle", True):
|
||||
|
||||
rule retrieve_databundle:
|
||||
output: expand('data/bundle/{file}', file=datafiles)
|
||||
log: "logs/" + RDIR + "retrieve_databundle.log"
|
||||
resources: mem_mb=1000
|
||||
script: 'scripts/retrieve_databundle.py'
|
||||
output:
|
||||
expand("data/bundle/{file}", file=datafiles),
|
||||
log:
|
||||
"logs/" + RDIR + "retrieve_databundle.log",
|
||||
resources:
|
||||
mem_mb=1000,
|
||||
script:
|
||||
"scripts/retrieve_databundle.py"
|
||||
|
||||
|
||||
rule retrieve_load_data:
|
||||
input: HTTP.remote("data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv", keep_local=True, static=True)
|
||||
output: "data/load_raw.csv"
|
||||
resources: mem_mb=5000
|
||||
run: move(input[0], output[0])
|
||||
input:
|
||||
HTTP.remote(
|
||||
"data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv",
|
||||
keep_local=True,
|
||||
static=True,
|
||||
),
|
||||
output:
|
||||
"data/load_raw.csv",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
run:
|
||||
move(input[0], output[0])
|
||||
|
||||
|
||||
rule build_load_data:
|
||||
input: "data/load_raw.csv"
|
||||
output: "resources/" + RDIR + "load.csv"
|
||||
log: "logs/" + RDIR + "build_load_data.log"
|
||||
resources: mem_mb=5000
|
||||
script: 'scripts/build_load_data.py'
|
||||
input:
|
||||
"data/load_raw.csv",
|
||||
output:
|
||||
"resources/" + RDIR + "load.csv",
|
||||
log:
|
||||
"logs/" + RDIR + "build_load_data.log",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
script:
|
||||
"scripts/build_load_data.py"
|
||||
|
||||
|
||||
rule build_powerplants:
|
||||
input:
|
||||
base_network="networks/" + RDIR + "base.nc",
|
||||
custom_powerplants="data/custom_powerplants.csv"
|
||||
output: "resources/" + RDIR + "powerplants.csv"
|
||||
log: "logs/" + RDIR + "build_powerplants.log"
|
||||
custom_powerplants="data/custom_powerplants.csv",
|
||||
output:
|
||||
"resources/" + RDIR + "powerplants.csv",
|
||||
log:
|
||||
"logs/" + RDIR + "build_powerplants.log",
|
||||
threads: 1
|
||||
resources: mem_mb=5000
|
||||
script: "scripts/build_powerplants.py"
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
script:
|
||||
"scripts/build_powerplants.py"
|
||||
|
||||
|
||||
rule base_network:
|
||||
input:
|
||||
eg_buses='data/entsoegridkit/buses.csv',
|
||||
eg_lines='data/entsoegridkit/lines.csv',
|
||||
eg_links='data/entsoegridkit/links.csv',
|
||||
eg_converters='data/entsoegridkit/converters.csv',
|
||||
eg_transformers='data/entsoegridkit/transformers.csv',
|
||||
parameter_corrections='data/parameter_corrections.yaml',
|
||||
links_p_nom='data/links_p_nom.csv',
|
||||
links_tyndp='data/links_tyndp.csv',
|
||||
eg_buses="data/entsoegridkit/buses.csv",
|
||||
eg_lines="data/entsoegridkit/lines.csv",
|
||||
eg_links="data/entsoegridkit/links.csv",
|
||||
eg_converters="data/entsoegridkit/converters.csv",
|
||||
eg_transformers="data/entsoegridkit/transformers.csv",
|
||||
parameter_corrections="data/parameter_corrections.yaml",
|
||||
links_p_nom="data/links_p_nom.csv",
|
||||
links_tyndp="data/links_tyndp.csv",
|
||||
country_shapes="resources/" + RDIR + "country_shapes.geojson",
|
||||
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
|
||||
europe_shape="resources/" + RDIR + "europe_shape.geojson"
|
||||
output: "networks/" + RDIR + "base.nc"
|
||||
log: "logs/" + RDIR + "base_network.log"
|
||||
benchmark: "benchmarks/" + RDIR + "base_network"
|
||||
europe_shape="resources/" + RDIR + "europe_shape.geojson",
|
||||
output:
|
||||
"networks/" + RDIR + "base.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "base_network.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "base_network"
|
||||
threads: 1
|
||||
resources: mem_mb=500
|
||||
script: "scripts/base_network.py"
|
||||
resources:
|
||||
mem_mb=500,
|
||||
script:
|
||||
"scripts/base_network.py"
|
||||
|
||||
|
||||
rule build_shapes:
|
||||
input:
|
||||
naturalearth='data/bundle/naturalearth/ne_10m_admin_0_countries.shp',
|
||||
eez='data/bundle/eez/World_EEZ_v8_2014.shp',
|
||||
nuts3='data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp',
|
||||
nuts3pop='data/bundle/nama_10r_3popgdp.tsv.gz',
|
||||
nuts3gdp='data/bundle/nama_10r_3gdp.tsv.gz',
|
||||
ch_cantons='data/bundle/ch_cantons.csv',
|
||||
ch_popgdp='data/bundle/je-e-21.03.02.xls'
|
||||
naturalearth="data/bundle/naturalearth/ne_10m_admin_0_countries.shp",
|
||||
eez="data/bundle/eez/World_EEZ_v8_2014.shp",
|
||||
nuts3="data/bundle/NUTS_2013_60M_SH/data/NUTS_RG_60M_2013.shp",
|
||||
nuts3pop="data/bundle/nama_10r_3popgdp.tsv.gz",
|
||||
nuts3gdp="data/bundle/nama_10r_3gdp.tsv.gz",
|
||||
ch_cantons="data/bundle/ch_cantons.csv",
|
||||
ch_popgdp="data/bundle/je-e-21.03.02.xls",
|
||||
output:
|
||||
country_shapes="resources/" + RDIR + "country_shapes.geojson",
|
||||
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
|
||||
europe_shape="resources/" + RDIR + "europe_shape.geojson",
|
||||
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson"
|
||||
log: "logs/" + RDIR + "build_shapes.log"
|
||||
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
|
||||
log:
|
||||
"logs/" + RDIR + "build_shapes.log",
|
||||
threads: 1
|
||||
resources: mem_mb=500
|
||||
script: "scripts/build_shapes.py"
|
||||
resources:
|
||||
mem_mb=500,
|
||||
script:
|
||||
"scripts/build_shapes.py"
|
||||
|
||||
|
||||
rule build_bus_regions:
|
||||
input:
|
||||
country_shapes="resources/" + RDIR + "country_shapes.geojson",
|
||||
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
|
||||
base_network="networks/" + RDIR + "base.nc"
|
||||
base_network="networks/" + RDIR + "base.nc",
|
||||
output:
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
|
||||
log: "logs/" + RDIR + "build_bus_regions.log"
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
|
||||
log:
|
||||
"logs/" + RDIR + "build_bus_regions.log",
|
||||
threads: 1
|
||||
resources: mem_mb=1000
|
||||
script: "scripts/build_bus_regions.py"
|
||||
resources:
|
||||
mem_mb=1000,
|
||||
script:
|
||||
"scripts/build_bus_regions.py"
|
||||
|
||||
|
||||
if config["enable"].get("build_cutout", False):
|
||||
|
||||
if config['enable'].get('build_cutout', False):
|
||||
rule build_cutout:
|
||||
input:
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
|
||||
output: "cutouts/" + CDIR + "{cutout}.nc"
|
||||
log: "logs/" + CDIR + "build_cutout/{cutout}.log"
|
||||
benchmark: "benchmarks/" + CDIR + "build_cutout_{cutout}"
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
|
||||
output:
|
||||
"cutouts/" + CDIR + "{cutout}.nc",
|
||||
log:
|
||||
"logs/" + CDIR + "build_cutout/{cutout}.log",
|
||||
benchmark:
|
||||
"benchmarks/" + CDIR + "build_cutout_{cutout}"
|
||||
threads: ATLITE_NPROCESSES
|
||||
resources: mem_mb=ATLITE_NPROCESSES * 1000
|
||||
script: "scripts/build_cutout.py"
|
||||
resources:
|
||||
mem_mb=ATLITE_NPROCESSES * 1000,
|
||||
script:
|
||||
"scripts/build_cutout.py"
|
||||
|
||||
|
||||
if config['enable'].get('retrieve_cutout', True):
|
||||
if config["enable"].get("retrieve_cutout", True):
|
||||
|
||||
rule retrieve_cutout:
|
||||
input: HTTP.remote("zenodo.org/record/6382570/files/{cutout}.nc", keep_local=True, static=True)
|
||||
output: "cutouts/" + CDIR + "{cutout}.nc"
|
||||
log: "logs/" + CDIR + "retrieve_cutout_{cutout}.log"
|
||||
resources: mem_mb=5000
|
||||
run: move(input[0], output[0])
|
||||
input:
|
||||
HTTP.remote(
|
||||
"zenodo.org/record/6382570/files/{cutout}.nc",
|
||||
keep_local=True,
|
||||
static=True,
|
||||
),
|
||||
output:
|
||||
"cutouts/" + CDIR + "{cutout}.nc",
|
||||
log:
|
||||
"logs/" + CDIR + "retrieve_cutout_{cutout}.log",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
run:
|
||||
move(input[0], output[0])
|
||||
|
||||
|
||||
if config["enable"].get("retrieve_cost_data", True):
|
||||
|
||||
if config['enable'].get('retrieve_cost_data', True):
|
||||
rule retrieve_cost_data:
|
||||
input: HTTP.remote(f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv", keep_local=True)
|
||||
output: COSTS
|
||||
log: "logs/" + RDIR + "retrieve_cost_data.log"
|
||||
resources: mem_mb=5000
|
||||
run: move(input[0], output[0])
|
||||
input:
|
||||
HTTP.remote(
|
||||
f"raw.githubusercontent.com/PyPSA/technology-data/{config['costs']['version']}/outputs/costs_{config['costs']['year']}.csv",
|
||||
keep_local=True,
|
||||
),
|
||||
output:
|
||||
COSTS,
|
||||
log:
|
||||
"logs/" + RDIR + "retrieve_cost_data.log",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
run:
|
||||
move(input[0], output[0])
|
||||
|
||||
|
||||
if config["enable"].get("build_natura_raster", False):
|
||||
|
||||
if config['enable'].get('build_natura_raster', False):
|
||||
rule build_natura_raster:
|
||||
input:
|
||||
natura="data/bundle/natura/Natura2000_end2015.shp",
|
||||
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config['atlite'])
|
||||
output: "resources/" + RDIR + "natura.tiff"
|
||||
resources: mem_mb=5000
|
||||
log: "logs/" + RDIR + "build_natura_raster.log"
|
||||
script: "scripts/build_natura_raster.py"
|
||||
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
|
||||
output:
|
||||
"resources/" + RDIR + "natura.tiff",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
log:
|
||||
"logs/" + RDIR + "build_natura_raster.log",
|
||||
script:
|
||||
"scripts/build_natura_raster.py"
|
||||
|
||||
|
||||
if config['enable'].get('retrieve_natura_raster', True):
|
||||
if config["enable"].get("retrieve_natura_raster", True):
|
||||
|
||||
rule retrieve_natura_raster:
|
||||
input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True, static=True)
|
||||
output: "resources/" + RDIR + "natura.tiff"
|
||||
resources: mem_mb=5000
|
||||
run: move(input[0], output[0])
|
||||
input:
|
||||
HTTP.remote(
|
||||
"zenodo.org/record/4706686/files/natura.tiff",
|
||||
keep_local=True,
|
||||
static=True,
|
||||
),
|
||||
output:
|
||||
"resources/" + RDIR + "natura.tiff",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
run:
|
||||
move(input[0], output[0])
|
||||
|
||||
|
||||
rule retrieve_ship_raster:
|
||||
input: HTTP.remote("https://zenodo.org/record/6953563/files/shipdensity_global.zip", keep_local=True, static=True)
|
||||
output: "data/shipdensity_global.zip"
|
||||
resources: mem_mb=5000
|
||||
run: move(input[0], output[0])
|
||||
input:
|
||||
HTTP.remote(
|
||||
"https://zenodo.org/record/6953563/files/shipdensity_global.zip",
|
||||
keep_local=True,
|
||||
static=True,
|
||||
),
|
||||
output:
|
||||
"data/shipdensity_global.zip",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
run:
|
||||
move(input[0], output[0])
|
||||
|
||||
|
||||
rule build_ship_raster:
|
||||
input:
|
||||
ship_density="data/shipdensity_global.zip",
|
||||
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config['atlite'])
|
||||
output: "resources/" + RDIR + "shipdensity_raster.nc"
|
||||
log: "logs/" + RDIR + "build_ship_raster.log"
|
||||
resources: mem_mb=5000
|
||||
benchmark: "benchmarks/" + RDIR + "build_ship_raster"
|
||||
script: "scripts/build_ship_raster.py"
|
||||
cutouts=expand("cutouts/" + CDIR + "{cutouts}.nc", **config["atlite"]),
|
||||
output:
|
||||
"resources/" + RDIR + "shipdensity_raster.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "build_ship_raster.log",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "build_ship_raster"
|
||||
script:
|
||||
"scripts/build_ship_raster.py"
|
||||
|
||||
|
||||
rule build_renewable_profiles:
|
||||
input:
|
||||
base_network="networks/" + RDIR + "base.nc",
|
||||
corine="data/bundle/corine/g250_clc06_V18_5.tif",
|
||||
natura=lambda w: ("resources/" + RDIR + "natura.tiff"
|
||||
natura=lambda w: (
|
||||
"resources/" + RDIR + "natura.tiff"
|
||||
if config["renewable"][w.technology]["natura"]
|
||||
else []),
|
||||
gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc"
|
||||
else []
|
||||
),
|
||||
gebco=lambda w: (
|
||||
"data/bundle/GEBCO_2014_2D.nc"
|
||||
if "max_depth" in config["renewable"][w.technology].keys()
|
||||
else []),
|
||||
ship_density= lambda w: ("resources/" + RDIR + "shipdensity_raster.nc"
|
||||
else []
|
||||
),
|
||||
ship_density=lambda w: (
|
||||
"resources/" + RDIR + "shipdensity_raster.nc"
|
||||
if "ship_threshold" in config["renewable"][w.technology].keys()
|
||||
else []),
|
||||
else []
|
||||
),
|
||||
country_shapes="resources/" + RDIR + "country_shapes.geojson",
|
||||
offshore_shapes="resources/" + RDIR + "offshore_shapes.geojson",
|
||||
regions=lambda w: ("resources/" + RDIR + "regions_onshore.geojson"
|
||||
if w.technology in ('onwind', 'solar')
|
||||
else "resources/" + RDIR + "regions_offshore.geojson"),
|
||||
cutout=lambda w: "cutouts/" + CDIR + config["renewable"][w.technology]['cutout'] + ".nc"
|
||||
output: profile="resources/" + RDIR + "profile_{technology}.nc",
|
||||
log: "logs/" + RDIR + "build_renewable_profile_{technology}.log"
|
||||
benchmark: "benchmarks/" + RDIR + "build_renewable_profiles_{technology}"
|
||||
regions=lambda w: (
|
||||
"resources/" + RDIR + "regions_onshore.geojson"
|
||||
if w.technology in ("onwind", "solar")
|
||||
else "resources/" + RDIR + "regions_offshore.geojson"
|
||||
),
|
||||
cutout=lambda w: "cutouts/"
|
||||
+ CDIR
|
||||
+ config["renewable"][w.technology]["cutout"]
|
||||
+ ".nc",
|
||||
output:
|
||||
profile="resources/" + RDIR + "profile_{technology}.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "build_renewable_profile_{technology}.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "build_renewable_profiles_{technology}"
|
||||
threads: ATLITE_NPROCESSES
|
||||
resources: mem_mb=ATLITE_NPROCESSES * 5000
|
||||
wildcard_constraints: technology="(?!hydro).*" # Any technology other than hydro
|
||||
script: "scripts/build_renewable_profiles.py"
|
||||
resources:
|
||||
mem_mb=ATLITE_NPROCESSES * 5000,
|
||||
wildcard_constraints:
|
||||
technology="(?!hydro).*", # Any technology other than hydro
|
||||
script:
|
||||
"scripts/build_renewable_profiles.py"
|
||||
|
||||
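The lambda-valued inputs of ``build_renewable_profiles`` above switch optional files on and off per technology; a minimal sketch of that pattern with assumed config values:

.. code:: python

    # Sketch of the optional-input pattern (config values assumed for illustration).
    config = {"renewable": {"solar": {"natura": True}, "onwind": {"natura": False}}}
    RDIR = ""


    def natura_input(w):
        # Return the raster only if the technology's "natura" switch is on,
        # otherwise no input at all (an empty list), as in the rule above.
        if config["renewable"][w.technology]["natura"]:
            return "resources/" + RDIR + "natura.tiff"
        return []


    class Wildcards:  # stand-in for Snakemake's wildcards object
        technology = "solar"


    print(natura_input(Wildcards()))  # -> resources/natura.tiff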
|
||||
rule build_hydro_profile:
|
||||
input:
|
||||
country_shapes="resources/" + RDIR + "country_shapes.geojson",
|
||||
eia_hydro_generation='data/eia_hydro_annual_generation.csv',
|
||||
cutout=f"cutouts/" + CDIR + config['renewable']['hydro']['cutout'] + ".nc" if "hydro" in config["renewable"] else [],
|
||||
output: "resources/" + RDIR + "profile_hydro.nc"
|
||||
log: "logs/" + RDIR + "build_hydro_profile.log"
|
||||
resources: mem_mb=5000
|
||||
script: 'scripts/build_hydro_profile.py'
|
||||
eia_hydro_generation="data/eia_hydro_annual_generation.csv",
|
||||
cutout=f"cutouts/" + CDIR + config["renewable"]["hydro"]["cutout"] + ".nc"
|
||||
if "hydro" in config["renewable"]
|
||||
else [],
|
||||
output:
|
||||
"resources/" + RDIR + "profile_hydro.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "build_hydro_profile.log",
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
script:
|
||||
"scripts/build_hydro_profile.py"
|
||||
|
||||
|
||||
rule add_electricity:
|
||||
input:
|
||||
**{
|
||||
f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc"
|
||||
for tech in config["renewable"]
|
||||
},
|
||||
**{
|
||||
f"conventional_{carrier}_{attr}": fn
|
||||
for carrier, d in config.get("conventional", {None: {}}).items()
|
||||
for attr, fn in d.items()
|
||||
if str(fn).startswith("data/")
|
||||
},
|
||||
base_network="networks/" + RDIR + "base.nc",
|
||||
tech_costs=COSTS,
|
||||
regions="resources/" + RDIR + "regions_onshore.geojson",
|
||||
powerplants="resources/" + RDIR + "powerplants.csv",
|
||||
hydro_capacities='data/bundle/hydro_capacities.csv',
|
||||
geth_hydro_capacities='data/geth2015_hydro_capacities.csv',
|
||||
hydro_capacities="data/bundle/hydro_capacities.csv",
|
||||
geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
|
||||
load="resources/" + RDIR + "load.csv",
|
||||
nuts3_shapes="resources/" + RDIR + "nuts3_shapes.geojson",
|
||||
**{f"profile_{tech}": "resources/" + RDIR + f"profile_{tech}.nc"
|
||||
for tech in config['renewable']},
|
||||
**{f"conventional_{carrier}_{attr}": fn
|
||||
for carrier, d in config.get('conventional', {None: {}}).items()
|
||||
for attr, fn in d.items() if str(fn).startswith("data/")},
|
||||
output: "networks/" + RDIR + "elec.nc"
|
||||
log: "logs/" + RDIR + "add_electricity.log"
|
||||
benchmark: "benchmarks/" + RDIR + "add_electricity"
|
||||
output:
|
||||
"networks/" + RDIR + "elec.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "add_electricity.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "add_electricity"
|
||||
threads: 1
|
||||
resources: mem_mb=5000
|
||||
script: "scripts/add_electricity.py"
|
||||
resources:
|
||||
mem_mb=5000,
|
||||
script:
|
||||
"scripts/add_electricity.py"
|
||||
|
||||
|
||||
rule simplify_network:
|
||||
@ -284,18 +432,22 @@ rule simplify_network:
|
||||
network="networks/" + RDIR + "elec.nc",
|
||||
tech_costs=COSTS,
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson"
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore.geojson",
|
||||
output:
|
||||
network="networks/" + RDIR + "elec_s{simpl}.nc",
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
|
||||
busmap="resources/" + RDIR + "busmap_elec_s{simpl}.csv",
|
||||
connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv"
|
||||
log: "logs/" + RDIR + "simplify_network/elec_s{simpl}.log"
|
||||
benchmark: "benchmarks/" + RDIR + "simplify_network/elec_s{simpl}"
|
||||
connection_costs="resources/" + RDIR + "connection_costs_s{simpl}.csv",
|
||||
log:
|
||||
"logs/" + RDIR + "simplify_network/elec_s{simpl}.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "simplify_network/elec_s{simpl}"
|
||||
threads: 1
|
||||
resources: mem_mb=4000
|
||||
script: "scripts/simplify_network.py"
|
||||
resources:
|
||||
mem_mb=4000,
|
||||
script:
|
||||
"scripts/simplify_network.py"
|
||||
|
||||
|
||||
rule cluster_network:
|
||||
@ -304,57 +456,84 @@ rule cluster_network:
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}.geojson",
|
||||
busmap=ancient("resources/" + RDIR + "busmap_elec_s{simpl}.csv"),
|
||||
custom_busmap=("data/custom_busmap_elec_s{simpl}_{clusters}.csv"
|
||||
if config["enable"].get("custom_busmap", False) else []),
|
||||
tech_costs=COSTS
|
||||
custom_busmap=(
|
||||
"data/custom_busmap_elec_s{simpl}_{clusters}.csv"
|
||||
if config["enable"].get("custom_busmap", False)
|
||||
else []
|
||||
),
|
||||
tech_costs=COSTS,
|
||||
output:
|
||||
network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
|
||||
regions_onshore="resources/" + RDIR + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
|
||||
regions_offshore="resources/" + RDIR + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
|
||||
regions_onshore="resources/"
|
||||
+ RDIR
|
||||
+ "regions_onshore_elec_s{simpl}_{clusters}.geojson",
|
||||
regions_offshore="resources/"
|
||||
+ RDIR
|
||||
+ "regions_offshore_elec_s{simpl}_{clusters}.geojson",
|
||||
busmap="resources/" + RDIR + "busmap_elec_s{simpl}_{clusters}.csv",
|
||||
linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv"
|
||||
log: "logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log"
|
||||
benchmark: "benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}"
|
||||
linemap="resources/" + RDIR + "linemap_elec_s{simpl}_{clusters}.csv",
|
||||
log:
|
||||
"logs/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "cluster_network/elec_s{simpl}_{clusters}"
|
||||
threads: 1
|
||||
resources: mem_mb=6000
|
||||
script: "scripts/cluster_network.py"
|
||||
resources:
|
||||
mem_mb=6000,
|
||||
script:
|
||||
"scripts/cluster_network.py"
|
||||
|
||||
|
||||
rule add_extra_components:
|
||||
input:
|
||||
network="networks/" + RDIR + "elec_s{simpl}_{clusters}.nc",
|
||||
tech_costs=COSTS,
|
||||
output: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc"
|
||||
log: "logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log"
|
||||
benchmark: "benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec"
|
||||
output:
|
||||
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "add_extra_components/elec_s{simpl}_{clusters}_ec"
|
||||
threads: 1
|
||||
resources: mem_mb=3000
|
||||
script: "scripts/add_extra_components.py"
|
||||
resources:
|
||||
mem_mb=3000,
|
||||
script:
|
||||
"scripts/add_extra_components.py"
|
||||
|
||||
|
||||
rule prepare_network:
|
||||
input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc", tech_costs=COSTS,
|
||||
output: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
log: "logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log"
|
||||
benchmark: "benchmarks/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
input:
|
||||
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
|
||||
tech_costs=COSTS,
|
||||
output:
|
||||
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
log:
|
||||
"logs/" + RDIR + "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.log",
|
||||
benchmark:
|
||||
(
|
||||
"benchmarks/"
|
||||
+ RDIR
|
||||
+ "prepare_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
)
|
||||
threads: 1
|
||||
resources: mem_mb=4000
|
||||
script: "scripts/prepare_network.py"
|
||||
resources:
|
||||
mem_mb=4000,
|
||||
script:
|
||||
"scripts/prepare_network.py"
|
||||
|
||||
|
||||
def memory(w):
|
||||
factor = 3.
|
||||
for o in w.opts.split('-'):
|
||||
m = re.match(r'^(\d+)h$', o, re.IGNORECASE)
|
||||
factor = 3.0
|
||||
for o in w.opts.split("-"):
|
||||
m = re.match(r"^(\d+)h$", o, re.IGNORECASE)
|
||||
if m is not None:
|
||||
factor /= int(m.group(1))
|
||||
break
|
||||
for o in w.opts.split('-'):
|
||||
m = re.match(r'^(\d+)seg$', o, re.IGNORECASE)
|
||||
for o in w.opts.split("-"):
|
||||
m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
|
||||
if m is not None:
|
||||
factor *= int(m.group(1)) / 8760
|
||||
break
|
||||
if w.clusters.endswith('m'):
|
||||
if w.clusters.endswith("m"):
|
||||
return int(factor * (18000 + 180 * int(w.clusters[:-1])))
|
||||
elif w.clusters == "all":
|
||||
return int(factor * (18000 + 180 * 4000))
|
||||
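To make the ``memory`` heuristic above tangible, a rough worked evaluation for hypothetical wildcard values (``opts="Co2L-24H"``, ``clusters="37m"``):

.. code:: python

    # Sketch: evaluates the visible branches of memory() for assumed wildcards.
    import re

    opts, clusters = "Co2L-24H", "37m"

    factor = 3.0
    for o in opts.split("-"):
        m = re.match(r"^(\d+)h$", o, re.IGNORECASE)
        if m is not None:
            factor /= int(m.group(1))  # "24H" -> factor = 3.0 / 24 = 0.125
            break

    if clusters.endswith("m"):
        mem_mb = int(factor * (18000 + 180 * int(clusters[:-1])))
    print(mem_mb)  # -> 3082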
@ -363,44 +542,87 @@ def memory(w):
|
||||
|
||||
|
||||
rule solve_network:
|
||||
input: "networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
output: "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
input:
|
||||
"networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
output:
|
||||
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
log:
|
||||
solver=normpath("logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"),
|
||||
python="logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
|
||||
memory="logs/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log"
|
||||
benchmark: "benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
solver=normpath(
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
|
||||
),
|
||||
python="logs/"
|
||||
+ RDIR
|
||||
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
|
||||
memory="logs/"
|
||||
+ RDIR
|
||||
+ "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
|
||||
benchmark:
|
||||
"benchmarks/" + RDIR + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
threads: 4
|
||||
resources: mem_mb=memory
|
||||
shadow: "minimal"
|
||||
script: "scripts/solve_network.py"
|
||||
resources:
|
||||
mem_mb=memory,
|
||||
shadow:
|
||||
"minimal"
|
||||
script:
|
||||
"scripts/solve_network.py"
|
||||
|
||||
|
||||
rule solve_operations_network:
|
||||
input:
|
||||
unprepared="networks/" + RDIR + "elec_s{simpl}_{clusters}_ec.nc",
|
||||
optimized="results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
output: "results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc"
|
||||
optimized="results/networks/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
output:
|
||||
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
|
||||
log:
|
||||
solver=normpath("logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"),
|
||||
python="logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
|
||||
memory="logs/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log"
|
||||
benchmark: "benchmarks/" + RDIR + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
solver=normpath(
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
|
||||
),
|
||||
python="logs/"
|
||||
+ RDIR
|
||||
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
|
||||
memory="logs/"
|
||||
+ RDIR
|
||||
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
|
||||
benchmark:
|
||||
(
|
||||
"benchmarks/"
|
||||
+ RDIR
|
||||
+ "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
|
||||
)
|
||||
threads: 4
|
||||
resources: mem_mb=(lambda w: 5000 + 372 * int(w.clusters))
|
||||
shadow: "minimal"
|
||||
script: "scripts/solve_operations_network.py"
|
||||
resources:
|
||||
mem_mb=(lambda w: 5000 + 372 * int(w.clusters)),
|
||||
shadow:
|
||||
"minimal"
|
||||
script:
|
||||
"scripts/solve_operations_network.py"
|
||||
|
||||
|
||||
rule plot_network:
|
||||
input:
|
||||
network="results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
tech_costs=COSTS
|
||||
network="results/networks/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
tech_costs=COSTS,
|
||||
output:
|
||||
only_map="results/plots/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}",
|
||||
ext="results/plots/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}"
|
||||
log: "logs/" + RDIR + "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log"
|
||||
script: "scripts/plot_network.py"
|
||||
only_map="results/plots/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}.{ext}",
|
||||
ext="results/plots/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_ext.{ext}",
|
||||
log:
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "plot_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{attr}_{ext}.log",
|
||||
script:
|
||||
"scripts/plot_network.py"
|
||||
|
||||
|
||||
def input_make_summary(w):
|
||||
@ -411,39 +633,79 @@ def input_make_summary(w):
|
||||
ll = [l for l in ll if l[0] == w.ll[0]]
|
||||
else:
|
||||
ll = w.ll
|
||||
return ([COSTS] +
|
||||
expand("results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
return [COSTS] + expand(
|
||||
"results/networks/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
|
||||
ll=ll,
|
||||
**{k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
|
||||
for k in ["simpl", "clusters", "opts"]}))
|
||||
**{
|
||||
k: config["scenario"][k] if getattr(w, k) == "all" else getattr(w, k)
|
||||
for k in ["simpl", "clusters", "opts"]
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
rule make_summary:
|
||||
input: input_make_summary
|
||||
output: directory("results/summaries/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}")
|
||||
log: "logs/" + RDIR + "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
|
||||
resources: mem_mb=500
|
||||
script: "scripts/make_summary.py"
|
||||
input:
|
||||
input_make_summary,
|
||||
output:
|
||||
directory(
|
||||
"results/summaries/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
|
||||
),
|
||||
log:
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "make_summary/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.log",
|
||||
resources:
|
||||
mem_mb=500,
|
||||
script:
|
||||
"scripts/make_summary.py"
|
||||
|
||||
|
||||
rule plot_summary:
|
||||
input: "results/summaries/" + RDIR + "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}"
|
||||
output: "results/plots/" + RDIR + "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}"
|
||||
log: "logs/" + RDIR + "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log"
|
||||
resources: mem_mb=500
|
||||
script: "scripts/plot_summary.py"
|
||||
input:
|
||||
"results/summaries/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}",
|
||||
output:
|
||||
"results/plots/"
|
||||
+ RDIR
|
||||
+ "summary_{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}.{ext}",
|
||||
log:
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "plot_summary/{summary}_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_{country}_{ext}.log",
|
||||
resources:
|
||||
mem_mb=500,
|
||||
script:
|
||||
"scripts/plot_summary.py"
|
||||
|
||||
|
||||
def input_plot_p_nom_max(w):
|
||||
return [("results/networks/" + RDIR + "elec_s{simpl}{maybe_cluster}.nc"
|
||||
.format(maybe_cluster=('' if c == 'full' else ('_' + c)), **w))
|
||||
for c in w.clusts.split(",")]
|
||||
return [
|
||||
(
|
||||
"results/networks/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}{maybe_cluster}.nc".format(
|
||||
maybe_cluster=("" if c == "full" else ("_" + c)), **w
|
||||
)
|
||||
)
|
||||
for c in w.clusts.split(",")
|
||||
]
|
||||
|
||||
|
||||
rule plot_p_nom_max:
|
||||
input: input_plot_p_nom_max
|
||||
output: "results/plots/" + RDIR + "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}"
|
||||
log: "logs/" + RDIR + "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log"
|
||||
resources: mem_mb=500
|
||||
script: "scripts/plot_p_nom_max.py"
|
||||
|
||||
input:
|
||||
input_plot_p_nom_max,
|
||||
output:
|
||||
"results/plots/"
|
||||
+ RDIR
|
||||
+ "elec_s{simpl}_cum_p_nom_max_{clusts}_{techs}_{country}.{ext}",
|
||||
log:
|
||||
"logs/"
|
||||
+ RDIR
|
||||
+ "plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log",
|
||||
resources:
|
||||
mem_mb=500,
|
||||
script:
|
||||
"scripts/plot_p_nom_max.py"
|
||||
|
@ -120,8 +120,7 @@ renewable:
|
||||
corine:
|
||||
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
|
||||
# development of the REMix model and application for Europe. ( p.42 / p.28)
|
||||
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
|
||||
24, 25, 26, 27, 28, 29, 31, 32]
|
||||
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
|
||||
distance: 1000
|
||||
distance_grid_codes: [1, 2, 3, 4, 5, 6]
|
||||
natura: true
|
||||
@ -182,8 +181,7 @@ renewable:
|
||||
# This correction factor of 0.854337 may be in order if using reanalysis data.
|
||||
# for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304
|
||||
# correction_factor: 0.854337
|
||||
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
|
||||
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
|
||||
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
|
||||
natura: true
|
||||
excluder_resolution: 100
|
||||
potential: simple # or conservative
|
||||
@ -221,7 +219,7 @@ transformers:
|
||||
type: ''
|
||||
|
||||
load:
|
||||
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
|
||||
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
|
||||
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
|
||||
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
|
||||
manual_adjustments: true # false
|
||||
|
@ -73,8 +73,7 @@ renewable:
|
||||
corine:
|
||||
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
|
||||
# development of the REMix model and application for Europe. ( p.42 / p.28)
|
||||
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
|
||||
24, 25, 26, 27, 28, 29, 31, 32]
|
||||
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
|
||||
distance: 1000
|
||||
distance_grid_codes: [1, 2, 3, 4, 5, 6]
|
||||
natura: true
|
||||
@ -126,8 +125,7 @@ renewable:
|
||||
# power." Applied Energy 135 (2014): 704-720.
|
||||
# This correction factor of 0.854337 may be in order if using reanalysis data.
|
||||
# correction_factor: 0.854337
|
||||
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
|
||||
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
|
||||
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
|
||||
natura: true
|
||||
excluder_resolution: 200
|
||||
potential: simple # or conservative
|
||||
@ -155,7 +153,7 @@ transformers:
|
||||
type: ''
|
||||
|
||||
load:
|
||||
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
|
||||
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
|
||||
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
|
||||
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
|
||||
manual_adjustments: true # false
|
||||
|
@ -72,7 +72,7 @@ Step 3 - Installation of Cloud SDK
|
||||
|
||||
- Download Google Cloud SDK `SDK <https://cloud.google.com/sdk>`_. Check that you are logged in in your Google account. The link should lead you to the Windows installation of Google Cloud SDK.
|
||||
- Follow the "Quickstart for Windows - Before you begin" steps.
|
||||
- After the successfull installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell":
|
||||
- After the successful installation and initialization, close the Google Cloud SDK reopen it again. Type the following command into the "Google Cloud SDK Shell":
|
||||
|
||||
.. code:: bash
|
||||
|
||||
@ -107,7 +107,7 @@ Make sure that your instance is operating for the next steps.
|
||||
- Click on the advanced setting. SSH -> Authentication.
|
||||
- Option 1. Click on the Tools button and "Install Public Key into Server..". Somewhere in your folder structure must be a public key. I found it with the following folder syntax on my local windows computer -> :\Users\...\.ssh (there should be a PKK file).
|
||||
- Option 2. Click on the Tools button and "Generate new key pair...". Save the private key at a folder you remember and add it to the "private key file" field in WinSCP. Upload the public key to the metadeta of your instance.
|
||||
- Click ok and save. Then click Login. If successfull WinSCP will open on the left side your local computer folder structure and on the right side the folder strucutre of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successfull)
|
||||
- Click ok and save. Then click Login. If successful WinSCP will open on the left side your local computer folder structure and on the right side the folder structure of your VM. (If you followed Option 2 and its not initially working. Stop your instance, refresh the website, reopen the WinSCP field. Afterwards your your Login should be successful)
|
||||
|
||||
If you had struggle with the above steps, you could also try `this video <https://www.youtube.com/watch?v=lYx1oQkEF0E>`_.
|
||||
|
||||
|
doc/conf.py, 84 lines changed
@@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: 20017-2020 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
@ -16,14 +17,14 @@
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys
|
||||
import os
|
||||
import shlex
|
||||
import sys
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
sys.path.insert(0, os.path.abspath('../scripts'))
|
||||
sys.path.insert(0, os.path.abspath("../scripts"))
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
@ -36,47 +37,47 @@ sys.path.insert(0, os.path.abspath('../scripts'))
|
||||
extensions = [
|
||||
#'sphinx.ext.autodoc',
|
||||
#'sphinx.ext.autosummary',
|
||||
'sphinx.ext.intersphinx',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.napoleon',
|
||||
'sphinx.ext.graphviz',
|
||||
"sphinx.ext.intersphinx",
|
||||
"sphinx.ext.todo",
|
||||
"sphinx.ext.mathjax",
|
||||
"sphinx.ext.napoleon",
|
||||
"sphinx.ext.graphviz",
|
||||
#'sphinx.ext.pngmath',
|
||||
#'sphinxcontrib.tikz',
|
||||
#'rinoh.frontend.sphinx',
|
||||
'sphinx.ext.imgconverter', # for SVG conversion
|
||||
"sphinx.ext.imgconverter", # for SVG conversion
|
||||
]
|
||||
|
||||
autodoc_default_flags = ['members']
|
||||
autodoc_default_flags = ["members"]
|
||||
autosummary_generate = True
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
templates_path = ["_templates"]
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
source_suffix = ".rst"
|
||||
|
||||
# The encoding of source files.
|
||||
# source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
master_doc = "index"
|
||||
|
||||
# General information about the project.
|
||||
project = u'PyPSA-Eur'
|
||||
copyright = u'2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)'
|
||||
author = u'Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)'
|
||||
project = "PyPSA-Eur"
|
||||
copyright = "2017-2022 Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS); 2019-2022 Fabian Neumann (TUB, KIT)"
|
||||
author = "Jonas Hoersch (KIT, FIAS), Fabian Hofmann (TUB, FIAS), David Schlachtberger (FIAS), Tom Brown (TUB, KIT, FIAS), Fabian Neumann (TUB, KIT)"
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = u'0.6'
|
||||
version = "0.6"
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = u'0.6.0'
|
||||
release = "0.6.0"
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
@ -93,7 +94,7 @@ language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
exclude_patterns = ["_build"]
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all
|
||||
# documents.
|
||||
@ -111,7 +112,7 @@ exclude_patterns = ['_build']
|
||||
# show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
pygments_style = "sphinx"
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
# modindex_common_prefix = []
|
||||
@ -127,14 +128,14 @@ todo_include_todos = True
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme = "sphinx_rtd_theme"
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
html_theme_options = {
|
||||
'display_version': True,
|
||||
'sticky_navigation': True,
|
||||
"display_version": True,
|
||||
"sticky_navigation": True,
|
||||
}
|
||||
|
||||
|
||||
@ -225,20 +226,17 @@ html_css_files = ["theme_overrides.css"]
|
||||
# html_search_scorer = 'scorer.js'
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'PyPSAEurdoc'
|
||||
htmlhelp_basename = "PyPSAEurdoc"
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#'figure_align': 'htbp',
|
||||
}
|
||||
@ -247,16 +245,19 @@ latex_elements = {
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'PyPSA-Eur.tex', u'PyPSA-Eur Documentation',
|
||||
u'author', 'manual'),
|
||||
(master_doc, "PyPSA-Eur.tex", "PyPSA-Eur Documentation", "author", "manual"),
|
||||
]
|
||||
|
||||
|
||||
# Added for rinoh http://www.mos6581.org/rinohtype/quickstart.html
|
||||
rinoh_documents = [(master_doc, # top-level file (index.rst)
|
||||
'PyPSA-Eur', # output (target.pdf)
|
||||
'PyPSA-Eur Documentation', # document title
|
||||
'author')] # document author
|
||||
rinoh_documents = [
|
||||
(
|
||||
master_doc, # top-level file (index.rst)
|
||||
"PyPSA-Eur", # output (target.pdf)
|
||||
"PyPSA-Eur Documentation", # document title
|
||||
"author",
|
||||
)
|
||||
] # document author
|
||||
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
@ -284,10 +285,7 @@ rinoh_documents = [(master_doc, # top-level file (index.rst)
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'pypsa-eur', u'PyPSA-Eur Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
man_pages = [(master_doc, "pypsa-eur", "PyPSA-Eur Documentation", [author], 1)]
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
# man_show_urls = False
|
||||
@ -299,9 +297,15 @@ man_pages = [
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'PyPSA-Eur', u'PyPSA-Eur Documentation',
|
||||
author, 'PyPSA-Eur', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
(
|
||||
master_doc,
|
||||
"PyPSA-Eur",
|
||||
"PyPSA-Eur Documentation",
|
||||
author,
|
||||
"PyPSA-Eur",
|
||||
"One line description of project.",
|
||||
"Miscellaneous",
|
||||
),
|
||||
]
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
@ -318,4 +322,4 @@ texinfo_documents = [
|
||||
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
intersphinx_mapping = {'https://docs.python.org/': None}
|
||||
intersphinx_mapping = {"https://docs.python.org/": None}
|
||||
|
@ -16,10 +16,20 @@ to our `GitHub repository <https://github.com/PyPSA/PyPSA-Eur>`_.
|
||||
* If you start working on a feature in the code, let us know by opening an issue or a draft pull request.
|
||||
This helps all of us to keep an overview on what is being done and helps to avoid a situation where we
|
||||
are doing the same work twice in parallel.
|
||||
* We encourage you to use the `PEP 8 coding style <https://www.python.org/dev/peps/pep-0008/>`_.
|
||||
|
||||
For linting, formatting and checking your code contributions
against our guidelines (e.g. we use `Black <https://github.com/psf/black>`_ as code style),
use `pre-commit <https://pre-commit.com/index.html>`_:
|
||||
|
||||
1. Installation ``conda install -c conda-forge pre-commit`` or ``pip install pre-commit``
|
||||
2. Usage:
|
||||
* To automatically activate ``pre-commit`` on every ``git commit``: Run ``pre-commit install``
|
||||
* To manually run it: ``pre-commit run --all``
|
||||
|
||||
Note that installing `pre-commit` locally is not strictly necessary. If you create a Pull Request the `pre-commit CI` will be triggered automatically and take care of the checks.
|
||||
|
||||
For all code contributions we follow the four eyes principle (two person principle), i.e. all suggested code
|
||||
including our own are reviewed by a second person before they are incoporated into our repository.
|
||||
including our own are reviewed by a second person before they are incorporated into our repository.
|
||||
|
||||
If you are unfamiliar with pull requests, the GitHub help pages have a nice `guide <https://help.github.com/en/articles/about-pull-requests>`_.
|
||||
|
||||
|
@ -10,7 +10,8 @@ Release Notes
|
||||
Upcoming Release
|
||||
================
|
||||
|
||||
* new feature
|
||||
* Individual commits are now tested against pre-commit hooks. This includes black style formatting, sorting of package imports, Snakefile formatting and others. Installation instructions for pre-commit can be found `here <https://pre-commit.com/>`_.
|
||||
* Pre-commit CI is now part of the repository's CI.
|
||||
|
||||
|
||||
PyPSA-Eur 0.6.0 (10th September 2022)
|
||||
@ -188,7 +189,7 @@ PyPSA-Eur 0.5.0 (27th July 2022)
|
||||
|
||||
* Enable parallel computing with new dask version.
|
||||
|
||||
* Restore compatibility of ``mock_snakemake`` with lastest Snakemake versions.
|
||||
* Restore compatibility of ``mock_snakemake`` with latest Snakemake versions.
|
||||
|
||||
* Script ``build_bus_regions``: move voronoi partition from vresutils to script.
|
||||
|
||||
@ -277,7 +278,7 @@ PyPSA-Eur 0.4.0 (22th September 2021)
|
||||
[`#261 <https://github.com/PyPSA/pypsa-eur/pull/261>`_].
|
||||
|
||||
* The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to
|
||||
``cutouts/be-03-2013-era5.nc`` to accomodate tutorial and productive
|
||||
``cutouts/be-03-2013-era5.nc`` to accommodate tutorial and productive
|
||||
cutouts side-by-side.
|
||||
|
||||
* The flag ``keep_all_available_areas`` in the configuration for renewable
|
||||
@ -470,7 +471,7 @@ PyPSA-Eur 0.2.0 (8th June 2020)
|
||||
|
||||
* Removed the ``id`` column for custom power plants in ``data/custom_powerplants.csv`` to avoid custom power plants with conflicting ids getting attached to the wrong bus [`#131 <https://github.com/PyPSA/pypsa-eur/pull/131>`_].
|
||||
|
||||
* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all availabe weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_].
|
||||
* Add option ``renewables: {carrier}: keep_all_available_areas:`` to use all available weather cells for renewable profile and potential generation. The default ignores weather cells where only less than 1 MW can be installed [`#150 <https://github.com/PyPSA/pypsa-eur/pull/150>`_].
|
||||
|
||||
* Added a function ``_helpers.load_network()`` which loads a network with overridden components specified in ``snakemake.config['override_components']`` [`#128 <https://github.com/PyPSA/pypsa-eur/pull/128>`_].
|
||||
|
||||
|
@ -119,13 +119,9 @@ clustered down to 6 buses and every 24 hours aggregated to one snapshot. The com
|
||||
|
||||
orders ``snakemake`` to run the script ``solve_network`` that produces the solved network and stores it in ``.../pypsa-eur/results/networks`` with the name ``elec_s_6_ec_lcopt_Co2L-24H.nc``:
|
||||
|
||||
.. code::
|
||||
|
||||
rule solve_network:
|
||||
input: "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
output: "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc"
|
||||
[...]
|
||||
script: "scripts/solve_network.py"
|
||||
.. literalinclude:: ../Snakefile
|
||||
:start-at: rule solve_network:
|
||||
:end-before: rule solve_operations_network:
|
||||
|
||||
.. until https://github.com/snakemake/snakemake/issues/46 closed
|
||||
|
||||
@ -245,7 +241,7 @@ Once the whole worktree is finished, it should show state so in the terminal:
|
||||
|
||||
You will notice that many intermediate stages are saved, namely the outputs of each individual ``snakemake`` rule.
|
||||
|
||||
You can produce any output file occuring in the ``Snakefile`` by running
|
||||
You can produce any output file occurring in the ``Snakefile`` by running
|
||||
|
||||
.. code:: bash
|
||||
|
||||
|
@ -123,7 +123,7 @@ These cutouts will be stored in a folder specified by ``{cutout}``.
|
||||
The ``{technology}`` wildcard
|
||||
=============================
|
||||
|
||||
The ``{technology}`` wildcard specifies for which renewable energy technology to produce availablity time
|
||||
The ``{technology}`` wildcard specifies for which renewable energy technology to produce availability time
|
||||
series and potentials using the rule :mod:`build_renewable_profiles`.
|
||||
It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` but **not** ``hydro``
|
||||
(since hydroelectric plant profiles are created by a different rule).
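For illustration only, a small sketch of how the admissible wildcard values could be enumerated from the configuration; it assumes the renewable carriers are listed as keys of the ``renewable:`` section in ``config.yaml`` and deliberately drops ``hydro``, mirroring the restriction above.

.. code:: python

    # Sketch: list valid {technology} wildcard values from the configuration.
    # Assumes carriers are defined under the `renewable:` section of config.yaml.
    import yaml

    with open("config.yaml") as f:
        config = yaml.safe_load(f)

    technologies = [c for c in config["renewable"] if c != "hydro"]
    print(technologies)  # e.g. ['onwind', 'offwind-ac', 'offwind-dc', 'solar']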
|
||||
@ -155,4 +155,5 @@ formats depends on the used backend. To query the supported file types on your s
|
||||
.. code:: python
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
plt.gcf().canvas.get_supported_filetypes()
|
||||
|
@ -1,11 +1,14 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
|
||||
REGION_COLS = ['geometry', 'name', 'x', 'y', 'country']
|
||||
import pandas as pd
|
||||
|
||||
REGION_COLS = ["geometry", "name", "x", "y", "country"]
|
||||
|
||||
|
||||
def configure_logging(snakemake, skip_handlers=False):
|
||||
"""
|
||||
@ -28,21 +31,26 @@ def configure_logging(snakemake, skip_handlers=False):
|
||||
|
||||
import logging
|
||||
|
||||
kwargs = snakemake.config.get('logging', dict()).copy()
|
||||
kwargs = snakemake.config.get("logging", dict()).copy()
|
||||
kwargs.setdefault("level", "INFO")
|
||||
|
||||
if skip_handlers is False:
|
||||
fallback_path = Path(__file__).parent.joinpath('..', 'logs', f"{snakemake.rule}.log")
|
||||
logfile = snakemake.log.get('python', snakemake.log[0] if snakemake.log
|
||||
else fallback_path)
|
||||
fallback_path = Path(__file__).parent.joinpath(
|
||||
"..", "logs", f"{snakemake.rule}.log"
|
||||
)
|
||||
logfile = snakemake.log.get(
|
||||
"python", snakemake.log[0] if snakemake.log else fallback_path
|
||||
)
|
||||
kwargs.update(
|
||||
{'handlers': [
|
||||
{
|
||||
"handlers": [
|
||||
# Prefer the 'python' log, otherwise take the first log for each
|
||||
# Snakemake rule
|
||||
logging.FileHandler(logfile),
|
||||
logging.StreamHandler()
|
||||
logging.StreamHandler(),
|
||||
]
|
||||
})
|
||||
}
|
||||
)
|
||||
logging.basicConfig(**kwargs)
|
||||
|
||||
|
||||
@ -80,128 +88,172 @@ def load_network(import_name=None, custom_components=None):
|
||||
|
||||
if custom_components is not None:
|
||||
override_components = pypsa.components.components.copy()
|
||||
override_component_attrs = Dict({k : v.copy() for k,v in pypsa.components.component_attrs.items()})
|
||||
override_component_attrs = Dict(
|
||||
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
|
||||
)
|
||||
for k, v in custom_components.items():
|
||||
override_components.loc[k] = v['component']
|
||||
override_component_attrs[k] = pd.DataFrame(columns = ["type","unit","default","description","status"])
|
||||
for attr, val in v['attributes'].items():
|
||||
override_components.loc[k] = v["component"]
|
||||
override_component_attrs[k] = pd.DataFrame(
|
||||
columns=["type", "unit", "default", "description", "status"]
|
||||
)
|
||||
for attr, val in v["attributes"].items():
|
||||
override_component_attrs[k].loc[attr] = val
|
||||
|
||||
return pypsa.Network(import_name=import_name,
|
||||
return pypsa.Network(
|
||||
import_name=import_name,
|
||||
override_components=override_components,
|
||||
override_component_attrs=override_component_attrs)
|
||||
override_component_attrs=override_component_attrs,
|
||||
)
|
||||
|
||||
|
||||
def pdbcast(v, h):
|
||||
return pd.DataFrame(v.values.reshape((-1, 1)) * h.values,
|
||||
index=v.index, columns=h.index)
|
||||
return pd.DataFrame(
|
||||
v.values.reshape((-1, 1)) * h.values, index=v.index, columns=h.index
|
||||
)
|
||||
|
||||
|
||||
def load_network_for_plots(fn, tech_costs, config, combine_hydro_ps=True):
|
||||
import pypsa
|
||||
from add_electricity import update_transmission_costs, load_costs
|
||||
from add_electricity import load_costs, update_transmission_costs
|
||||
|
||||
n = pypsa.Network(fn)
|
||||
|
||||
n.loads["carrier"] = n.loads.bus.map(n.buses.carrier) + " load"
|
||||
n.stores["carrier"] = n.stores.bus.map(n.buses.carrier)
|
||||
|
||||
n.links["carrier"] = (n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier))
|
||||
n.links["carrier"] = (
|
||||
n.links.bus0.map(n.buses.carrier) + "-" + n.links.bus1.map(n.buses.carrier)
|
||||
)
|
||||
n.lines["carrier"] = "AC line"
|
||||
n.transformers["carrier"] = "AC transformer"
|
||||
|
||||
n.lines['s_nom'] = n.lines['s_nom_min']
|
||||
n.links['p_nom'] = n.links['p_nom_min']
|
||||
n.lines["s_nom"] = n.lines["s_nom_min"]
|
||||
n.links["p_nom"] = n.links["p_nom_min"]
|
||||
|
||||
if combine_hydro_ps:
|
||||
n.storage_units.loc[n.storage_units.carrier.isin({'PHS', 'hydro'}), 'carrier'] = 'hydro+PHS'
|
||||
n.storage_units.loc[
|
||||
n.storage_units.carrier.isin({"PHS", "hydro"}), "carrier"
|
||||
] = "hydro+PHS"
|
||||
|
||||
# if the carrier was not set on the heat storage units
|
||||
# bus_carrier = n.storage_units.bus.map(n.buses.carrier)
|
||||
# n.storage_units.loc[bus_carrier == "heat","carrier"] = "water tanks"
|
||||
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.
|
||||
costs = load_costs(tech_costs, config['costs'], config['electricity'], Nyears)
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
|
||||
costs = load_costs(tech_costs, config["costs"], config["electricity"], Nyears)
|
||||
update_transmission_costs(n, costs)
|
||||
|
||||
return n
|
||||
|
||||
|
||||
def update_p_nom_max(n):
|
||||
# if extendable carriers (solar/onwind/...) have capacity >= 0,
|
||||
# e.g. existing assets from the OPSD project are included to the network,
|
||||
# the installed capacity might exceed the expansion limit.
|
||||
# Hence, we update the assumptions.
|
||||
|
||||
n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1)
|
||||
n.generators.p_nom_max = n.generators[["p_nom_min", "p_nom_max"]].max(1)
|
||||
|
||||
|
||||
def aggregate_p_nom(n):
|
||||
return pd.concat([
|
||||
return pd.concat(
|
||||
[
|
||||
n.generators.groupby("carrier").p_nom_opt.sum(),
|
||||
n.storage_units.groupby("carrier").p_nom_opt.sum(),
|
||||
n.links.groupby("carrier").p_nom_opt.sum(),
|
||||
n.loads_t.p.groupby(n.loads.carrier,axis=1).sum().mean()
|
||||
])
|
||||
n.loads_t.p.groupby(n.loads.carrier, axis=1).sum().mean(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def aggregate_p(n):
|
||||
return pd.concat([
|
||||
return pd.concat(
|
||||
[
|
||||
n.generators_t.p.sum().groupby(n.generators.carrier).sum(),
|
||||
n.storage_units_t.p.sum().groupby(n.storage_units.carrier).sum(),
|
||||
n.stores_t.p.sum().groupby(n.stores.carrier).sum(),
|
||||
-n.loads_t.p.sum().groupby(n.loads.carrier).sum()
|
||||
])
|
||||
-n.loads_t.p.sum().groupby(n.loads.carrier).sum(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def aggregate_e_nom(n):
|
||||
return pd.concat([
|
||||
(n.storage_units["p_nom_opt"]*n.storage_units["max_hours"]).groupby(n.storage_units["carrier"]).sum(),
|
||||
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum()
|
||||
])
|
||||
return pd.concat(
|
||||
[
|
||||
(n.storage_units["p_nom_opt"] * n.storage_units["max_hours"])
|
||||
.groupby(n.storage_units["carrier"])
|
||||
.sum(),
|
||||
n.stores["e_nom_opt"].groupby(n.stores.carrier).sum(),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def aggregate_p_curtailed(n):
|
||||
return pd.concat([
|
||||
((n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt) - n.generators_t.p.sum())
|
||||
.groupby(n.generators.carrier).sum()),
|
||||
((n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum())
|
||||
.groupby(n.storage_units.carrier).sum())
|
||||
])
|
||||
return pd.concat(
|
||||
[
|
||||
(
|
||||
(
|
||||
n.generators_t.p_max_pu.sum().multiply(n.generators.p_nom_opt)
|
||||
- n.generators_t.p.sum()
|
||||
)
|
||||
.groupby(n.generators.carrier)
|
||||
.sum()
|
||||
),
|
||||
(
|
||||
(n.storage_units_t.inflow.sum() - n.storage_units_t.p.sum())
|
||||
.groupby(n.storage_units.carrier)
|
||||
.sum()
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def aggregate_costs(n, flatten=False, opts=None, existing_only=False):
|
||||
|
||||
components = dict(Link=("p_nom", "p0"),
|
||||
components = dict(
|
||||
Link=("p_nom", "p0"),
|
||||
Generator=("p_nom", "p"),
|
||||
StorageUnit=("p_nom", "p"),
|
||||
Store=("e_nom", "p"),
|
||||
Line=("s_nom", None),
|
||||
Transformer=("s_nom", None))
|
||||
Transformer=("s_nom", None),
|
||||
)
|
||||
|
||||
costs = {}
|
||||
for c, (p_nom, p_attr) in zip(
|
||||
n.iterate_components(components.keys(), skip_empty=False),
|
||||
components.values()
|
||||
n.iterate_components(components.keys(), skip_empty=False), components.values()
|
||||
):
|
||||
if c.df.empty: continue
|
||||
if not existing_only: p_nom += "_opt"
|
||||
costs[(c.list_name, 'capital')] = (c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
|
||||
if c.df.empty:
|
||||
continue
|
||||
if not existing_only:
|
||||
p_nom += "_opt"
|
||||
costs[(c.list_name, "capital")] = (
|
||||
(c.df[p_nom] * c.df.capital_cost).groupby(c.df.carrier).sum()
|
||||
)
|
||||
if p_attr is not None:
|
||||
p = c.pnl[p_attr].sum()
|
||||
if c.name == 'StorageUnit':
|
||||
if c.name == "StorageUnit":
|
||||
p = p.loc[p > 0]
|
||||
costs[(c.list_name, 'marginal')] = (p*c.df.marginal_cost).groupby(c.df.carrier).sum()
|
||||
costs[(c.list_name, "marginal")] = (
|
||||
(p * c.df.marginal_cost).groupby(c.df.carrier).sum()
|
||||
)
|
||||
costs = pd.concat(costs)
|
||||
|
||||
if flatten:
|
||||
assert opts is not None
|
||||
conv_techs = opts['conv_techs']
|
||||
conv_techs = opts["conv_techs"]
|
||||
|
||||
costs = costs.reset_index(level=0, drop=True)
|
||||
costs = costs['capital'].add(
|
||||
costs['marginal'].rename({t: t + ' marginal' for t in conv_techs}),
|
||||
fill_value=0.
|
||||
costs = costs["capital"].add(
|
||||
costs["marginal"].rename({t: t + " marginal" for t in conv_techs}),
|
||||
fill_value=0.0,
|
||||
)
|
||||
|
||||
return costs
|
||||
|
||||
|
||||
def progress_retrieve(url, file):
|
||||
import urllib
|
||||
|
||||
from progressbar import ProgressBar
|
||||
|
||||
pbar = ProgressBar(0, 100)
|
||||
@ -211,6 +263,7 @@ def progress_retrieve(url, file):
|
||||
|
||||
urllib.request.urlretrieve(url, file, reporthook=dlProgress)
|
||||
|
||||
|
||||
def get_aggregation_strategies(aggregation_strategies):
|
||||
# default aggregation strategies that cannot be defined in .yaml format must be specified within
|
||||
# the function, otherwise (when defaults are passed in the function's definition) they get lost
|
||||
@ -222,7 +275,7 @@ def get_aggregation_strategies(aggregation_strategies):
|
||||
bus_strategies = dict(country=_make_consense("Bus", "country"))
|
||||
bus_strategies.update(aggregation_strategies.get("buses", {}))
|
||||
|
||||
generator_strategies = {'build_year': lambda x: 0, 'lifetime': lambda x: np.inf}
|
||||
generator_strategies = {"build_year": lambda x: 0, "lifetime": lambda x: np.inf}
|
||||
generator_strategies.update(aggregation_strategies.get("generators", {}))
|
||||
|
||||
return bus_strategies, generator_strategies
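A minimal usage sketch of the helper above, not part of the diff itself: it assumes only the behaviour visible in the surrounding hunk, namely that the function accepts a (possibly empty) dictionary with optional ``buses`` and ``generators`` sub-dictionaries and returns the two strategy mappings.

.. code:: python

    # Sketch: obtain the default aggregation strategies and override one entry.
    # Assumes scripts/_helpers.py provides get_aggregation_strategies as shown above.
    from _helpers import get_aggregation_strategies

    # An empty dict yields the built-in defaults (e.g. build_year -> 0, lifetime -> inf).
    bus_strategies, generator_strategies = get_aggregation_strategies({})

    # A config-style dictionary overrides or extends individual strategies.
    custom = {"generators": {"p_nom_max": "sum"}}
    bus_strategies, generator_strategies = get_aggregation_strategies(custom)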
|
||||
@ -244,15 +297,17 @@ def mock_snakemake(rulename, **wildcards):
|
||||
keyword arguments fixing the wildcards. Only necessary if wildcards are
|
||||
needed.
|
||||
"""
|
||||
import snakemake as sm
|
||||
import os
|
||||
|
||||
import snakemake as sm
|
||||
from packaging.version import Version, parse
|
||||
from pypsa.descriptors import Dict
|
||||
from snakemake.script import Snakemake
|
||||
from packaging.version import Version, parse
|
||||
|
||||
script_dir = Path(__file__).parent.resolve()
|
||||
assert Path.cwd().resolve() == script_dir, \
|
||||
f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
|
||||
assert (
|
||||
Path.cwd().resolve() == script_dir
|
||||
), f"mock_snakemake has to be run from the repository scripts directory {script_dir}"
|
||||
os.chdir(script_dir.parent)
|
||||
for p in sm.SNAKEFILE_CHOICES:
|
||||
if os.path.exists(p):
|
||||
@ -273,9 +328,18 @@ def mock_snakemake(rulename, **wildcards):
|
||||
io[i] = os.path.abspath(io[i])
|
||||
|
||||
make_accessable(job.input, job.output, job.log)
|
||||
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
|
||||
job.threads, job.resources, job.log,
|
||||
job.dag.workflow.config, job.rule.name, None,)
|
||||
snakemake = Snakemake(
|
||||
job.input,
|
||||
job.output,
|
||||
job.params,
|
||||
job.wildcards,
|
||||
job.threads,
|
||||
job.resources,
|
||||
job.log,
|
||||
job.dag.workflow.config,
|
||||
job.rule.name,
|
||||
None,
|
||||
)
|
||||
# create log and output dir if not existent
|
||||
for path in list(snakemake.log) + list(snakemake.output):
|
||||
Path(path).parent.mkdir(parents=True, exist_ok=True)
|
||||
|
File diff suppressed because it is too large
@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
@ -50,14 +51,16 @@ The rule :mod:`add_extra_components` attaches additional extendable components t
|
||||
- ``Stores`` of carrier 'H2' and/or 'battery' in combination with ``Links``. If this option is chosen, the script adds extra buses with corresponding carrier where energy ``Stores`` are attached and which are connected to the corresponding power buses via two links, one each for charging and discharging. This leads to three investment variables for the energy capacity, charging and discharging capacity of the storage unit.
|
||||
"""
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pypsa
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
|
||||
from add_electricity import (load_costs, add_nice_carrier_names,
|
||||
_add_missing_carriers_from_costs)
|
||||
import pandas as pd
|
||||
import pypsa
|
||||
from _helpers import configure_logging
|
||||
from add_electricity import (
|
||||
_add_missing_carriers_from_costs,
|
||||
add_nice_carrier_names,
|
||||
load_costs,
|
||||
)
|
||||
|
||||
idx = pd.IndexSlice
|
||||
|
||||
@ -65,8 +68,8 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def attach_storageunits(n, costs, elec_opts):
|
||||
carriers = elec_opts['extendable_carriers']['StorageUnit']
|
||||
max_hours = elec_opts['max_hours']
|
||||
carriers = elec_opts["extendable_carriers"]["StorageUnit"]
|
||||
max_hours = elec_opts["max_hours"]
|
||||
|
||||
_add_missing_carriers_from_costs(n, costs, carriers)
|
||||
|
||||
@ -78,130 +81,166 @@ def attach_storageunits(n, costs, elec_opts):
|
||||
for carrier in carriers:
|
||||
roundtrip_correction = 0.5 if carrier == "battery" else 1
|
||||
|
||||
n.madd("StorageUnit", buses_i, ' ' + carrier,
|
||||
n.madd(
|
||||
"StorageUnit",
|
||||
buses_i,
|
||||
" " + carrier,
|
||||
bus=buses_i,
|
||||
carrier=carrier,
|
||||
p_nom_extendable=True,
|
||||
capital_cost=costs.at[carrier, 'capital_cost'],
|
||||
marginal_cost=costs.at[carrier, 'marginal_cost'],
|
||||
efficiency_store=costs.at[lookup_store[carrier], 'efficiency']**roundtrip_correction,
|
||||
efficiency_dispatch=costs.at[lookup_dispatch[carrier], 'efficiency']**roundtrip_correction,
|
||||
capital_cost=costs.at[carrier, "capital_cost"],
|
||||
marginal_cost=costs.at[carrier, "marginal_cost"],
|
||||
efficiency_store=costs.at[lookup_store[carrier], "efficiency"]
|
||||
** roundtrip_correction,
|
||||
efficiency_dispatch=costs.at[lookup_dispatch[carrier], "efficiency"]
|
||||
** roundtrip_correction,
|
||||
max_hours=max_hours[carrier],
|
||||
cyclic_state_of_charge=True
|
||||
cyclic_state_of_charge=True,
|
||||
)
|
||||
|
||||
|
||||
def attach_stores(n, costs, elec_opts):
|
||||
carriers = elec_opts['extendable_carriers']['Store']
|
||||
carriers = elec_opts["extendable_carriers"]["Store"]
|
||||
|
||||
_add_missing_carriers_from_costs(n, costs, carriers)
|
||||
|
||||
buses_i = n.buses.index
|
||||
bus_sub_dict = {k: n.buses[k].values for k in ['x', 'y', 'country']}
|
||||
bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}
|
||||
|
||||
if 'H2' in carriers:
|
||||
if "H2" in carriers:
|
||||
h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
|
||||
|
||||
n.madd("Store", h2_buses_i,
|
||||
n.madd(
|
||||
"Store",
|
||||
h2_buses_i,
|
||||
bus=h2_buses_i,
|
||||
carrier='H2',
|
||||
carrier="H2",
|
||||
e_nom_extendable=True,
|
||||
e_cyclic=True,
|
||||
capital_cost=costs.at["hydrogen storage underground", "capital_cost"])
|
||||
capital_cost=costs.at["hydrogen storage underground", "capital_cost"],
|
||||
)
|
||||
|
||||
n.madd("Link", h2_buses_i + " Electrolysis",
|
||||
n.madd(
|
||||
"Link",
|
||||
h2_buses_i + " Electrolysis",
|
||||
bus0=buses_i,
|
||||
bus1=h2_buses_i,
|
||||
carrier='H2 electrolysis',
|
||||
carrier="H2 electrolysis",
|
||||
p_nom_extendable=True,
|
||||
efficiency=costs.at["electrolysis", "efficiency"],
|
||||
capital_cost=costs.at["electrolysis", "capital_cost"],
|
||||
marginal_cost=costs.at["electrolysis", "marginal_cost"])
|
||||
marginal_cost=costs.at["electrolysis", "marginal_cost"],
|
||||
)
|
||||
|
||||
n.madd("Link", h2_buses_i + " Fuel Cell",
|
||||
n.madd(
|
||||
"Link",
|
||||
h2_buses_i + " Fuel Cell",
|
||||
bus0=h2_buses_i,
|
||||
bus1=buses_i,
|
||||
carrier='H2 fuel cell',
|
||||
carrier="H2 fuel cell",
|
||||
p_nom_extendable=True,
|
||||
efficiency=costs.at["fuel cell", "efficiency"],
|
||||
# NB: fixed cost is per MWel
|
||||
capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"],
|
||||
marginal_cost=costs.at["fuel cell", "marginal_cost"])
|
||||
capital_cost=costs.at["fuel cell", "capital_cost"]
|
||||
* costs.at["fuel cell", "efficiency"],
|
||||
marginal_cost=costs.at["fuel cell", "marginal_cost"],
|
||||
)
|
||||
|
||||
if 'battery' in carriers:
|
||||
b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict)
|
||||
if "battery" in carriers:
|
||||
b_buses_i = n.madd(
|
||||
"Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
|
||||
)
|
||||
|
||||
n.madd("Store", b_buses_i,
|
||||
n.madd(
|
||||
"Store",
|
||||
b_buses_i,
|
||||
bus=b_buses_i,
|
||||
carrier='battery',
|
||||
carrier="battery",
|
||||
e_cyclic=True,
|
||||
e_nom_extendable=True,
|
||||
capital_cost=costs.at['battery storage', 'capital_cost'],
|
||||
marginal_cost=costs.at["battery", "marginal_cost"])
|
||||
capital_cost=costs.at["battery storage", "capital_cost"],
|
||||
marginal_cost=costs.at["battery", "marginal_cost"],
|
||||
)
|
||||
|
||||
n.madd("Link", b_buses_i + " charger",
|
||||
n.madd(
|
||||
"Link",
|
||||
b_buses_i + " charger",
|
||||
bus0=buses_i,
|
||||
bus1=b_buses_i,
|
||||
carrier='battery charger',
|
||||
carrier="battery charger",
|
||||
# the efficiencies are "round trip efficiencies"
|
||||
efficiency=costs.at['battery inverter', 'efficiency']**0.5,
|
||||
capital_cost=costs.at['battery inverter', 'capital_cost'],
|
||||
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
|
||||
capital_cost=costs.at["battery inverter", "capital_cost"],
|
||||
p_nom_extendable=True,
|
||||
marginal_cost=costs.at["battery inverter", "marginal_cost"])
|
||||
marginal_cost=costs.at["battery inverter", "marginal_cost"],
|
||||
)
|
||||
|
||||
n.madd("Link", b_buses_i + " discharger",
|
||||
n.madd(
|
||||
"Link",
|
||||
b_buses_i + " discharger",
|
||||
bus0=b_buses_i,
|
||||
bus1=buses_i,
|
||||
carrier='battery discharger',
|
||||
efficiency=costs.at['battery inverter','efficiency']**0.5,
|
||||
carrier="battery discharger",
|
||||
efficiency=costs.at["battery inverter", "efficiency"] ** 0.5,
|
||||
p_nom_extendable=True,
|
||||
marginal_cost=costs.at["battery inverter", "marginal_cost"])
|
||||
marginal_cost=costs.at["battery inverter", "marginal_cost"],
|
||||
)
|
||||
|
||||
|
||||
def attach_hydrogen_pipelines(n, costs, elec_opts):
|
||||
ext_carriers = elec_opts['extendable_carriers']
|
||||
as_stores = ext_carriers.get('Store', [])
|
||||
ext_carriers = elec_opts["extendable_carriers"]
|
||||
as_stores = ext_carriers.get("Store", [])
|
||||
|
||||
if 'H2 pipeline' not in ext_carriers.get('Link',[]): return
|
||||
if "H2 pipeline" not in ext_carriers.get("Link", []):
|
||||
return
|
||||
|
||||
assert 'H2' in as_stores, ("Attaching hydrogen pipelines requires hydrogen "
|
||||
assert "H2" in as_stores, (
|
||||
"Attaching hydrogen pipelines requires hydrogen "
|
||||
"storage to be modelled as Store-Link-Bus combination. See "
|
||||
"`config.yaml` at `electricity: extendable_carriers: Store:`.")
|
||||
"`config.yaml` at `electricity: extendable_carriers: Store:`."
|
||||
)
|
||||
|
||||
# determine bus pairs
|
||||
attrs = ["bus0", "bus1", "length"]
|
||||
candidates = pd.concat([n.lines[attrs], n.links.query('carrier=="DC"')[attrs]])\
|
||||
.reset_index(drop=True)
|
||||
candidates = pd.concat(
|
||||
[n.lines[attrs], n.links.query('carrier=="DC"')[attrs]]
|
||||
).reset_index(drop=True)
|
||||
|
||||
# remove bus pair duplicates regardless of order of bus0 and bus1
|
||||
h2_links = candidates[~pd.DataFrame(np.sort(candidates[['bus0', 'bus1']])).duplicated()]
|
||||
h2_links = candidates[
|
||||
~pd.DataFrame(np.sort(candidates[["bus0", "bus1"]])).duplicated()
|
||||
]
|
||||
h2_links.index = h2_links.apply(lambda c: f"H2 pipeline {c.bus0}-{c.bus1}", axis=1)
|
||||
|
||||
# add pipelines
|
||||
n.madd("Link",
|
||||
n.madd(
|
||||
"Link",
|
||||
h2_links.index,
|
||||
bus0=h2_links.bus0.values + " H2",
|
||||
bus1=h2_links.bus1.values + " H2",
|
||||
p_min_pu=-1,
|
||||
p_nom_extendable=True,
|
||||
length=h2_links.length.values,
|
||||
capital_cost=costs.at['H2 pipeline','capital_cost']*h2_links.length,
|
||||
efficiency=costs.at['H2 pipeline','efficiency'],
|
||||
carrier="H2 pipeline")
|
||||
capital_cost=costs.at["H2 pipeline", "capital_cost"] * h2_links.length,
|
||||
efficiency=costs.at["H2 pipeline", "efficiency"],
|
||||
carrier="H2 pipeline",
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('add_extra_components',
|
||||
simpl='', clusters=5)
|
||||
|
||||
snakemake = mock_snakemake("add_extra_components", simpl="", clusters=5)
|
||||
configure_logging(snakemake)
|
||||
|
||||
n = pypsa.Network(snakemake.input.network)
|
||||
elec_config = snakemake.config['electricity']
|
||||
elec_config = snakemake.config["electricity"]
|
||||
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.
|
||||
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], elec_config, Nyears)
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
|
||||
costs = load_costs(
|
||||
snakemake.input.tech_costs, snakemake.config["costs"], elec_config, Nyears
|
||||
)
|
||||
|
||||
attach_storageunits(n, costs, elec_config)
|
||||
attach_stores(n, costs, elec_config)
|
||||
|
@ -1,10 +1,14 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# coding: utf-8
|
||||
"""
|
||||
Creates the network topology from a `ENTSO-E map extract <https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA network.
|
||||
Creates the network topology from a `ENTSO-E map extract.
|
||||
|
||||
<https://github.com/PyPSA/GridKit/tree/master/entsoe>`_ (March 2022) as a PyPSA
|
||||
network.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -59,25 +63,24 @@ Outputs
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pypsa
|
||||
import yaml
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
import networkx as nx
|
||||
|
||||
from scipy import spatial
|
||||
from scipy.sparse import csgraph
|
||||
from itertools import product
|
||||
|
||||
from shapely.geometry import Point, LineString
|
||||
import shapely, shapely.prepared, shapely.wkt
|
||||
import geopandas as gpd
|
||||
import networkx as nx
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import pypsa
|
||||
import shapely
|
||||
import shapely.prepared
|
||||
import shapely.wkt
|
||||
import yaml
|
||||
from _helpers import configure_logging
|
||||
from scipy import spatial
|
||||
from scipy.sparse import csgraph
|
||||
from shapely.geometry import LineString, Point
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -97,48 +100,73 @@ def _get_country(df):
|
||||
|
||||
|
||||
def _find_closest_links(links, new_links, distance_upper_bound=1.5):
|
||||
treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
|
||||
for s in links.geometry])
|
||||
querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
|
||||
new_links[['x2', 'y2', 'x1', 'y1']]])
|
||||
treecoords = np.asarray(
|
||||
[
|
||||
np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
|
||||
for s in links.geometry
|
||||
]
|
||||
)
|
||||
querycoords = np.vstack(
|
||||
[new_links[["x1", "y1", "x2", "y2"]], new_links[["x2", "y2", "x1", "y1"]]]
|
||||
)
|
||||
tree = spatial.KDTree(treecoords)
|
||||
dist, ind = tree.query(querycoords, distance_upper_bound=distance_upper_bound)
|
||||
found_b = ind < len(links)
|
||||
found_i = np.arange(len(new_links) * 2)[found_b] % len(new_links)
|
||||
return pd.DataFrame(dict(D=dist[found_b],
|
||||
i=links.index[ind[found_b] % len(links)]),
|
||||
index=new_links.index[found_i]).sort_values(by='D')\
|
||||
[lambda ds: ~ds.index.duplicated(keep='first')]\
|
||||
.sort_index()['i']
|
||||
return (
|
||||
pd.DataFrame(
|
||||
dict(D=dist[found_b], i=links.index[ind[found_b] % len(links)]),
|
||||
index=new_links.index[found_i],
|
||||
)
|
||||
.sort_values(by="D")[lambda ds: ~ds.index.duplicated(keep="first")]
|
||||
.sort_index()["i"]
|
||||
)
|
||||
|
||||
|
||||
def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
|
||||
buses = (pd.read_csv(eg_buses, quotechar="'",
|
||||
true_values=['t'], false_values=['f'],
|
||||
dtype=dict(bus_id="str"))
|
||||
buses = (
|
||||
pd.read_csv(
|
||||
eg_buses,
|
||||
quotechar="'",
|
||||
true_values=["t"],
|
||||
false_values=["f"],
|
||||
dtype=dict(bus_id="str"),
|
||||
)
|
||||
.set_index("bus_id")
|
||||
.drop(['station_id'], axis=1)
|
||||
.rename(columns=dict(voltage='v_nom')))
|
||||
.drop(["station_id"], axis=1)
|
||||
.rename(columns=dict(voltage="v_nom"))
|
||||
)
|
||||
|
||||
buses['carrier'] = buses.pop('dc').map({True: 'DC', False: 'AC'})
|
||||
buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool)
|
||||
buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
|
||||
buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
|
||||
|
||||
# remove all buses outside of all countries including exclusive economic zones (offshore)
|
||||
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
|
||||
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
|
||||
europe_shape_prepped = shapely.prepared.prep(europe_shape)
|
||||
buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
|
||||
buses_in_europe_b = buses[["x", "y"]].apply(
|
||||
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
|
||||
)
|
||||
|
||||
buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull()
|
||||
logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages'])))
|
||||
buses_with_v_nom_to_keep_b = (
|
||||
buses.v_nom.isin(config_elec["voltages"]) | buses.v_nom.isnull()
|
||||
)
|
||||
logger.info(
|
||||
"Removing buses with voltages {}".format(
|
||||
pd.Index(buses.v_nom.unique()).dropna().difference(config_elec["voltages"])
|
||||
)
|
||||
)
|
||||
|
||||
return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])
|
||||
|
||||
|
||||
def _load_transformers_from_eg(buses, eg_transformers):
|
||||
transformers = (pd.read_csv(eg_transformers, quotechar="'",
|
||||
true_values=['t'], false_values=['f'],
|
||||
dtype=dict(transformer_id='str', bus0='str', bus1='str'))
|
||||
.set_index('transformer_id'))
|
||||
transformers = pd.read_csv(
|
||||
eg_transformers,
|
||||
quotechar="'",
|
||||
true_values=["t"],
|
||||
false_values=["f"],
|
||||
dtype=dict(transformer_id="str", bus0="str", bus1="str"),
|
||||
).set_index("transformer_id")
|
||||
|
||||
transformers = _remove_dangling_branches(transformers, buses)
|
||||
|
||||
@ -146,33 +174,40 @@ def _load_transformers_from_eg(buses, eg_transformers):
|
||||
|
||||
|
||||
def _load_converters_from_eg(buses, eg_converters):
|
||||
converters = (pd.read_csv(eg_converters, quotechar="'",
|
||||
true_values=['t'], false_values=['f'],
|
||||
dtype=dict(converter_id='str', bus0='str', bus1='str'))
|
||||
.set_index('converter_id'))
|
||||
converters = pd.read_csv(
|
||||
eg_converters,
|
||||
quotechar="'",
|
||||
true_values=["t"],
|
||||
false_values=["f"],
|
||||
dtype=dict(converter_id="str", bus0="str", bus1="str"),
|
||||
).set_index("converter_id")
|
||||
|
||||
converters = _remove_dangling_branches(converters, buses)
|
||||
|
||||
converters['carrier'] = 'B2B'
|
||||
converters["carrier"] = "B2B"
|
||||
|
||||
return converters
|
||||
|
||||
|
||||
def _load_links_from_eg(buses, eg_links):
|
||||
links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'],
|
||||
dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool"))
|
||||
.set_index('link_id'))
|
||||
links = pd.read_csv(
|
||||
eg_links,
|
||||
quotechar="'",
|
||||
true_values=["t"],
|
||||
false_values=["f"],
|
||||
dtype=dict(link_id="str", bus0="str", bus1="str", under_construction="bool"),
|
||||
).set_index("link_id")
|
||||
|
||||
links['length'] /= 1e3
|
||||
links["length"] /= 1e3
|
||||
|
||||
# Skagerrak Link is connected to 132kV bus which is removed in _load_buses_from_eg.
|
||||
# Connect to neighboring 380kV bus
|
||||
links.loc[links.bus1=='6396', 'bus1'] = '6398'
|
||||
links.loc[links.bus1 == "6396", "bus1"] = "6398"
|
||||
|
||||
links = _remove_dangling_branches(links, buses)
|
||||
|
||||
# Add DC line parameters
|
||||
links['carrier'] = 'DC'
|
||||
links["carrier"] = "DC"
|
||||
|
||||
return links
|
||||
|
||||
@ -181,15 +216,21 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
|
||||
links_tyndp = pd.read_csv(links_tyndp)
|
||||
|
||||
# remove all links from list which lie outside all of the desired countries
|
||||
europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
|
||||
europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]
|
||||
europe_shape_prepped = shapely.prepared.prep(europe_shape)
|
||||
x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
|
||||
x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
|
||||
x1y1_in_europe_b = links_tyndp[["x1", "y1"]].apply(
|
||||
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
|
||||
)
|
||||
x2y2_in_europe_b = links_tyndp[["x2", "y2"]].apply(
|
||||
lambda p: europe_shape_prepped.contains(Point(p)), axis=1
|
||||
)
|
||||
is_within_covered_countries_b = x1y1_in_europe_b & x2y2_in_europe_b
|
||||
|
||||
if not is_within_covered_countries_b.all():
|
||||
logger.info("TYNDP links outside of the covered area (skipping): " +
|
||||
", ".join(links_tyndp.loc[~ is_within_covered_countries_b, "Name"]))
|
||||
logger.info(
|
||||
"TYNDP links outside of the covered area (skipping): "
|
||||
+ ", ".join(links_tyndp.loc[~is_within_covered_countries_b, "Name"])
|
||||
)
|
||||
|
||||
links_tyndp = links_tyndp.loc[is_within_covered_countries_b]
|
||||
if links_tyndp.empty:
|
||||
@ -197,25 +238,32 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
|
||||
|
||||
has_replaces_b = links_tyndp.replaces.notnull()
|
||||
oids = dict(Bus=_get_oid(buses), Link=_get_oid(links))
|
||||
keep_b = dict(Bus=pd.Series(True, index=buses.index),
|
||||
Link=pd.Series(True, index=links.index))
|
||||
for reps in links_tyndp.loc[has_replaces_b, 'replaces']:
|
||||
for comps in reps.split(':'):
|
||||
oids_to_remove = comps.split('.')
|
||||
keep_b = dict(
|
||||
Bus=pd.Series(True, index=buses.index), Link=pd.Series(True, index=links.index)
|
||||
)
|
||||
for reps in links_tyndp.loc[has_replaces_b, "replaces"]:
|
||||
for comps in reps.split(":"):
|
||||
oids_to_remove = comps.split(".")
|
||||
c = oids_to_remove.pop(0)
|
||||
keep_b[c] &= ~oids[c].isin(oids_to_remove)
|
||||
buses = buses.loc[keep_b['Bus']]
|
||||
links = links.loc[keep_b['Link']]
|
||||
buses = buses.loc[keep_b["Bus"]]
|
||||
links = links.loc[keep_b["Link"]]
|
||||
|
||||
links_tyndp["j"] = _find_closest_links(links, links_tyndp, distance_upper_bound=0.20)
|
||||
links_tyndp["j"] = _find_closest_links(
|
||||
links, links_tyndp, distance_upper_bound=0.20
|
||||
)
|
||||
# Corresponds approximately to 20km tolerances
|
||||
|
||||
if links_tyndp["j"].notnull().any():
|
||||
logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"]))
|
||||
logger.info(
|
||||
"TYNDP links already in the dataset (skipping): "
|
||||
+ ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])
|
||||
)
|
||||
links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()]
|
||||
if links_tyndp.empty: return buses, links
|
||||
if links_tyndp.empty:
|
||||
return buses, links
|
||||
|
||||
tree = spatial.KDTree(buses[['x', 'y']])
|
||||
tree = spatial.KDTree(buses[["x", "y"]])
|
||||
_, ind0 = tree.query(links_tyndp[["x1", "y1"]])
|
||||
ind0_b = ind0 < len(buses)
|
||||
links_tyndp.loc[ind0_b, "bus0"] = buses.index[ind0[ind0_b]]
|
||||
@ -224,24 +272,42 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
|
||||
ind1_b = ind1 < len(buses)
|
||||
links_tyndp.loc[ind1_b, "bus1"] = buses.index[ind1[ind1_b]]
|
||||
|
||||
links_tyndp_located_b = links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
|
||||
links_tyndp_located_b = (
|
||||
links_tyndp["bus0"].notnull() & links_tyndp["bus1"].notnull()
|
||||
)
|
||||
if not links_tyndp_located_b.all():
|
||||
logger.warning("Did not find connected buses for TYNDP links (skipping): " + ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"]))
|
||||
logger.warning(
|
||||
"Did not find connected buses for TYNDP links (skipping): "
|
||||
+ ", ".join(links_tyndp.loc[~links_tyndp_located_b, "Name"])
|
||||
)
|
||||
links_tyndp = links_tyndp.loc[links_tyndp_located_b]
|
||||
|
||||
logger.info("Adding the following TYNDP links: " + ", ".join(links_tyndp["Name"]))
|
||||
|
||||
links_tyndp = links_tyndp[["bus0", "bus1"]].assign(
|
||||
carrier='DC',
|
||||
carrier="DC",
|
||||
p_nom=links_tyndp["Power (MW)"],
|
||||
length=links_tyndp["Length (given) (km)"].fillna(links_tyndp["Length (distance*1.2) (km)"]),
|
||||
length=links_tyndp["Length (given) (km)"].fillna(
|
||||
links_tyndp["Length (distance*1.2) (km)"]
|
||||
),
|
||||
under_construction=True,
|
||||
underground=False,
|
||||
geometry=(links_tyndp[["x1", "y1", "x2", "y2"]]
|
||||
.apply(lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1)),
|
||||
tags=('"name"=>"' + links_tyndp["Name"] + '", ' +
|
||||
'"ref"=>"' + links_tyndp["Ref"] + '", ' +
|
||||
'"status"=>"' + links_tyndp["status"] + '"')
|
||||
geometry=(
|
||||
links_tyndp[["x1", "y1", "x2", "y2"]].apply(
|
||||
lambda s: str(LineString([[s.x1, s.y1], [s.x2, s.y2]])), axis=1
|
||||
)
|
||||
),
|
||||
tags=(
|
||||
'"name"=>"'
|
||||
+ links_tyndp["Name"]
|
||||
+ '", '
|
||||
+ '"ref"=>"'
|
||||
+ links_tyndp["Ref"]
|
||||
+ '", '
|
||||
+ '"status"=>"'
|
||||
+ links_tyndp["status"]
|
||||
+ '"'
|
||||
),
|
||||
)
|
||||
|
||||
links_tyndp.index = "T" + links_tyndp.index.astype(str)
|
||||
@ -252,13 +318,25 @@ def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
|
||||
|
||||
|
||||
def _load_lines_from_eg(buses, eg_lines):
|
||||
lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
|
||||
dtype=dict(line_id='str', bus0='str', bus1='str',
|
||||
underground="bool", under_construction="bool"))
|
||||
.set_index('line_id')
|
||||
.rename(columns=dict(voltage='v_nom', circuits='num_parallel')))
|
||||
lines = (
|
||||
pd.read_csv(
|
||||
eg_lines,
|
||||
quotechar="'",
|
||||
true_values=["t"],
|
||||
false_values=["f"],
|
||||
dtype=dict(
|
||||
line_id="str",
|
||||
bus0="str",
|
||||
bus1="str",
|
||||
underground="bool",
|
||||
under_construction="bool",
|
||||
),
|
||||
)
|
||||
.set_index("line_id")
|
||||
.rename(columns=dict(voltage="v_nom", circuits="num_parallel"))
|
||||
)
|
||||
|
||||
lines['length'] /= 1e3
|
||||
lines["length"] /= 1e3
|
||||
|
||||
lines = _remove_dangling_branches(lines, buses)
|
||||
|
||||
@ -269,18 +347,20 @@ def _apply_parameter_corrections(n, parameter_corrections):
|
||||
with open(parameter_corrections) as f:
|
||||
corrections = yaml.safe_load(f)
|
||||
|
||||
if corrections is None: return
|
||||
if corrections is None:
|
||||
return
|
||||
|
||||
for component, attrs in corrections.items():
|
||||
df = n.df(component)
|
||||
oid = _get_oid(df)
|
||||
if attrs is None: continue
|
||||
if attrs is None:
|
||||
continue
|
||||
|
||||
for attr, repls in attrs.items():
|
||||
for i, r in repls.items():
|
||||
if i == 'oid':
|
||||
if i == "oid":
|
||||
r = oid.map(repls["oid"]).dropna()
|
||||
elif i == 'index':
|
||||
elif i == "index":
|
||||
r = pd.Series(repls["index"])
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
@ -289,78 +369,87 @@ def _apply_parameter_corrections(n, parameter_corrections):
|
||||
|
||||
|
||||
def _set_electrical_parameters_lines(lines, config):
|
||||
v_noms = config['electricity']['voltages']
|
||||
linetypes = config['lines']['types']
|
||||
v_noms = config["electricity"]["voltages"]
|
||||
linetypes = config["lines"]["types"]
|
||||
|
||||
for v_nom in v_noms:
|
||||
lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom]
|
||||
lines.loc[lines["v_nom"] == v_nom, "type"] = linetypes[v_nom]
|
||||
|
||||
lines['s_max_pu'] = config['lines']['s_max_pu']
|
||||
lines["s_max_pu"] = config["lines"]["s_max_pu"]
|
||||
|
||||
return lines
|
||||
|
||||
|
||||
def _set_lines_s_nom_from_linetypes(n):
|
||||
n.lines['s_nom'] = (
|
||||
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
|
||||
n.lines['v_nom'] * n.lines.num_parallel
|
||||
n.lines["s_nom"] = (
|
||||
np.sqrt(3)
|
||||
* n.lines["type"].map(n.line_types.i_nom)
|
||||
* n.lines["v_nom"]
|
||||
* n.lines.num_parallel
|
||||
)
|
||||
|
||||
|
||||
def _set_electrical_parameters_links(links, config, links_p_nom):
|
||||
if links.empty: return links
|
||||
if links.empty:
|
||||
return links
|
||||
|
||||
p_max_pu = config['links'].get('p_max_pu', 1.)
|
||||
links['p_max_pu'] = p_max_pu
|
||||
links['p_min_pu'] = -p_max_pu
|
||||
p_max_pu = config["links"].get("p_max_pu", 1.0)
|
||||
links["p_max_pu"] = p_max_pu
|
||||
links["p_min_pu"] = -p_max_pu
|
||||
|
||||
links_p_nom = pd.read_csv(links_p_nom)
|
||||
|
||||
# filter links that are not in operation anymore
|
||||
removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
|
||||
removed_b = links_p_nom.Remarks.str.contains("Shut down|Replaced", na=False)
|
||||
links_p_nom = links_p_nom[~removed_b]
|
||||
|
||||
# find closest link for all links in links_p_nom
|
||||
links_p_nom['j'] = _find_closest_links(links, links_p_nom)
|
||||
links_p_nom["j"] = _find_closest_links(links, links_p_nom)
|
||||
|
||||
links_p_nom = links_p_nom.groupby(['j'],as_index=False).agg({'Power (MW)': 'sum'})
|
||||
links_p_nom = links_p_nom.groupby(["j"], as_index=False).agg({"Power (MW)": "sum"})
|
||||
|
||||
p_nom = links_p_nom.dropna(subset=["j"]).set_index("j")["Power (MW)"]
|
||||
|
||||
# Don't update p_nom if it's already set
|
||||
p_nom_unset = p_nom.drop(links.index[links.p_nom.notnull()], errors='ignore') if "p_nom" in links else p_nom
|
||||
p_nom_unset = (
|
||||
p_nom.drop(links.index[links.p_nom.notnull()], errors="ignore")
|
||||
if "p_nom" in links
|
||||
else p_nom
|
||||
)
|
||||
links.loc[p_nom_unset.index, "p_nom"] = p_nom_unset
|
||||
|
||||
return links
|
||||
|
||||
|
||||
def _set_electrical_parameters_converters(converters, config):
|
||||
p_max_pu = config['links'].get('p_max_pu', 1.)
|
||||
converters['p_max_pu'] = p_max_pu
|
||||
converters['p_min_pu'] = -p_max_pu
|
||||
p_max_pu = config["links"].get("p_max_pu", 1.0)
|
||||
converters["p_max_pu"] = p_max_pu
|
||||
converters["p_min_pu"] = -p_max_pu
|
||||
|
||||
converters['p_nom'] = 2000
|
||||
converters["p_nom"] = 2000
|
||||
|
||||
# Converters are combined with links
|
||||
converters['under_construction'] = False
|
||||
converters['underground'] = False
|
||||
converters["under_construction"] = False
|
||||
converters["underground"] = False
|
||||
|
||||
return converters
|
||||
|
||||
|
||||
def _set_electrical_parameters_transformers(transformers, config):
|
||||
config = config['transformers']
|
||||
config = config["transformers"]
|
||||
|
||||
## Add transformer parameters
|
||||
transformers["x"] = config.get('x', 0.1)
|
||||
transformers["s_nom"] = config.get('s_nom', 2000)
|
||||
transformers['type'] = config.get('type', '')
|
||||
transformers["x"] = config.get("x", 0.1)
|
||||
transformers["s_nom"] = config.get("s_nom", 2000)
|
||||
transformers["type"] = config.get("type", "")
|
||||
|
||||
return transformers
|
||||
|
||||
|
||||
def _remove_dangling_branches(branches, buses):
|
||||
return pd.DataFrame(branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)])
|
||||
return pd.DataFrame(
|
||||
branches.loc[branches.bus0.isin(buses.index) & branches.bus1.isin(buses.index)]
|
||||
)
|
||||
|
||||
|
||||
def _remove_unconnected_components(network):
|
||||
@ -370,46 +459,62 @@ def _remove_unconnected_components(network):
|
||||
component_sizes = component.value_counts()
|
||||
components_to_remove = component_sizes.iloc[1:]
|
||||
|
||||
logger.info("Removing {} unconnected network components with less than {} buses. In total {} buses."
|
||||
.format(len(components_to_remove), components_to_remove.max(), components_to_remove.sum()))
|
||||
logger.info(
|
||||
"Removing {} unconnected network components with less than {} buses. In total {} buses.".format(
|
||||
len(components_to_remove),
|
||||
components_to_remove.max(),
|
||||
components_to_remove.sum(),
|
||||
)
|
||||
)
|
||||
|
||||
return network[component == component_sizes.index[0]]
|
||||
|
||||
|
||||
def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
|
||||
|
||||
buses = n.buses
|
||||
|
||||
def buses_in_shape(shape):
|
||||
shape = shapely.prepared.prep(shape)
|
||||
return pd.Series(
|
||||
np.fromiter((shape.contains(Point(x, y))
|
||||
for x, y in buses.loc[:,["x", "y"]].values),
|
||||
dtype=bool, count=len(buses)),
|
||||
index=buses.index
|
||||
np.fromiter(
|
||||
(
|
||||
shape.contains(Point(x, y))
|
||||
for x, y in buses.loc[:, ["x", "y"]].values
|
||||
),
|
||||
dtype=bool,
|
||||
count=len(buses),
|
||||
),
|
||||
index=buses.index,
|
||||
)
|
||||
|
||||
countries = config['countries']
|
||||
country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry']
|
||||
countries = config["countries"]
|
||||
country_shapes = gpd.read_file(country_shapes).set_index("name")["geometry"]
|
||||
# reindexing necessary for supporting empty geo-dataframes
|
||||
offshore_shapes = gpd.read_file(offshore_shapes)
|
||||
offshore_shapes = offshore_shapes.reindex(columns=['name', 'geometry']).set_index('name')['geometry']
|
||||
substation_b = buses['symbol'].str.contains('substation|converter station', case=False)
|
||||
offshore_shapes = offshore_shapes.reindex(columns=["name", "geometry"]).set_index(
|
||||
"name"
|
||||
)["geometry"]
|
||||
substation_b = buses["symbol"].str.contains(
|
||||
"substation|converter station", case=False
|
||||
)
|
||||
|
||||
def prefer_voltage(x, which):
|
||||
index = x.index
|
||||
if len(index) == 1:
|
||||
return pd.Series(index, index)
|
||||
key = (x.index[0]
|
||||
if x['v_nom'].isnull().all()
|
||||
else getattr(x['v_nom'], 'idx' + which)())
|
||||
key = (
|
||||
x.index[0]
|
||||
if x["v_nom"].isnull().all()
|
||||
else getattr(x["v_nom"], "idx" + which)()
|
||||
)
|
||||
return pd.Series(key, index)
|
||||
|
||||
gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False,
|
||||
group_keys=False, sort=False)
|
||||
bus_map_low = gb.apply(prefer_voltage, 'min')
|
||||
gb = buses.loc[substation_b].groupby(
|
||||
["x", "y"], as_index=False, group_keys=False, sort=False
|
||||
)
|
||||
bus_map_low = gb.apply(prefer_voltage, "min")
|
||||
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
|
||||
bus_map_high = gb.apply(prefer_voltage, 'max')
|
||||
bus_map_high = gb.apply(prefer_voltage, "max")
|
||||
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
|
||||
|
||||
onshore_b = pd.Series(False, buses.index)
|
||||
@ -420,47 +525,66 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
|
||||
onshore_country_b = buses_in_shape(onshore_shape)
|
||||
onshore_b |= onshore_country_b
|
||||
|
||||
buses.loc[onshore_country_b, 'country'] = country
|
||||
buses.loc[onshore_country_b, "country"] = country
|
||||
|
||||
if country not in offshore_shapes.index: continue
|
||||
if country not in offshore_shapes.index:
|
||||
continue
|
||||
offshore_country_b = buses_in_shape(offshore_shapes[country])
|
||||
offshore_b |= offshore_country_b
|
||||
|
||||
buses.loc[offshore_country_b, 'country'] = country
|
||||
buses.loc[offshore_country_b, "country"] = country
|
||||
|
||||
# Only accept buses as low-voltage substations (where load is attached), if
|
||||
# they have at least one connection which is not under_construction
|
||||
has_connections_b = pd.Series(False, index=buses.index)
|
||||
for b, df in product(('bus0', 'bus1'), (n.lines, n.links)):
|
||||
for b, df in product(("bus0", "bus1"), (n.lines, n.links)):
|
||||
has_connections_b |= ~df.groupby(b).under_construction.min()
|
||||
|
||||
buses['substation_lv'] = lv_b & onshore_b & (~ buses['under_construction']) & has_connections_b
|
||||
buses['substation_off'] = (offshore_b | (hv_b & onshore_b)) & (~ buses['under_construction'])
|
||||
buses["substation_lv"] = (
|
||||
lv_b & onshore_b & (~buses["under_construction"]) & has_connections_b
|
||||
)
|
||||
buses["substation_off"] = (offshore_b | (hv_b & onshore_b)) & (
|
||||
~buses["under_construction"]
|
||||
)
|
||||
|
||||
c_nan_b = buses.country.isnull()
|
||||
if c_nan_b.sum() > 0:
|
||||
c_tag = _get_country(buses.loc[c_nan_b])
|
||||
c_tag.loc[~c_tag.isin(countries)] = np.nan
|
||||
n.buses.loc[c_nan_b, 'country'] = c_tag
|
||||
n.buses.loc[c_nan_b, "country"] = c_tag
|
||||
|
||||
c_tag_nan_b = n.buses.country.isnull()
|
||||
|
||||
# Nearest country in path length defines country of still homeless buses
|
||||
# Work-around until commit 705119 lands in pypsa release
|
||||
n.transformers['length'] = 0.
|
||||
graph = n.graph(weight='length')
|
||||
n.transformers.drop('length', axis=1, inplace=True)
|
||||
n.transformers["length"] = 0.0
|
||||
graph = n.graph(weight="length")
|
||||
n.transformers.drop("length", axis=1, inplace=True)
|
||||
|
||||
for b in n.buses.index[c_tag_nan_b]:
|
||||
df = (pd.DataFrame(dict(pathlength=nx.single_source_dijkstra_path_length(graph, b, cutoff=200)))
|
||||
.join(n.buses.country).dropna())
|
||||
assert not df.empty, "No buses with defined country within 200km of bus `{}`".format(b)
|
||||
n.buses.at[b, 'country'] = df.loc[df.pathlength.idxmin(), 'country']
|
||||
df = (
|
||||
pd.DataFrame(
|
||||
dict(
|
||||
pathlength=nx.single_source_dijkstra_path_length(
|
||||
graph, b, cutoff=200
|
||||
)
|
||||
)
|
||||
)
|
||||
.join(n.buses.country)
|
||||
.dropna()
|
||||
)
|
||||
assert (
|
||||
not df.empty
|
||||
), "No buses with defined country within 200km of bus `{}`".format(b)
|
||||
n.buses.at[b, "country"] = df.loc[df.pathlength.idxmin(), "country"]
|
||||
|
||||
logger.warning("{} buses are not in any country or offshore shape,"
|
||||
logger.warning(
|
||||
"{} buses are not in any country or offshore shape,"
|
||||
" {} have been assigned from the tag of the entsoe map,"
|
||||
" the rest from the next bus in terms of pathlength."
|
||||
.format(c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()))
|
||||
" the rest from the next bus in terms of pathlength.".format(
|
||||
c_nan_b.sum(), c_nan_b.sum() - c_tag_nan_b.sum()
|
||||
)
|
||||
)
|
||||
|
||||
return buses
|
||||
|
||||
@ -469,11 +593,13 @@ def _replace_b2b_converter_at_country_border_by_link(n):
|
||||
# Affects only the B2B converter in Lithuania at the Polish border at the moment
|
||||
buscntry = n.buses.country
|
||||
linkcntry = n.links.bus0.map(buscntry)
|
||||
converters_i = n.links.index[(n.links.carrier == 'B2B') & (linkcntry == n.links.bus1.map(buscntry))]
|
||||
converters_i = n.links.index[
|
||||
(n.links.carrier == "B2B") & (linkcntry == n.links.bus1.map(buscntry))
|
||||
]
|
||||
|
||||
def findforeignbus(G, i):
|
||||
cntry = linkcntry.at[i]
|
||||
for busattr in ('bus0', 'bus1'):
|
||||
for busattr in ("bus0", "bus1"):
|
||||
b0 = n.links.at[i, busattr]
|
||||
for b1 in G[b0]:
|
||||
if buscntry[b1] != cntry:
|
||||
@ -486,67 +612,93 @@ def _replace_b2b_converter_at_country_border_by_link(n):
|
||||
if busattr is not None:
|
||||
comp, line = next(iter(G[b0][b1]))
|
||||
if comp != "Line":
|
||||
logger.warning("Unable to replace B2B `{}` expected a Line, but found a {}"
|
||||
.format(i, comp))
|
||||
logger.warning(
|
||||
"Unable to replace B2B `{}` expected a Line, but found a {}".format(
|
||||
i, comp
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
n.links.at[i, busattr] = b1
|
||||
n.links.at[i, 'p_nom'] = min(n.links.at[i, 'p_nom'], n.lines.at[line, 's_nom'])
|
||||
n.links.at[i, 'carrier'] = 'DC'
|
||||
n.links.at[i, 'underwater_fraction'] = 0.
|
||||
n.links.at[i, 'length'] = n.lines.at[line, 'length']
|
||||
n.links.at[i, "p_nom"] = min(
|
||||
n.links.at[i, "p_nom"], n.lines.at[line, "s_nom"]
|
||||
)
|
||||
n.links.at[i, "carrier"] = "DC"
|
||||
n.links.at[i, "underwater_fraction"] = 0.0
|
||||
n.links.at[i, "length"] = n.lines.at[line, "length"]
|
||||
|
||||
n.remove("Line", line)
|
||||
n.remove("Bus", b0)
|
||||
|
||||
logger.info("Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}"
|
||||
.format(i, b0, line, linkcntry.at[i], buscntry.at[b1]))
|
||||
logger.info(
|
||||
"Replacing B2B converter `{}` together with bus `{}` and line `{}` by an HVDC tie-line {}-{}".format(
|
||||
i, b0, line, linkcntry.at[i], buscntry.at[b1]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _set_links_underwater_fraction(n, offshore_shapes):
|
||||
if n.links.empty: return
|
||||
if n.links.empty:
|
||||
return
|
||||
|
||||
if not hasattr(n.links, 'geometry'):
|
||||
n.links['underwater_fraction'] = 0.
|
||||
if not hasattr(n.links, "geometry"):
|
||||
n.links["underwater_fraction"] = 0.0
|
||||
else:
|
||||
offshore_shape = gpd.read_file(offshore_shapes).unary_union
|
||||
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
|
||||
n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length
|
||||
n.links["underwater_fraction"] = (
|
||||
links.intersection(offshore_shape).length / links.length
|
||||
)
|
||||
|
||||
|
||||
def _adjust_capacities_of_under_construction_branches(n, config):
|
||||
lines_mode = config['lines'].get('under_construction', 'undef')
|
||||
if lines_mode == 'zero':
|
||||
n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0.
|
||||
n.lines.loc[n.lines.under_construction, 's_nom'] = 0.
|
||||
elif lines_mode == 'remove':
|
||||
lines_mode = config["lines"].get("under_construction", "undef")
|
||||
if lines_mode == "zero":
|
||||
n.lines.loc[n.lines.under_construction, "num_parallel"] = 0.0
|
||||
n.lines.loc[n.lines.under_construction, "s_nom"] = 0.0
|
||||
elif lines_mode == "remove":
|
||||
n.mremove("Line", n.lines.index[n.lines.under_construction])
|
||||
elif lines_mode != 'keep':
|
||||
logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.")
|
||||
elif lines_mode != "keep":
|
||||
logger.warning(
|
||||
"Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines."
|
||||
)
|
||||
|
||||
links_mode = config['links'].get('under_construction', 'undef')
if links_mode == 'zero':
n.links.loc[n.links.under_construction, "p_nom"] = 0.
elif links_mode == 'remove':
links_mode = config["links"].get("under_construction", "undef")
if links_mode == "zero":
n.links.loc[n.links.under_construction, "p_nom"] = 0.0
elif links_mode == "remove":
n.mremove("Link", n.links.index[n.links.under_construction])
elif links_mode != 'keep':
logger.warning("Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links.")
elif links_mode != "keep":
logger.warning(
"Unrecognized configuration for `links: under_construction` = `{}`. Keeping under construction links."
)

if lines_mode == 'remove' or links_mode == 'remove':
if lines_mode == "remove" or links_mode == "remove":
# We might need to remove further unconnected components
n = _remove_unconnected_components(n)

return n


def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
links_p_nom, links_tyndp, europe_shape, country_shapes, offshore_shapes,
parameter_corrections, config):
def base_network(
eg_buses,
eg_converters,
eg_transformers,
eg_lines,
eg_links,
links_p_nom,
links_tyndp,
europe_shape,
country_shapes,
offshore_shapes,
parameter_corrections,
config,
):

buses = _load_buses_from_eg(eg_buses, europe_shape, config['electricity'])
buses = _load_buses_from_eg(eg_buses, europe_shape, config["electricity"])

links = _load_links_from_eg(buses, eg_links)
if config['links'].get('include_tyndp'):
if config["links"].get("include_tyndp"):
buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape)

converters = _load_converters_from_eg(buses, eg_converters)
@ -560,9 +712,9 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
converters = _set_electrical_parameters_converters(converters, config)

n = pypsa.Network()
n.name = 'PyPSA-Eur'
n.name = "PyPSA-Eur"

n.set_snapshots(pd.date_range(freq='h', **config['snapshots']))
n.set_snapshots(pd.date_range(freq="h", **config["snapshots"]))

n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line")
@ -586,15 +738,28 @@ def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,

return n


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('base_network')

snakemake = mock_snakemake("base_network")
configure_logging(snakemake)

n = base_network(snakemake.input.eg_buses, snakemake.input.eg_converters, snakemake.input.eg_transformers, snakemake.input.eg_lines, snakemake.input.eg_links,
snakemake.input.links_p_nom, snakemake.input.links_tyndp, snakemake.input.europe_shape, snakemake.input.country_shapes, snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections, snakemake.config)
n = base_network(
snakemake.input.eg_buses,
snakemake.input.eg_converters,
snakemake.input.eg_transformers,
snakemake.input.eg_lines,
snakemake.input.eg_links,
snakemake.input.links_p_nom,
snakemake.input.links_tyndp,
snakemake.input.europe_shape,
snakemake.input.country_shapes,
snakemake.input.offshore_shapes,
snakemake.input.parameter_corrections,
snakemake.config,
)

n.meta = snakemake.config
n.export_to_netcdf(snakemake.output[0])

@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

"""
Creates Voronoi shapes for each bus representing both onshore and offshore regions.
Creates Voronoi shapes for each bus representing both onshore and offshore
regions.

Relevant Settings
-----------------
@ -38,19 +40,18 @@ Outputs

Description
-----------

"""

import logging
from _helpers import configure_logging, REGION_COLS

import pypsa
import os
import pandas as pd
import numpy as np

import geopandas as gpd
from shapely.geometry import Polygon
import numpy as np
import pandas as pd
import pypsa
from _helpers import REGION_COLS, configure_logging
from scipy.spatial import Voronoi
from shapely.geometry import Polygon

logger = logging.getLogger(__name__)

@ -81,11 +82,19 @@ def voronoi_partition_pts(points, outline):

# to avoid any network positions outside all Voronoi cells, append
# the corners of a rectangle framing these points
vor = Voronoi(np.vstack((points,
[[xmin-3.*xspan, ymin-3.*yspan],
[xmin-3.*xspan, ymax+3.*yspan],
[xmax+3.*xspan, ymin-3.*yspan],
[xmax+3.*xspan, ymax+3.*yspan]])))
vor = Voronoi(
np.vstack(
(
points,
[
[xmin - 3.0 * xspan, ymin - 3.0 * yspan],
[xmin - 3.0 * xspan, ymax + 3.0 * yspan],
[xmax + 3.0 * xspan, ymin - 3.0 * yspan],
[xmax + 3.0 * xspan, ymax + 3.0 * yspan],
],
)
)
)

polygons = []
for i in range(len(points)):
@ -98,23 +107,27 @@ def voronoi_partition_pts(points, outline):

polygons.append(poly)


return np.array(polygons, dtype=object)


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_bus_regions')

snakemake = mock_snakemake("build_bus_regions")
configure_logging(snakemake)

countries = snakemake.config['countries']
countries = snakemake.config["countries"]

n = pypsa.Network(snakemake.input.base_network)

country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index("name")[
"geometry"
]
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes)
offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index('name')['geometry']
offshore_shapes = offshore_shapes.reindex(columns=REGION_COLS).set_index("name")[
"geometry"
]

onshore_regions = []
offshore_regions = []
@ -124,29 +137,42 @@ if __name__ == "__main__":

onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({
'name': onshore_locs.index,
'x': onshore_locs['x'],
'y': onshore_locs['y'],
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape),
'country': country
}))
onshore_regions.append(
gpd.GeoDataFrame(
{
"name": onshore_locs.index,
"x": onshore_locs["x"],
"y": onshore_locs["y"],
"geometry": voronoi_partition_pts(
onshore_locs.values, onshore_shape
),
"country": country,
}
)
)

if country not in offshore_shapes.index: continue
if country not in offshore_shapes.index:
continue
offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({
'name': offshore_locs.index,
'x': offshore_locs['x'],
'y': offshore_locs['y'],
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape),
'country': country
})
offshore_regions_c = gpd.GeoDataFrame(
{
"name": offshore_locs.index,
"x": offshore_locs["x"],
"y": offshore_locs["y"],
"geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape),
"country": country,
}
)
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c)

pd.concat(onshore_regions, ignore_index=True).to_file(snakemake.output.regions_onshore)
pd.concat(onshore_regions, ignore_index=True).to_file(
snakemake.output.regions_onshore
)
if offshore_regions:
pd.concat(offshore_regions, ignore_index=True).to_file(snakemake.output.regions_offshore)
pd.concat(offshore_regions, ignore_index=True).to_file(
snakemake.output.regions_offshore
)
else:
offshore_shapes.to_frame().to_file(snakemake.output.regions_offshore)

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -88,43 +89,42 @@ A **SARAH-2 cutout** can be used to amend the fields ``temperature``, ``influx_t

Description
-----------

"""

import logging

import atlite
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging


logger = logging.getLogger(__name__)

if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_cutout', cutout='europe-2013-era5')

snakemake = mock_snakemake("build_cutout", cutout="europe-2013-era5")
configure_logging(snakemake)

cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout]
cutout_params = snakemake.config["atlite"]["cutouts"][snakemake.wildcards.cutout]

snapshots = pd.date_range(freq='h', **snakemake.config['snapshots'])
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
time = [snapshots[0], snapshots[-1]]
cutout_params['time'] = slice(*cutout_params.get('time', time))
cutout_params["time"] = slice(*cutout_params.get("time", time))

if {'x', 'y', 'bounds'}.isdisjoint(cutout_params):
if {"x", "y", "bounds"}.isdisjoint(cutout_params):
# Determine the bounds from bus regions with a buffer of two grid cells
onshore = gpd.read_file(snakemake.input.regions_onshore)
offshore = gpd.read_file(snakemake.input.regions_offshore)
regions = pd.concat([onshore, offshore])
d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2
cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d]
elif {'x', 'y'}.issubset(cutout_params):
cutout_params['x'] = slice(*cutout_params['x'])
cutout_params['y'] = slice(*cutout_params['y'])

d = max(cutout_params.get("dx", 0.25), cutout_params.get("dy", 0.25)) * 2
cutout_params["bounds"] = regions.total_bounds + [-d, -d, d, d]
elif {"x", "y"}.issubset(cutout_params):
cutout_params["x"] = slice(*cutout_params["x"])
cutout_params["y"] = slice(*cutout_params["y"])

logging.info(f"Preparing cutout with parameters {cutout_params}.")
features = cutout_params.pop('features', None)
features = cutout_params.pop("features", None)
cutout = atlite.Cutout(snakemake.output[0], **cutout_params)
cutout.prepare(features=features)

@ -1,4 +1,5 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
@ -60,51 +61,61 @@ Description
"""

import logging
from _helpers import configure_logging

import atlite
import country_converter as coco
import geopandas as gpd
import pandas as pd
from _helpers import configure_logging

import country_converter as coco
cc = coco.CountryConverter()


def get_eia_annual_hydro_generation(fn, countries):

# in billion kWh/a = TWh/a
df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[u' ','--']).iloc[1:, 1:]
df = pd.read_csv(fn, skiprows=2, index_col=1, na_values=[" ", "--"]).iloc[1:, 1:]
df.index = df.index.str.strip()

former_countries = {
"Former Czechoslovakia": dict(
countries=["Czech Republic", "Slovakia"],
start=1980, end=1992),
countries=["Czech Republic", "Slovakia"], start=1980, end=1992
),
"Former Serbia and Montenegro": dict(
countries=["Serbia", "Montenegro"],
start=1992, end=2005),
countries=["Serbia", "Montenegro"], start=1992, end=2005
),
"Former Yugoslavia": dict(
countries=["Slovenia", "Croatia", "Bosnia and Herzegovina", "Serbia", "Montenegro", "North Macedonia"],
start=1980, end=1991),
countries=[
"Slovenia",
"Croatia",
"Bosnia and Herzegovina",
"Serbia",
"Montenegro",
"North Macedonia",
],
start=1980,
end=1991,
),
}

for k, v in former_countries.items():
period = [str(i) for i in range(v["start"], v["end"] + 1)]
ratio = df.loc[v['countries']].T.dropna().sum()
ratio = df.loc[v["countries"]].T.dropna().sum()
ratio /= ratio.sum()
for country in v['countries']:
for country in v["countries"]:
df.loc[country, period] = df.loc[k, period] * ratio[country]

baltic_states = ["Latvia", "Estonia", "Lithuania"]
df.loc[baltic_states] = df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T
df.loc[baltic_states] = (
df.loc[baltic_states].T.fillna(df.loc[baltic_states].mean(axis=1)).T
)

df.loc["Germany"] = df.filter(like='Germany', axis=0).sum()
df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.)
df = df.loc[~df.index.str.contains('Former')]
df.loc["Germany"] = df.filter(like="Germany", axis=0).sum()
df.loc["Serbia"] += df.loc["Kosovo"].fillna(0.0)
df = df.loc[~df.index.str.contains("Former")]
df.drop(["Europe", "Germany, West", "Germany, East", "Kosovo"], inplace=True)

df.index = cc.convert(df.index, to='iso2')
df.index.name = 'countries'
df.index = cc.convert(df.index, to="iso2")
df.index.name = "countries"

df = df.T[countries] * 1e6 # in MWh/a

@ -114,28 +125,34 @@ def get_eia_annual_hydro_generation(fn, countries):
logger = logging.getLogger(__name__)

if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_hydro_profile')

snakemake = mock_snakemake("build_hydro_profile")
configure_logging(snakemake)

config_hydro = snakemake.config['renewable']['hydro']
config_hydro = snakemake.config["renewable"]["hydro"]
cutout = atlite.Cutout(snakemake.input.cutout)

countries = snakemake.config['countries']
country_shapes = (gpd.read_file(snakemake.input.country_shapes)
.set_index('name')['geometry'].reindex(countries))
country_shapes.index.name = 'countries'
countries = snakemake.config["countries"]
country_shapes = (
gpd.read_file(snakemake.input.country_shapes)
.set_index("name")["geometry"]
.reindex(countries)
)
country_shapes.index.name = "countries"

fn = snakemake.input.eia_hydro_generation
eia_stats = get_eia_annual_hydro_generation(fn, countries)

inflow = cutout.runoff(shapes=country_shapes,
inflow = cutout.runoff(
shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats)
normalize_using_yearly=eia_stats,
)

if 'clip_min_inflow' in config_hydro:
inflow = inflow.where(inflow > config_hydro['clip_min_inflow'], 0)
if "clip_min_inflow" in config_hydro:
inflow = inflow.where(inflow > config_hydro["clip_min_inflow"], 0)

inflow.to_netcdf(snakemake.output[0])

@ -1,10 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""
|
||||
This rule downloads the load data from `Open Power System Data Time series.
|
||||
|
||||
This rule downloads the load data from `Open Power System Data Time series <https://data.open-power-system-data.org/time_series/>`_. For all countries in the network, the per country load timeseries with suffix ``_load_actual_entsoe_transparency`` are extracted from the dataset. After filling small gaps linearly and large gaps by copying time-slice of a given period, the load data is exported to a ``.csv`` file.
|
||||
<https://data.open-power-system-data.org/time_series/>`_. For all countries in
|
||||
the network, the per country load timeseries with suffix
|
||||
``_load_actual_entsoe_transparency`` are extracted from the dataset. After
|
||||
filling small gaps linearly and large gaps by copying time-slice of a given
|
||||
period, the load data is exported to a ``.csv`` file.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -32,17 +38,15 @@ Outputs
|
||||
-------
|
||||
|
||||
- ``resources/load.csv``:
|
||||
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
logger = logging.getLogger(__name__)
|
||||
import dateutil
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from _helpers import configure_logging
|
||||
from pandas import Timedelta as Delta
|
||||
|
||||
|
||||
@ -71,23 +75,29 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
|
||||
"""
|
||||
logger.info(f"Retrieving load data from '{fn}'.")
|
||||
|
||||
pattern = 'power_statistics' if powerstatistics else 'transparency'
|
||||
pattern = f'_load_actual_entsoe_{pattern}'
|
||||
pattern = "power_statistics" if powerstatistics else "transparency"
|
||||
pattern = f"_load_actual_entsoe_{pattern}"
|
||||
rename = lambda s: s[: -len(pattern)]
|
||||
date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
|
||||
return (pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
|
||||
return (
|
||||
pd.read_csv(fn, index_col=0, parse_dates=[0], date_parser=date_parser)
|
||||
.filter(like=pattern)
|
||||
.rename(columns=rename)
|
||||
.dropna(how="all", axis=0)
|
||||
.rename(columns={'GB_UKM' : 'GB'})
|
||||
.rename(columns={"GB_UKM": "GB"})
|
||||
.filter(items=countries)
|
||||
.loc[years])
|
||||
.loc[years]
|
||||
)
|
||||
|
||||
|
||||
def consecutive_nans(ds):
|
||||
return (ds.isnull().astype(int)
|
||||
return (
|
||||
ds.isnull()
|
||||
.astype(int)
|
||||
.groupby(ds.notnull().astype(int).cumsum()[ds.isnull()])
|
||||
.transform('sum').fillna(0))
|
||||
.transform("sum")
|
||||
.fillna(0)
|
||||
)
|
||||
|
||||
|
||||
def fill_large_gaps(ds, shift):
|
||||
@ -97,36 +107,50 @@ def fill_large_gaps(ds, shift):
|
||||
This function fills gaps ranging from 3 to 168 hours (one week).
"""
|
||||
shift = Delta(shift)
|
||||
nhours = shift / np.timedelta64(1, 'h')
|
||||
nhours = shift / np.timedelta64(1, "h")
|
||||
if (consecutive_nans(ds) > nhours).any():
|
||||
logger.warning('There exist gaps larger then the time shift used for '
|
||||
'copying time slices.')
|
||||
logger.warning(
|
||||
"There exist gaps larger then the time shift used for "
|
||||
"copying time slices."
|
||||
)
|
||||
time_shift = pd.Series(ds.values, ds.index + shift)
|
||||
return ds.where(ds.notnull(), time_shift.reindex_like(ds))
|
||||
|
||||
|
||||
def nan_statistics(df):
|
||||
def max_consecutive_nans(ds):
|
||||
return (ds.isnull().astype(int)
|
||||
return (
|
||||
ds.isnull()
|
||||
.astype(int)
|
||||
.groupby(ds.notnull().astype(int).cumsum())
|
||||
.sum().max())
|
||||
.sum()
|
||||
.max()
|
||||
)
|
||||
|
||||
consecutive = df.apply(max_consecutive_nans)
|
||||
total = df.isnull().sum()
|
||||
max_total_per_month = df.isnull().resample('m').sum().max()
|
||||
return pd.concat([total, consecutive, max_total_per_month],
|
||||
keys=['total', 'consecutive', 'max_total_per_month'], axis=1)
|
||||
max_total_per_month = df.isnull().resample("m").sum().max()
|
||||
return pd.concat(
|
||||
[total, consecutive, max_total_per_month],
|
||||
keys=["total", "consecutive", "max_total_per_month"],
|
||||
axis=1,
|
||||
)
|
||||
|
||||
|
||||
def copy_timeslice(load, cntry, start, stop, delta, fn_load=None):
|
||||
start = pd.Timestamp(start)
|
||||
stop = pd.Timestamp(stop)
|
||||
if (start in load.index and stop in load.index):
|
||||
if start in load.index and stop in load.index:
|
||||
if start - delta in load.index and stop - delta in load.index and cntry in load:
|
||||
load.loc[start:stop, cntry] = load.loc[start-delta:stop-delta, cntry].values
|
||||
load.loc[start:stop, cntry] = load.loc[
|
||||
start - delta : stop - delta, cntry
|
||||
].values
|
||||
elif fn_load is not None:
|
||||
duration = pd.date_range(freq='h', start=start-delta, end=stop-delta)
|
||||
duration = pd.date_range(freq="h", start=start - delta, end=stop - delta)
|
||||
load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics)
|
||||
load.loc[start:stop, cntry] = load_raw.loc[start-delta:stop-delta, cntry].values
|
||||
load.loc[start:stop, cntry] = load_raw.loc[
|
||||
start - delta : stop - delta, cntry
|
||||
].values
|
||||
|
||||
|
||||
def manual_adjustment(load, fn_load, powerstatistics):
|
||||
@ -167,70 +191,116 @@ def manual_adjustment(load, fn_load, powerstatistics):
|
||||
"""
|
||||
|
||||
if powerstatistics:
|
||||
if 'MK' in load.columns:
|
||||
if 'AL' not in load.columns or load.AL.isnull().values.all():
|
||||
load['AL'] = load['MK'] * (4.1 / 7.4)
|
||||
if 'RS' in load.columns:
|
||||
if 'KV' not in load.columns or load.KV.isnull().values.all():
|
||||
load['KV'] = load['RS'] * (4.8 / 27.)
|
||||
if "MK" in load.columns:
|
||||
if "AL" not in load.columns or load.AL.isnull().values.all():
|
||||
load["AL"] = load["MK"] * (4.1 / 7.4)
|
||||
if "RS" in load.columns:
|
||||
if "KV" not in load.columns or load.KV.isnull().values.all():
|
||||
load["KV"] = load["RS"] * (4.8 / 27.0)
|
||||
|
||||
copy_timeslice(load, 'GR', '2015-08-11 21:00', '2015-08-15 20:00', Delta(weeks=1))
|
||||
copy_timeslice(load, 'AT', '2018-12-31 22:00', '2019-01-01 22:00', Delta(days=2))
|
||||
copy_timeslice(load, 'CH', '2010-01-19 07:00', '2010-01-19 22:00', Delta(days=1))
|
||||
copy_timeslice(load, 'CH', '2010-03-28 00:00', '2010-03-28 21:00', Delta(days=1))
|
||||
copy_timeslice(
|
||||
load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)
|
||||
)
|
||||
copy_timeslice(
|
||||
load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)
|
||||
)
|
||||
copy_timeslice(
|
||||
load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)
|
||||
)
|
||||
copy_timeslice(
|
||||
load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)
|
||||
)
|
||||
# is a WE, so take WE before
|
||||
copy_timeslice(load, 'CH', '2010-10-08 13:00', '2010-10-10 21:00', Delta(weeks=1))
|
||||
copy_timeslice(load, 'CH', '2010-11-04 04:00', '2010-11-04 22:00', Delta(days=1))
|
||||
copy_timeslice(load, 'NO', '2010-12-09 11:00', '2010-12-09 18:00', Delta(days=1))
|
||||
copy_timeslice(
|
||||
load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)
|
||||
)
|
||||
copy_timeslice(
|
||||
load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)
|
||||
)
|
||||
copy_timeslice(
|
||||
load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1)
|
||||
)
|
||||
# whole january missing
|
||||
copy_timeslice(load, 'GB', '2010-01-01 00:00', '2010-01-31 23:00', Delta(days=-365), fn_load)
|
||||
copy_timeslice(
|
||||
load,
|
||||
"GB",
|
||||
"2010-01-01 00:00",
|
||||
"2010-01-31 23:00",
|
||||
Delta(days=-365),
|
||||
fn_load,
|
||||
)
|
||||
# 1.1. at midnight gets special treatment
|
||||
copy_timeslice(load, 'IE', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
|
||||
copy_timeslice(load, 'PT', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
|
||||
copy_timeslice(load, 'GB', '2016-01-01 00:00', '2016-01-01 01:00', Delta(days=-366), fn_load)
|
||||
copy_timeslice(
|
||||
load,
|
||||
"IE",
|
||||
"2016-01-01 00:00",
|
||||
"2016-01-01 01:00",
|
||||
Delta(days=-366),
|
||||
fn_load,
|
||||
)
|
||||
copy_timeslice(
|
||||
load,
|
||||
"PT",
|
||||
"2016-01-01 00:00",
|
||||
"2016-01-01 01:00",
|
||||
Delta(days=-366),
|
||||
fn_load,
|
||||
)
|
||||
copy_timeslice(
|
||||
load,
|
||||
"GB",
|
||||
"2016-01-01 00:00",
|
||||
"2016-01-01 01:00",
|
||||
Delta(days=-366),
|
||||
fn_load,
|
||||
)
|
||||
|
||||
else:
|
||||
if 'ME' in load:
|
||||
if 'AL' not in load and 'AL' in countries:
|
||||
load['AL'] = load.ME * (5.7/2.9)
|
||||
if 'MK' not in load and 'MK' in countries:
|
||||
load['MK'] = load.ME * (6.7/2.9)
|
||||
copy_timeslice(load, 'BG', '2018-10-27 21:00', '2018-10-28 22:00', Delta(weeks=1))
|
||||
if "ME" in load:
|
||||
if "AL" not in load and "AL" in countries:
|
||||
load["AL"] = load.ME * (5.7 / 2.9)
|
||||
if "MK" not in load and "MK" in countries:
|
||||
load["MK"] = load.ME * (6.7 / 2.9)
|
||||
copy_timeslice(
|
||||
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
|
||||
)
|
||||
|
||||
return load
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('build_load_data')
|
||||
|
||||
snakemake = mock_snakemake("build_load_data")
|
||||
|
||||
configure_logging(snakemake)
|
||||
|
||||
powerstatistics = snakemake.config['load']['power_statistics']
|
||||
interpolate_limit = snakemake.config['load']['interpolate_limit']
|
||||
countries = snakemake.config['countries']
|
||||
snapshots = pd.date_range(freq='h', **snakemake.config['snapshots'])
|
||||
powerstatistics = snakemake.config["load"]["power_statistics"]
|
||||
interpolate_limit = snakemake.config["load"]["interpolate_limit"]
|
||||
countries = snakemake.config["countries"]
|
||||
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"])
|
||||
years = slice(snapshots[0], snapshots[-1])
|
||||
time_shift = snakemake.config['load']['time_shift_for_large_gaps']
|
||||
time_shift = snakemake.config["load"]["time_shift_for_large_gaps"]
|
||||
|
||||
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
|
||||
|
||||
if snakemake.config['load']['manual_adjustments']:
|
||||
if snakemake.config["load"]["manual_adjustments"]:
|
||||
load = manual_adjustment(load, snakemake.input[0], powerstatistics)
|
||||
|
||||
logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")
|
||||
load = load.interpolate(method='linear', limit=interpolate_limit)
|
||||
load = load.interpolate(method="linear", limit=interpolate_limit)
|
||||
|
||||
logger.info("Filling larger gaps by copying time-slices of period "
|
||||
f"'{time_shift}'.")
|
||||
logger.info(
|
||||
"Filling larger gaps by copying time-slices of period " f"'{time_shift}'."
|
||||
)
|
||||
load = load.apply(fill_large_gaps, shift=time_shift)
|
||||
|
||||
assert not load.isna().any().any(), (
|
||||
'Load data contains nans. Adjust the parameters '
|
||||
'`time_shift_for_large_gaps` or modify the `manual_adjustment` function '
|
||||
'for implementing the needed load data modifications.')
|
||||
"Load data contains nans. Adjust the parameters "
|
||||
"`time_shift_for_large_gaps` or modify the `manual_adjustment` function "
|
||||
"for implementing the needed load data modifications."
|
||||
)
|
||||
|
||||
load.to_csv(snakemake.output[0])
|
||||
|
||||
|
@ -1,9 +1,13 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""
|
||||
Rasters the vector data of the `Natura 2000 <https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas onto all cutout regions.
|
||||
Rasters the vector data of the `Natura 2000.
|
||||
|
||||
<https://en.wikipedia.org/wiki/Natura_2000>`_ natural protection areas onto all
|
||||
cutout regions.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -36,15 +40,14 @@ Outputs
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import atlite
|
||||
import geopandas as gpd
|
||||
import rasterio as rio
|
||||
from _helpers import configure_logging
|
||||
from rasterio.features import geometry_mask
|
||||
from rasterio.warp import transform_bounds
|
||||
|
||||
@ -56,7 +59,7 @@ def determine_cutout_xXyY(cutout_name):
|
||||
assert cutout.crs.to_epsg() == 4326
|
||||
x, X, y, Y = cutout.extent
|
||||
dx, dy = cutout.dx, cutout.dy
|
||||
return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.]
|
||||
return [x - dx / 2.0, X + dx / 2.0, y - dy / 2.0, Y + dy / 2.0]
|
||||
|
||||
|
||||
def get_transform_and_shape(bounds, res):
|
||||
@ -68,9 +71,10 @@ def get_transform_and_shape(bounds, res):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('build_natura_raster')
|
||||
|
||||
snakemake = mock_snakemake("build_natura_raster")
|
||||
configure_logging(snakemake)
|
||||
|
||||
cutouts = snakemake.input.cutouts
|
||||
@ -83,7 +87,16 @@ if __name__ == "__main__":
|
||||
raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform)
|
||||
raster = raster.astype(rio.uint8)
|
||||
|
||||
with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8,
|
||||
count=1, transform=transform, crs=3035, compress='lzw',
|
||||
width=raster.shape[1], height=raster.shape[0]) as dst:
|
||||
with rio.open(
|
||||
snakemake.output[0],
|
||||
"w",
|
||||
driver="GTiff",
|
||||
dtype=rio.uint8,
|
||||
count=1,
|
||||
transform=transform,
|
||||
crs=3035,
|
||||
compress="lzw",
|
||||
width=raster.shape[1],
|
||||
height=raster.shape[0],
|
||||
) as dst:
|
||||
dst.write(raster, indexes=1)
|
||||
|
@ -1,10 +1,15 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# coding: utf-8
|
||||
"""
|
||||
Retrieves conventional powerplant capacities and locations from `powerplantmatching <https://github.com/FRESNA/powerplantmatching>`_, assigns these to buses and creates a ``.csv`` file. It is possible to amend the powerplant database with custom entries provided in ``data/custom_powerplants.csv``.
|
||||
Retrieves conventional powerplant capacities and locations from
|
||||
`powerplantmatching <https://github.com/FRESNA/powerplantmatching>`_, assigns
|
||||
these to buses and creates a ``.csv`` file. It is possible to amend the
|
||||
powerplant database with custom entries provided in
|
||||
``data/custom_powerplants.csv``.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -68,16 +73,14 @@ The configuration options ``electricity: powerplants_filter`` and ``electricity:
|
||||
|
||||
powerplants_filter: Country not in ['Germany'] and YearCommissioned <= 2015
|
||||
custom_powerplants: YearCommissioned <= 2015
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pypsa
|
||||
import powerplantmatching as pm
|
||||
import pandas as pd
|
||||
|
||||
import powerplantmatching as pm
|
||||
import pypsa
|
||||
from _helpers import configure_logging
|
||||
from powerplantmatching.export import map_country_bus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -86,70 +89,78 @@ logger = logging.getLogger(__name__)
|
||||
def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
|
||||
if not custom_ppl_query:
|
||||
return ppl
|
||||
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={'bus': 'str'})
|
||||
add_ppls = pd.read_csv(custom_powerplants, index_col=0, dtype={"bus": "str"})
|
||||
if isinstance(custom_ppl_query, str):
|
||||
add_ppls.query(custom_ppl_query, inplace=True)
|
||||
return pd.concat([ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True)
|
||||
return pd.concat(
|
||||
[ppl, add_ppls], sort=False, ignore_index=True, verify_integrity=True
|
||||
)
|
||||
|
||||
|
||||
def replace_natural_gas_technology(df):
|
||||
mapping = {'Steam Turbine': 'OCGT', "Combustion Engine": "OCGT"}
|
||||
tech = df.Technology.replace(mapping).fillna('OCGT')
|
||||
return df.Technology.where(df.Fueltype != 'Natural Gas', tech)
|
||||
mapping = {"Steam Turbine": "OCGT", "Combustion Engine": "OCGT"}
|
||||
tech = df.Technology.replace(mapping).fillna("OCGT")
|
||||
return df.Technology.where(df.Fueltype != "Natural Gas", tech)
|
||||
|
||||
|
||||
def replace_natural_gas_fueltype(df):
|
||||
return df.Fueltype.where(df.Fueltype != 'Natural Gas', df.Technology)
|
||||
return df.Fueltype.where(df.Fueltype != "Natural Gas", df.Technology)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('build_powerplants')
|
||||
|
||||
snakemake = mock_snakemake("build_powerplants")
|
||||
configure_logging(snakemake)
|
||||
|
||||
n = pypsa.Network(snakemake.input.base_network)
|
||||
countries = n.buses.country.unique()
|
||||
|
||||
|
||||
ppl = (pm.powerplants(from_url=True)
|
||||
ppl = (
|
||||
pm.powerplants(from_url=True)
|
||||
.powerplant.fill_missing_decommissioning_years()
|
||||
.powerplant.convert_country_to_alpha2()
|
||||
.query('Fueltype not in ["Solar", "Wind"] and Country in @countries')
|
||||
.assign(Technology=replace_natural_gas_technology)
|
||||
.assign(Fueltype=replace_natural_gas_fueltype))
|
||||
.assign(Fueltype=replace_natural_gas_fueltype)
|
||||
)
|
||||
|
||||
# Correct bioenergy for countries where possible
|
||||
opsd = pm.data.OPSD_VRE().powerplant.convert_country_to_alpha2()
|
||||
opsd = opsd.query('Country in @countries and Fueltype == "Bioenergy"')
|
||||
opsd['Name'] = "Biomass"
|
||||
opsd["Name"] = "Biomass"
|
||||
available_countries = opsd.Country.unique()
|
||||
ppl = ppl.query('not (Country in @available_countries and Fueltype == "Bioenergy")')
|
||||
ppl = pd.concat([ppl, opsd])
|
||||
|
||||
ppl_query = snakemake.config['electricity']['powerplants_filter']
|
||||
ppl_query = snakemake.config["electricity"]["powerplants_filter"]
|
||||
if isinstance(ppl_query, str):
|
||||
ppl.query(ppl_query, inplace=True)
|
||||
|
||||
# add carriers from own powerplant files:
|
||||
custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
|
||||
ppl = add_custom_powerplants(ppl, snakemake.input.custom_powerplants, custom_ppl_query)
|
||||
custom_ppl_query = snakemake.config["electricity"]["custom_powerplants"]
|
||||
ppl = add_custom_powerplants(
|
||||
ppl, snakemake.input.custom_powerplants, custom_ppl_query
|
||||
)
|
||||
|
||||
countries_wo_ppl = set(countries) - set(ppl.Country.unique())
|
||||
if countries_wo_ppl:
|
||||
logging.warning(f"No powerplants known in: {', '.join(countries_wo_ppl)}")
|
||||
|
||||
substations = n.buses.query('substation_lv')
|
||||
substations = n.buses.query("substation_lv")
|
||||
ppl = map_country_bus(ppl, substations)
|
||||
|
||||
bus_null_b = ppl["bus"].isnull()
|
||||
if bus_null_b.any():
|
||||
logging.warning(f"Couldn't find close bus for {bus_null_b.sum()} powerplants. "
|
||||
"Removing them from the powerplants list.")
|
||||
logging.warning(
|
||||
f"Couldn't find close bus for {bus_null_b.sum()} powerplants. "
|
||||
"Removing them from the powerplants list."
|
||||
)
|
||||
ppl = ppl[~bus_null_b]
|
||||
|
||||
# TODO: This has to fixed in PPM, some powerplants are still duplicated
|
||||
cumcount = ppl.groupby(['bus', 'Fueltype']).cumcount() + 1
|
||||
cumcount = ppl.groupby(["bus", "Fueltype"]).cumcount() + 1
|
||||
ppl.Name = ppl.Name.where(cumcount == 1, ppl.Name + " " + cumcount.astype(str))
|
||||
|
||||
ppl.reset_index(drop=True).to_csv(snakemake.output[0])
|
||||
|
@ -1,15 +1,17 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""Calculates for each network node the
|
||||
(i) installable capacity (based on land-use), (ii) the available generation time
|
||||
series (based on weather data), and (iii) the average distance from the node for
|
||||
onshore wind, AC-connected offshore wind, DC-connected offshore wind and solar
|
||||
PV generators. In addition for offshore wind it calculates the fraction of the
|
||||
grid connection which is under water.
|
||||
"""
|
||||
Calculates for each network node the (i) installable capacity (based on land-
|
||||
use), (ii) the available generation time series (based on weather data), and
|
||||
(iii) the average distance from the node for onshore wind, AC-connected
|
||||
offshore wind, DC-connected offshore wind and solar PV generators. In addition
|
||||
for offshore wind it calculates the fraction of the grid connection which is
|
||||
under water.
|
||||
|
||||
.. note:: Hydroelectric profiles are built in script :mod:`build_hydro_profiles`.
|
||||
|
||||
@ -177,132 +179,148 @@ node (`p_nom_max`): ``simple`` and ``conservative``:
|
||||
- ``conservative`` assertains the nodal limit by increasing capacities
|
||||
proportional to the layout until the limit of an individual grid cell is
|
||||
reached.
|
||||
|
||||
"""
|
||||
import progressbar as pgb
|
||||
import geopandas as gpd
|
||||
import xarray as xr
|
||||
import numpy as np
|
||||
import functools
|
||||
import atlite
|
||||
import logging
|
||||
import time
|
||||
|
||||
import atlite
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
import progressbar as pgb
|
||||
import xarray as xr
|
||||
from _helpers import configure_logging
|
||||
from dask.distributed import Client, LocalCluster
|
||||
from pypsa.geo import haversine
|
||||
from shapely.geometry import LineString
|
||||
import time
|
||||
from dask.distributed import Client, LocalCluster
|
||||
|
||||
from _helpers import configure_logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if 'snakemake' not in globals():
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('build_renewable_profiles', technology='solar')
|
||||
|
||||
snakemake = mock_snakemake("build_renewable_profiles", technology="solar")
|
||||
configure_logging(snakemake)
|
||||
pgb.streams.wrap_stderr()
|
||||
|
||||
nprocesses = int(snakemake.threads)
|
||||
noprogress = not snakemake.config['atlite'].get('show_progress', False)
|
||||
config = snakemake.config['renewable'][snakemake.wildcards.technology]
|
||||
resource = config['resource'] # pv panel config / wind turbine config
|
||||
correction_factor = config.get('correction_factor', 1.)
|
||||
capacity_per_sqkm = config['capacity_per_sqkm']
|
||||
p_nom_max_meth = config.get('potential', 'conservative')
|
||||
noprogress = not snakemake.config["atlite"].get("show_progress", False)
|
||||
config = snakemake.config["renewable"][snakemake.wildcards.technology]
|
||||
resource = config["resource"] # pv panel config / wind turbine config
|
||||
correction_factor = config.get("correction_factor", 1.0)
|
||||
capacity_per_sqkm = config["capacity_per_sqkm"]
|
||||
p_nom_max_meth = config.get("potential", "conservative")
|
||||
|
||||
if isinstance(config.get("corine", {}), list):
|
||||
config['corine'] = {'grid_codes': config['corine']}
|
||||
config["corine"] = {"grid_codes": config["corine"]}
|
||||
|
||||
if correction_factor != 1.:
|
||||
logger.info(f'correction_factor is set as {correction_factor}')
|
||||
if correction_factor != 1.0:
|
||||
logger.info(f"correction_factor is set as {correction_factor}")
|
||||
|
||||
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
|
||||
client = Client(cluster, asynchronous=True)
|
||||
|
||||
cutout = atlite.Cutout(snakemake.input['cutout'])
|
||||
cutout = atlite.Cutout(snakemake.input["cutout"])
|
||||
regions = gpd.read_file(snakemake.input.regions)
|
||||
assert not regions.empty, (f"List of regions in {snakemake.input.regions} is empty, please "
|
||||
"disable the corresponding renewable technology")
|
||||
assert not regions.empty, (
|
||||
f"List of regions in {snakemake.input.regions} is empty, please "
|
||||
"disable the corresponding renewable technology"
|
||||
)
|
||||
# do not pull up, set_index does not work if geo dataframe is empty
|
||||
regions = regions.set_index('name').rename_axis('bus')
|
||||
regions = regions.set_index("name").rename_axis("bus")
|
||||
buses = regions.index
|
||||
|
||||
res = config.get("excluder_resolution", 100)
|
||||
excluder = atlite.ExclusionContainer(crs=3035, res=res)
|
||||
|
||||
if config['natura']:
|
||||
if config["natura"]:
|
||||
excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)
|
||||
|
||||
corine = config.get("corine", {})
|
||||
if "grid_codes" in corine:
|
||||
codes = corine["grid_codes"]
|
||||
excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035)
|
||||
if corine.get("distance", 0.) > 0.:
|
||||
if corine.get("distance", 0.0) > 0.0:
|
||||
codes = corine["distance_grid_codes"]
|
||||
buffer = corine["distance"]
|
||||
excluder.add_raster(snakemake.input.corine, codes=codes, buffer=buffer, crs=3035)
|
||||
excluder.add_raster(
|
||||
snakemake.input.corine, codes=codes, buffer=buffer, crs=3035
|
||||
)
|
||||
|
||||
if "ship_threshold" in config:
|
||||
shipping_threshold=config["ship_threshold"] * 8760 * 6 # approximation because 6 years of data which is hourly collected
|
||||
shipping_threshold = (
|
||||
config["ship_threshold"] * 8760 * 6
|
||||
) # approximation because 6 years of data which is hourly collected
|
||||
func = functools.partial(np.less, shipping_threshold)
|
||||
excluder.add_raster(snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True)
|
||||
excluder.add_raster(
|
||||
snakemake.input.ship_density, codes=func, crs=4326, allow_no_overlap=True
|
||||
)
|
||||
|
||||
if "max_depth" in config:
|
||||
# lambda not supported for atlite + multiprocessing
|
||||
# use named function np.greater with partially frozen argument instead
|
||||
# and exclude areas where: -max_depth > grid cell depth
|
||||
func = functools.partial(np.greater,-config['max_depth'])
|
||||
func = functools.partial(np.greater, -config["max_depth"])
|
||||
excluder.add_raster(snakemake.input.gebco, codes=func, crs=4326, nodata=-1000)
|
||||
|
||||
if 'min_shore_distance' in config:
|
||||
buffer = config['min_shore_distance']
|
||||
if "min_shore_distance" in config:
|
||||
buffer = config["min_shore_distance"]
|
||||
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)
|
||||
|
||||
if 'max_shore_distance' in config:
|
||||
buffer = config['max_shore_distance']
|
||||
excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer, invert=True)
|
||||
if "max_shore_distance" in config:
|
||||
buffer = config["max_shore_distance"]
|
||||
excluder.add_geometry(
|
||||
snakemake.input.country_shapes, buffer=buffer, invert=True
|
||||
)
|
||||
|
||||
kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
|
||||
if noprogress:
|
||||
logger.info('Calculate landuse availabilities...')
|
||||
logger.info("Calculate landuse availabilities...")
|
||||
start = time.time()
|
||||
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
|
||||
duration = time.time() - start
|
||||
logger.info(f'Completed availability calculation ({duration:2.2f}s)')
|
||||
logger.info(f"Completed availability calculation ({duration:2.2f}s)")
|
||||
else:
|
||||
availability = cutout.availabilitymatrix(regions, excluder, **kwargs)
|
||||
|
||||
area = cutout.grid.to_crs(3035).area / 1e6
|
||||
area = xr.DataArray(area.values.reshape(cutout.shape),
|
||||
[cutout.coords['y'], cutout.coords['x']])
|
||||
area = xr.DataArray(
|
||||
area.values.reshape(cutout.shape), [cutout.coords["y"], cutout.coords["x"]]
|
||||
)
|
||||
|
||||
potential = capacity_per_sqkm * availability.sum('bus') * area
|
||||
func = getattr(cutout, resource.pop('method'))
|
||||
resource['dask_kwargs'] = {"scheduler": client}
|
||||
potential = capacity_per_sqkm * availability.sum("bus") * area
|
||||
func = getattr(cutout, resource.pop("method"))
|
||||
resource["dask_kwargs"] = {"scheduler": client}
|
||||
capacity_factor = correction_factor * func(capacity_factor=True, **resource)
|
||||
layout = capacity_factor * area * capacity_per_sqkm
|
||||
profile, capacities = func(matrix=availability.stack(spatial=['y','x']),
|
||||
layout=layout, index=buses,
|
||||
per_unit=True, return_capacity=True, **resource)
|
||||
profile, capacities = func(
|
||||
matrix=availability.stack(spatial=["y", "x"]),
|
||||
layout=layout,
|
||||
index=buses,
|
||||
per_unit=True,
|
||||
return_capacity=True,
|
||||
**resource,
|
||||
)
|
||||
|
||||
logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')")
|
||||
if p_nom_max_meth == 'simple':
|
||||
if p_nom_max_meth == "simple":
|
||||
p_nom_max = capacity_per_sqkm * availability @ area
|
||||
elif p_nom_max_meth == 'conservative':
|
||||
max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y'])
|
||||
elif p_nom_max_meth == "conservative":
|
||||
max_cap_factor = capacity_factor.where(availability != 0).max(["x", "y"])
|
||||
p_nom_max = capacities / max_cap_factor
|
||||
else:
|
||||
raise AssertionError('Config key `potential` should be one of "simple" '
|
||||
f'(default) or "conservative", not "{p_nom_max_meth}"')
|
||||
raise AssertionError(
|
||||
'Config key `potential` should be one of "simple" '
|
||||
f'(default) or "conservative", not "{p_nom_max_meth}"'
|
||||
)
|
||||
|
||||
logger.info("Calculate average distances.")
|
||||
layoutmatrix = (layout * availability).stack(spatial=["y", "x"])
|
||||
|
||||
|
||||
logger.info('Calculate average distances.')
|
||||
layoutmatrix = (layout * availability).stack(spatial=['y','x'])
|
||||
|
||||
coords = cutout.grid[['x', 'y']]
|
||||
bus_coords = regions[['x', 'y']]
|
||||
coords = cutout.grid[["x", "y"]]
|
||||
bus_coords = regions[["x", "y"]]
|
||||
|
||||
average_distance = []
|
||||
centre_of_mass = []
|
||||
@ -316,34 +334,40 @@ if __name__ == '__main__':
|
||||
centre_of_mass.append(co.values.T @ (row / row.sum()))
|
||||
|
||||
average_distance = xr.DataArray(average_distance, [buses])
|
||||
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])])
|
||||
|
||||
|
||||
ds = xr.merge([(correction_factor * profile).rename('profile'),
|
||||
capacities.rename('weight'),
|
||||
p_nom_max.rename('p_nom_max'),
|
||||
potential.rename('potential'),
|
||||
average_distance.rename('average_distance')])
|
||||
centre_of_mass = xr.DataArray(centre_of_mass, [buses, ("spatial", ["x", "y"])])
|
||||
|
||||
ds = xr.merge(
|
||||
[
|
||||
(correction_factor * profile).rename("profile"),
|
||||
capacities.rename("weight"),
|
||||
p_nom_max.rename("p_nom_max"),
|
||||
potential.rename("potential"),
|
||||
average_distance.rename("average_distance"),
|
||||
]
|
||||
)
|
||||
|
||||
if snakemake.wildcards.technology.startswith("offwind"):
|
||||
logger.info('Calculate underwater fraction of connections.')
|
||||
offshore_shape = gpd.read_file(snakemake.input['offshore_shapes']).unary_union
|
||||
logger.info("Calculate underwater fraction of connections.")
|
||||
offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).unary_union
|
||||
underwater_fraction = []
|
||||
for bus in buses:
|
||||
p = centre_of_mass.sel(bus=bus).data
|
||||
line = LineString([p, regions.loc[bus, ['x', 'y']]])
|
||||
line = LineString([p, regions.loc[bus, ["x", "y"]]])
|
||||
frac = line.intersection(offshore_shape).length / line.length
|
||||
underwater_fraction.append(frac)
|
||||
|
||||
ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses])
|
||||
ds["underwater_fraction"] = xr.DataArray(underwater_fraction, [buses])
|
||||
|
||||
# select only buses with some capacity and minimal capacity factor
|
||||
ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) &
|
||||
(ds['p_nom_max'] > config.get('min_p_nom_max', 0.))))
|
||||
ds = ds.sel(
|
||||
bus=(
|
||||
(ds["profile"].mean("time") > config.get("min_p_max_pu", 0.0))
|
||||
& (ds["p_nom_max"] > config.get("min_p_nom_max", 0.0))
|
||||
)
|
||||
)
|
||||
|
||||
if 'clip_p_max_pu' in config:
|
||||
min_p_max_pu = config['clip_p_max_pu']
|
||||
ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0)
|
||||
if "clip_p_max_pu" in config:
|
||||
min_p_max_pu = config["clip_p_max_pu"]
|
||||
ds["profile"] = ds["profile"].where(ds["profile"] >= min_p_max_pu, 0)
|
||||
|
||||
ds.to_netcdf(snakemake.output.profile)
|
||||
|
@ -1,9 +1,12 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""
|
||||
Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 <https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics>`_ areas.
|
||||
Creates GIS shape files of the countries, exclusive economic zones and `NUTS3 <
|
||||
https://en.wikipedia.org/wiki/Nomenclature_of_Territorial_Units_for_Statistics>
|
||||
`_ areas.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -64,22 +67,20 @@ Outputs
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import numpy as np
|
||||
from operator import attrgetter
|
||||
from functools import reduce
|
||||
from itertools import takewhile
|
||||
from operator import attrgetter
|
||||
|
||||
import pandas as pd
|
||||
import geopandas as gpd
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import pycountry as pyc
|
||||
from _helpers import configure_logging
|
||||
from shapely.geometry import MultiPolygon, Polygon
|
||||
from shapely.ops import unary_union
|
||||
import pycountry as pyc
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -94,40 +95,58 @@ def _get_country(target, **keys):
|
||||
|
||||
def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
|
||||
if isinstance(polys, MultiPolygon):
|
||||
polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
|
||||
polys = sorted(polys.geoms, key=attrgetter("area"), reverse=True)
|
||||
mainpoly = polys[0]
|
||||
mainlength = np.sqrt(mainpoly.area/(2.*np.pi))
|
||||
mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi))
|
||||
if mainpoly.area > minarea:
|
||||
polys = MultiPolygon([p
|
||||
polys = MultiPolygon(
|
||||
[
|
||||
p
|
||||
for p in takewhile(lambda p: p.area > minarea, polys)
|
||||
if not filterremote or (mainpoly.distance(p) < mainlength)])
|
||||
if not filterremote or (mainpoly.distance(p) < mainlength)
|
||||
]
|
||||
)
|
||||
else:
|
||||
polys = mainpoly
|
||||
return polys.simplify(tolerance=tolerance)
|
||||
|
||||
|
||||
def countries(naturalearth, country_list):
|
||||
if 'RS' in country_list: country_list.append('KV')
|
||||
if "RS" in country_list:
|
||||
country_list.append("KV")
|
||||
|
||||
df = gpd.read_file(naturalearth)
|
||||
|
||||
# Names are a hassle in naturalearth, try several fields
|
||||
fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3'))
|
||||
df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
|
||||
fieldnames = (
|
||||
df[x].where(lambda s: s != "-99") for x in ("ISO_A2", "WB_A2", "ADM0_A3")
|
||||
)
|
||||
df["name"] = reduce(lambda x, y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]
|
||||
|
||||
df = df.loc[df.name.isin(country_list) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))]
|
||||
s = df.set_index('name')['geometry'].map(_simplify_polys)
|
||||
if 'RS' in country_list: s['RS'] = s['RS'].union(s.pop('KV'))
|
||||
df = df.loc[
|
||||
df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
|
||||
]
|
||||
s = df.set_index("name")["geometry"].map(_simplify_polys)
|
||||
if "RS" in country_list:
|
||||
s["RS"] = s["RS"].union(s.pop("KV"))
|
||||
|
||||
return s
|
||||
|
||||
|
||||
def eez(country_shapes, eez, country_list):
|
||||
df = gpd.read_file(eez)
|
||||
df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in country_list])]
|
||||
df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c))
|
||||
s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False))
|
||||
s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3})
|
||||
df = df.loc[
|
||||
df["ISO_3digit"].isin(
|
||||
[_get_country("alpha_3", alpha_2=c) for c in country_list]
|
||||
)
|
||||
]
|
||||
df["name"] = df["ISO_3digit"].map(lambda c: _get_country("alpha_2", alpha_3=c))
|
||||
s = df.set_index("name").geometry.map(
|
||||
lambda s: _simplify_polys(s, filterremote=False)
|
||||
)
|
||||
s = gpd.GeoSeries(
|
||||
{k: v for k, v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3}
|
||||
)
|
||||
s = s.to_frame("geometry")
|
||||
s.index.name = "name"
|
||||
return s
|
||||
@ -140,84 +159,121 @@ def country_cover(country_shapes, eez_shapes=None):
|
||||
|
||||
europe_shape = unary_union(shapes)
|
||||
if isinstance(europe_shape, MultiPolygon):
|
||||
europe_shape = max(europe_shape, key=attrgetter('area'))
|
||||
europe_shape = max(europe_shape, key=attrgetter("area"))
|
||||
return Polygon(shell=europe_shape.exterior)
|
||||
|
||||
|
||||
def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
|
||||
df = gpd.read_file(nuts3)
|
||||
df = df.loc[df['STAT_LEVL_'] == 3]
|
||||
df['geometry'] = df['geometry'].map(_simplify_polys)
|
||||
df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id')
|
||||
df = df.loc[df["STAT_LEVL_"] == 3]
|
||||
df["geometry"] = df["geometry"].map(_simplify_polys)
|
||||
df = df.rename(columns={"NUTS_ID": "id"})[["id", "geometry"]].set_index("id")
|
||||
|
||||
pop = pd.read_table(nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python')
|
||||
pop = (pop
|
||||
.set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS']
|
||||
.applymap(lambda x: pd.to_numeric(x, errors='coerce'))
|
||||
.fillna(method='bfill', axis=1))['2014']
|
||||
pop = pd.read_table(nuts3pop, na_values=[":"], delimiter=" ?\t", engine="python")
|
||||
pop = (
|
||||
pop.set_index(
|
||||
pd.MultiIndex.from_tuples(pop.pop("unit,geo\\time").str.split(","))
|
||||
)
|
||||
.loc["THS"]
|
||||
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
|
||||
.fillna(method="bfill", axis=1)
|
||||
)["2014"]
|
||||
|
||||
gdp = pd.read_table(nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python')
|
||||
gdp = (gdp
|
||||
.set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB']
|
||||
.applymap(lambda x: pd.to_numeric(x, errors='coerce'))
|
||||
.fillna(method='bfill', axis=1))['2014']
|
||||
gdp = pd.read_table(nuts3gdp, na_values=[":"], delimiter=" ?\t", engine="python")
|
||||
gdp = (
|
||||
gdp.set_index(
|
||||
pd.MultiIndex.from_tuples(gdp.pop("unit,geo\\time").str.split(","))
|
||||
)
|
||||
.loc["EUR_HAB"]
|
||||
.applymap(lambda x: pd.to_numeric(x, errors="coerce"))
|
||||
.fillna(method="bfill", axis=1)
|
||||
)["2014"]
|
||||
|
||||
cantons = pd.read_csv(ch_cantons)
|
||||
cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS']
cantons = cantons.str.pad(5, side='right', fillchar='0')
cantons = cantons.set_index(cantons["HASC"].str[3:])["NUTS"]
cantons = cantons.str.pad(5, side="right", fillchar="0")

swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0)
swiss.columns = swiss.columns.to_series().map(cantons)

swiss_pop = pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':])
swiss_pop = pd.to_numeric(swiss.loc["Residents in 1000", "CH040":])
pop = pd.concat([pop, swiss_pop])
swiss_gdp = pd.to_numeric(swiss.loc['Gross domestic product per capita in Swiss francs', 'CH040':])
swiss_gdp = pd.to_numeric(
swiss.loc["Gross domestic product per capita in Swiss francs", "CH040":]
)
gdp = pd.concat([gdp, swiss_gdp])

df = df.join(pd.DataFrame(dict(pop=pop, gdp=gdp)))

df['country'] = df.index.to_series().str[:2].replace(dict(UK='GB', EL='GR'))
df["country"] = df.index.to_series().str[:2].replace(dict(UK="GB", EL="GR"))

excludenuts = pd.Index(('FRA10', 'FRA20', 'FRA30', 'FRA40', 'FRA50',
'PT200', 'PT300',
'ES707', 'ES703', 'ES704','ES705', 'ES706', 'ES708', 'ES709',
'FI2', 'FR9'))
excludecountry = pd.Index(('MT', 'TR', 'LI', 'IS', 'CY', 'KV'))
excludenuts = pd.Index(
(
"FRA10",
"FRA20",
"FRA30",
"FRA40",
"FRA50",
"PT200",
"PT300",
"ES707",
"ES703",
"ES704",
"ES705",
"ES706",
"ES708",
"ES709",
"FI2",
"FR9",
)
)
excludecountry = pd.Index(("MT", "TR", "LI", "IS", "CY", "KV"))

df = df.loc[df.index.difference(excludenuts)]
df = df.loc[~df.country.isin(excludecountry)]

manual = gpd.GeoDataFrame(
[['BA1', 'BA', 3871.],
['RS1', 'RS', 7210.],
['AL1', 'AL', 2893.]],
columns=['NUTS_ID', 'country', 'pop']
).set_index('NUTS_ID')
manual['geometry'] = manual['country'].map(country_shapes)
[["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
columns=["NUTS_ID", "country", "pop"],
).set_index("NUTS_ID")
manual["geometry"] = manual["country"].map(country_shapes)
manual = manual.dropna()

df = pd.concat([df, manual], sort=False)

df.loc['ME000', 'pop'] = 650.
df.loc["ME000", "pop"] = 650.0

return df


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_shapes')

snakemake = mock_snakemake("build_shapes")
configure_logging(snakemake)

country_shapes = countries(snakemake.input.naturalearth, snakemake.config['countries'])
country_shapes = countries(
snakemake.input.naturalearth, snakemake.config["countries"]
)
country_shapes.reset_index().to_file(snakemake.output.country_shapes)

offshore_shapes = eez(country_shapes, snakemake.input.eez, snakemake.config['countries'])
offshore_shapes = eez(
country_shapes, snakemake.input.eez, snakemake.config["countries"]
)
offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes)

europe_shape = gpd.GeoDataFrame(geometry=[country_cover(country_shapes, offshore_shapes.geometry)])
europe_shape = gpd.GeoDataFrame(
geometry=[country_cover(country_shapes, offshore_shapes.geometry)]
)
europe_shape.reset_index().to_file(snakemake.output.europe_shape)

nuts3_shapes = nuts3(country_shapes, snakemake.input.nuts3, snakemake.input.nuts3pop,
snakemake.input.nuts3gdp, snakemake.input.ch_cantons, snakemake.input.ch_popgdp)
nuts3_shapes = nuts3(
country_shapes,
snakemake.input.nuts3,
snakemake.input.nuts3pop,
snakemake.input.nuts3gdp,
snakemake.input.ch_cantons,
snakemake.input.ch_popgdp,
)
nuts3_shapes.reset_index().to_file(snakemake.output.nuts3_shapes)
@ -1,9 +1,14 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

"""
Transforms the global ship density data from https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-Traffic-Density to the size of the considered cutout. The global ship density raster is later used for the exclusion when calculating the offshore potentials.
Transforms the global ship density data from
https://datacatalog.worldbank.org/search/dataset/0037580/Global-Shipping-
Traffic-Density to the size of the considered cutout. The global ship density
raster is later used for the exclusion when calculating the offshore
potentials.

Relevant Settings
-----------------
@ -30,23 +35,23 @@ Outputs

Description
-----------

"""

import logging
import os
import zipfile

import xarray as xr
from _helpers import configure_logging
from build_natura_raster import determine_cutout_xXyY

import zipfile
import xarray as xr
import os

logger = logging.getLogger(__name__)

if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_ship_raster')

snakemake = mock_snakemake("build_ship_raster")
configure_logging(snakemake)

cutouts = snakemake.input.cutouts
@ -55,7 +60,9 @@ if __name__ == "__main__":
with zipfile.ZipFile(snakemake.input.ship_density) as zip_f:
zip_f.extract("shipdensity_global.tif")
with xr.open_rasterio("shipdensity_global.tif") as ship_density:
ship_density = ship_density.drop(["band"]).sel(x=slice(min(xs),max(Xs)), y=slice(max(Ys),min(ys)))
ship_density = ship_density.drop(["band"]).sel(
x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys))
)
ship_density.to_netcdf(snakemake.output[0])

os.remove("shipdensity_global.tif")
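The reversed y-slice above works because GeoTIFF rasters are stored with a descending y coordinate, so the selection has to run from the maximum to the minimum latitude. A minimal, self-contained sketch with synthetic data (not the WorldBank raster itself; the xs/Xs/ys/Ys naming only mirrors the bounds returned by determine_cutout_xXyY):

.. code:: python

    import numpy as np
    import xarray as xr

    # Synthetic raster with ascending x and descending y, as in a GeoTIFF.
    da = xr.DataArray(
        np.arange(16).reshape(4, 4),
        coords={"y": [40.0, 30.0, 20.0, 10.0], "x": [0.0, 10.0, 20.0, 30.0]},
        dims=("y", "x"),
    )

    # Hypothetical cutout extents, one entry per cutout as in the script above.
    xs, Xs, ys, Ys = [5.0], [25.0], [15.0], [35.0]

    # y is descending, so the slice runs from the maximum to the minimum value.
    clipped = da.sel(x=slice(min(xs), max(Xs)), y=slice(max(Ys), min(ys)))
    print(clipped.coords)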
@ -1,10 +1,12 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

# coding: utf-8
"""
Creates networks clustered to ``{cluster}`` number of zones with aggregated buses, generators and transmission corridors.
Creates networks clustered to ``{cluster}`` number of zones with aggregated
buses, generators and transmission corridors.

Relevant Settings
-----------------
@ -118,28 +120,28 @@ Exemplary unsolved network clustered to 37 nodes:
|
||||
.. image:: ../img/elec_s_37.png
|
||||
:scale: 40 %
|
||||
:align: center
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging, update_p_nom_max, get_aggregation_strategies
|
||||
|
||||
import pypsa
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import geopandas as gpd
|
||||
import pyomo.environ as po
|
||||
import matplotlib.pyplot as plt
|
||||
import seaborn as sns
|
||||
|
||||
import warnings
|
||||
from functools import reduce
|
||||
|
||||
from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_hac,
|
||||
busmap_by_greedy_modularity, get_clustering_from_busmap)
|
||||
import geopandas as gpd
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import pyomo.environ as po
|
||||
import pypsa
|
||||
import seaborn as sns
|
||||
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
|
||||
from pypsa.networkclustering import (
|
||||
busmap_by_greedy_modularity,
|
||||
busmap_by_hac,
|
||||
busmap_by_kmeans,
|
||||
get_clustering_from_busmap,
|
||||
)
|
||||
|
||||
import warnings
|
||||
warnings.filterwarnings(action='ignore', category=UserWarning)
|
||||
warnings.filterwarnings(action="ignore", category=UserWarning)
|
||||
|
||||
from add_electricity import load_costs
|
||||
|
||||
@ -148,19 +150,21 @@ idx = pd.IndexSlice
logger = logging.getLogger(__name__)


def normed(x): return (x/x.sum()).fillna(0.)
def normed(x):
return (x / x.sum()).fillna(0.0)


def weighting_for_country(n, x):
|
||||
conv_carriers = {'OCGT','CCGT','PHS', 'hydro'}
|
||||
gen = (n
|
||||
.generators.loc[n.generators.carrier.isin(conv_carriers)]
|
||||
.groupby('bus').p_nom.sum()
|
||||
.reindex(n.buses.index, fill_value=0.) +
|
||||
n
|
||||
.storage_units.loc[n.storage_units.carrier.isin(conv_carriers)]
|
||||
.groupby('bus').p_nom.sum()
|
||||
.reindex(n.buses.index, fill_value=0.))
|
||||
conv_carriers = {"OCGT", "CCGT", "PHS", "hydro"}
|
||||
gen = n.generators.loc[n.generators.carrier.isin(conv_carriers)].groupby(
|
||||
"bus"
|
||||
).p_nom.sum().reindex(n.buses.index, fill_value=0.0) + n.storage_units.loc[
|
||||
n.storage_units.carrier.isin(conv_carriers)
|
||||
].groupby(
|
||||
"bus"
|
||||
).p_nom.sum().reindex(
|
||||
n.buses.index, fill_value=0.0
|
||||
)
|
||||
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
|
||||
|
||||
b_i = x.index
|
||||
@ -168,34 +172,41 @@ def weighting_for_country(n, x):
|
||||
l = normed(load.reindex(b_i, fill_value=0))
|
||||
|
||||
w = g + l
|
||||
return (w * (100. / w.max())).clip(lower=1.).astype(int)
|
||||
return (w * (100.0 / w.max())).clip(lower=1.0).astype(int)
|
||||
|
||||
|
||||
def get_feature_for_hac(n, buses_i=None, feature=None):
|
||||
|
||||
if buses_i is None:
|
||||
buses_i = n.buses.index
|
||||
|
||||
if feature is None:
|
||||
feature = "solar+onwind-time"
|
||||
|
||||
carriers = feature.split('-')[0].split('+')
|
||||
carriers = feature.split("-")[0].split("+")
|
||||
if "offwind" in carriers:
|
||||
carriers.remove("offwind")
|
||||
carriers = np.append(carriers, network.generators.carrier.filter(like='offwind').unique())
|
||||
carriers = np.append(
|
||||
carriers, network.generators.carrier.filter(like="offwind").unique()
|
||||
)
|
||||
|
||||
if feature.split('-')[1] == 'cap':
|
||||
if feature.split("-")[1] == "cap":
|
||||
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
|
||||
for carrier in carriers:
|
||||
gen_i = n.generators.query("carrier == @carrier").index
|
||||
attach = n.generators_t.p_max_pu[gen_i].mean().rename(index = n.generators.loc[gen_i].bus)
|
||||
attach = (
|
||||
n.generators_t.p_max_pu[gen_i]
|
||||
.mean()
|
||||
.rename(index=n.generators.loc[gen_i].bus)
|
||||
)
|
||||
feature_data[carrier] = attach
|
||||
|
||||
if feature.split('-')[1] == 'time':
|
||||
if feature.split("-")[1] == "time":
|
||||
feature_data = pd.DataFrame(columns=buses_i)
|
||||
for carrier in carriers:
|
||||
gen_i = n.generators.query("carrier == @carrier").index
|
||||
attach = n.generators_t.p_max_pu[gen_i].rename(columns = n.generators.loc[gen_i].bus)
|
||||
attach = n.generators_t.p_max_pu[gen_i].rename(
|
||||
columns=n.generators.loc[gen_i].bus
|
||||
)
|
||||
feature_data = pd.concat([feature_data, attach], axis=0)[buses_i]
|
||||
|
||||
feature_data = feature_data.T
|
||||
@ -208,59 +219,88 @@ def get_feature_for_hac(n, buses_i=None, feature=None):
|
||||
|
||||
|
||||
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
|
||||
"""Determine the number of clusters per country"""
|
||||
"""
|
||||
Determine the number of clusters per country.
|
||||
"""
|
||||
|
||||
L = (n.loads_t.p_set.mean()
|
||||
.groupby(n.loads.bus).sum()
|
||||
.groupby([n.buses.country, n.buses.sub_network]).sum()
|
||||
.pipe(normed))
|
||||
L = (
|
||||
n.loads_t.p_set.mean()
|
||||
.groupby(n.loads.bus)
|
||||
.sum()
|
||||
.groupby([n.buses.country, n.buses.sub_network])
|
||||
.sum()
|
||||
.pipe(normed)
|
||||
)
|
||||
|
||||
N = n.buses.groupby(['country', 'sub_network']).size()
|
||||
N = n.buses.groupby(["country", "sub_network"]).size()
|
||||
|
||||
assert n_clusters >= len(N) and n_clusters <= N.sum(), \
|
||||
f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
|
||||
assert (
|
||||
n_clusters >= len(N) and n_clusters <= N.sum()
|
||||
), f"Number of clusters must be {len(N)} <= n_clusters <= {N.sum()} for this selection of countries."
|
||||
|
||||
if focus_weights is not None:
|
||||
|
||||
total_focus = sum(list(focus_weights.values()))
|
||||
|
||||
assert total_focus <= 1.0, "The sum of focus weights must be less than or equal to 1."
|
||||
assert (
|
||||
total_focus <= 1.0
|
||||
), "The sum of focus weights must be less than or equal to 1."
|
||||
|
||||
for country, weight in focus_weights.items():
|
||||
L[country] = weight / len(L[country])
|
||||
|
||||
remainder = [c not in focus_weights.keys() for c in L.index.get_level_values('country')]
|
||||
remainder = [
|
||||
c not in focus_weights.keys() for c in L.index.get_level_values("country")
|
||||
]
|
||||
L[remainder] = L.loc[remainder].pipe(normed) * (1 - total_focus)
|
||||
|
||||
logger.warning('Using custom focus weights for determining number of clusters.')
|
||||
logger.warning("Using custom focus weights for determining number of clusters.")
|
||||
|
||||
assert np.isclose(L.sum(), 1.0, rtol=1e-3), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
|
||||
assert np.isclose(
|
||||
L.sum(), 1.0, rtol=1e-3
|
||||
), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
|
||||
|
||||
m = po.ConcreteModel()
|
||||
|
||||
def n_bounds(model, *n_id):
|
||||
return (1, N[n_id])
|
||||
|
||||
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
|
||||
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
|
||||
m.objective = po.Objective(expr=sum((m.n[i] - L.loc[i]*n_clusters)**2 for i in L.index),
|
||||
sense=po.minimize)
|
||||
m.objective = po.Objective(
|
||||
expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index),
|
||||
sense=po.minimize,
|
||||
)
|
||||
|
||||
opt = po.SolverFactory(solver_name)
|
||||
if not opt.has_capability('quadratic_objective'):
|
||||
logger.warning(f'The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`.')
|
||||
opt = po.SolverFactory('ipopt')
|
||||
if not opt.has_capability("quadratic_objective"):
|
||||
logger.warning(
|
||||
f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`."
|
||||
)
|
||||
opt = po.SolverFactory("ipopt")
|
||||
|
||||
results = opt.solve(m)
|
||||
assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"
|
||||
assert (
|
||||
results["Solver"][0]["Status"] == "ok"
|
||||
), f"Solver returned non-optimally: {results}"
|
||||
|
||||
return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
|
||||
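For intuition, the quadratic program above spreads ``n_clusters`` across countries and sub-networks roughly in proportion to their load share, while keeping each count between one and the number of available buses. A rough standalone illustration of that allocation idea, using plain pandas largest-remainder rounding on toy numbers rather than the Pyomo model used in the script:

.. code:: python

    import numpy as np
    import pandas as pd

    # Toy load shares per country (sum to 1) and available buses per country.
    L = pd.Series({"DE": 0.5, "FR": 0.3, "PL": 0.2})
    N = pd.Series({"DE": 20, "FR": 10, "PL": 5})
    n_clusters = 12

    # Proportional target, clipped to the feasible range [1, N].
    target = (L * n_clusters).clip(lower=1, upper=N)

    # Largest-remainder rounding so the integer counts still sum to n_clusters.
    alloc = np.floor(target).astype(int)
    remainder = (target - alloc).sort_values(ascending=False)
    for country in remainder.index[: n_clusters - alloc.sum()]:
        alloc[country] += 1

    print(alloc, alloc.sum())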
|
||||
|
||||
def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", feature=None, **algorithm_kwds):
|
||||
def busmap_for_n_clusters(
|
||||
n,
|
||||
n_clusters,
|
||||
solver_name,
|
||||
focus_weights=None,
|
||||
algorithm="kmeans",
|
||||
feature=None,
|
||||
**algorithm_kwds,
|
||||
):
|
||||
if algorithm == "kmeans":
|
||||
algorithm_kwds.setdefault('n_init', 1000)
|
||||
algorithm_kwds.setdefault('max_iter', 30000)
|
||||
algorithm_kwds.setdefault('tol', 1e-6)
|
||||
algorithm_kwds.setdefault('random_state', 0)
|
||||
algorithm_kwds.setdefault("n_init", 1000)
|
||||
algorithm_kwds.setdefault("max_iter", 30000)
|
||||
algorithm_kwds.setdefault("tol", 1e-6)
|
||||
algorithm_kwds.setdefault("random_state", 0)
|
||||
|
||||
def fix_country_assignment_for_hac(n):
|
||||
from scipy.sparse import csgraph
|
||||
@ -269,24 +309,29 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
|
||||
for country in n.buses.country.unique():
|
||||
m = n[n.buses.country == country].copy()
|
||||
|
||||
_, labels = csgraph.connected_components(m.adjacency_matrix(), directed=False)
|
||||
_, labels = csgraph.connected_components(
|
||||
m.adjacency_matrix(), directed=False
|
||||
)
|
||||
|
||||
component = pd.Series(labels, index=m.buses.index)
|
||||
component_sizes = component.value_counts()
|
||||
|
||||
if len(component_sizes) > 1:
|
||||
disconnected_bus = component[component==component_sizes.index[-1]].index[0]
|
||||
disconnected_bus = component[
|
||||
component == component_sizes.index[-1]
|
||||
].index[0]
|
||||
|
||||
neighbor_bus = (
|
||||
n.lines.query("bus0 == @disconnected_bus or bus1 == @disconnected_bus")
|
||||
.iloc[0][['bus0', 'bus1']]
|
||||
)
|
||||
new_country = list(set(n.buses.loc[neighbor_bus].country)-set([country]))[0]
|
||||
neighbor_bus = n.lines.query(
|
||||
"bus0 == @disconnected_bus or bus1 == @disconnected_bus"
|
||||
).iloc[0][["bus0", "bus1"]]
|
||||
new_country = list(
|
||||
set(n.buses.loc[neighbor_bus].country) - set([country])
|
||||
)[0]
|
||||
|
||||
logger.info(
|
||||
f"overwriting country `{country}` of bus `{disconnected_bus}` "
|
||||
f"to new country `{new_country}`, because it is disconnected "
|
||||
"from its inital inter-country transmission grid."
|
||||
"from its initial inter-country transmission grid."
|
||||
)
|
||||
n.buses.at[disconnected_bus, "country"] = new_country
|
||||
return n
|
||||
@ -296,75 +341,107 @@ def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algori
|
||||
n = fix_country_assignment_for_hac(n)
|
||||
|
||||
if (algorithm != "hac") and (feature is not None):
|
||||
logger.warning(f"Keyword argument feature is only valid for algorithm `hac`. "
|
||||
f"Given feature `{feature}` will be ignored.")
|
||||
logger.warning(
|
||||
f"Keyword argument feature is only valid for algorithm `hac`. "
|
||||
f"Given feature `{feature}` will be ignored."
|
||||
)
|
||||
|
||||
n.determine_network_topology()
|
||||
|
||||
n_clusters = distribute_clusters(n, n_clusters, focus_weights=focus_weights, solver_name=solver_name)
|
||||
n_clusters = distribute_clusters(
|
||||
n, n_clusters, focus_weights=focus_weights, solver_name=solver_name
|
||||
)
|
||||
|
||||
def busmap_for_country(x):
|
||||
prefix = x.name[0] + x.name[1] + ' '
|
||||
prefix = x.name[0] + x.name[1] + " "
|
||||
logger.debug(f"Determining busmap for country {prefix[:-1]}")
|
||||
if len(x) == 1:
|
||||
return pd.Series(prefix + '0', index=x.index)
|
||||
return pd.Series(prefix + "0", index=x.index)
|
||||
weight = weighting_for_country(n, x)
|
||||
|
||||
if algorithm == "kmeans":
|
||||
return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds)
|
||||
return prefix + busmap_by_kmeans(
|
||||
n, weight, n_clusters[x.name], buses_i=x.index, **algorithm_kwds
|
||||
)
|
||||
elif algorithm == "hac":
|
||||
return prefix + busmap_by_hac(n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index])
|
||||
return prefix + busmap_by_hac(
|
||||
n, n_clusters[x.name], buses_i=x.index, feature=feature.loc[x.index]
|
||||
)
|
||||
elif algorithm == "modularity":
|
||||
return prefix + busmap_by_greedy_modularity(n, n_clusters[x.name], buses_i=x.index)
|
||||
return prefix + busmap_by_greedy_modularity(
|
||||
n, n_clusters[x.name], buses_i=x.index
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}.")
|
||||
raise ValueError(
|
||||
f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
|
||||
)
|
||||
|
||||
return (n.buses.groupby(['country', 'sub_network'], group_keys=False)
|
||||
.apply(busmap_for_country).squeeze().rename('busmap'))
|
||||
return (
|
||||
n.buses.groupby(["country", "sub_network"], group_keys=False)
|
||||
.apply(busmap_for_country)
|
||||
.squeeze()
|
||||
.rename("busmap")
|
||||
)
|
||||
|
||||
|
||||
def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carriers=None,
|
||||
line_length_factor=1.25, aggregation_strategies=dict(), solver_name="cbc",
|
||||
algorithm="hac", feature=None, extended_link_costs=0, focus_weights=None):
|
||||
def clustering_for_n_clusters(
|
||||
n,
|
||||
n_clusters,
|
||||
custom_busmap=False,
|
||||
aggregate_carriers=None,
|
||||
line_length_factor=1.25,
|
||||
aggregation_strategies=dict(),
|
||||
solver_name="cbc",
|
||||
algorithm="hac",
|
||||
feature=None,
|
||||
extended_link_costs=0,
|
||||
focus_weights=None,
|
||||
):
|
||||
|
||||
bus_strategies, generator_strategies = get_aggregation_strategies(aggregation_strategies)
|
||||
bus_strategies, generator_strategies = get_aggregation_strategies(
|
||||
aggregation_strategies
|
||||
)
|
||||
|
||||
if not isinstance(custom_busmap, pd.Series):
|
||||
busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm, feature)
|
||||
busmap = busmap_for_n_clusters(
|
||||
n, n_clusters, solver_name, focus_weights, algorithm, feature
|
||||
)
|
||||
else:
|
||||
busmap = custom_busmap
|
||||
|
||||
clustering = get_clustering_from_busmap(
|
||||
n, busmap,
|
||||
n,
|
||||
busmap,
|
||||
bus_strategies=bus_strategies,
|
||||
aggregate_generators_weighted=True,
|
||||
aggregate_generators_carriers=aggregate_carriers,
|
||||
aggregate_one_ports=["Load", "StorageUnit"],
|
||||
line_length_factor=line_length_factor,
|
||||
generator_strategies=generator_strategies,
|
||||
scale_link_capital_costs=False)
|
||||
scale_link_capital_costs=False,
|
||||
)
|
||||
|
||||
if not n.links.empty:
|
||||
nc = clustering.network
|
||||
nc.links['underwater_fraction'] = (n.links.eval('underwater_fraction * length')
|
||||
.div(nc.links.length).dropna())
|
||||
nc.links['capital_cost'] = (nc.links['capital_cost']
|
||||
.add((nc.links.length - n.links.length)
|
||||
.clip(lower=0).mul(extended_link_costs),
|
||||
fill_value=0))
|
||||
nc.links["underwater_fraction"] = (
|
||||
n.links.eval("underwater_fraction * length").div(nc.links.length).dropna()
|
||||
)
|
||||
nc.links["capital_cost"] = nc.links["capital_cost"].add(
|
||||
(nc.links.length - n.links.length).clip(lower=0).mul(extended_link_costs),
|
||||
fill_value=0,
|
||||
)
|
||||
|
||||
return clustering
|
||||
|
||||
|
||||
def cluster_regions(busmaps, input=None, output=None):
|
||||
|
||||
busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
|
||||
|
||||
for which in ('regions_onshore', 'regions_offshore'):
|
||||
for which in ("regions_onshore", "regions_offshore"):
|
||||
regions = gpd.read_file(getattr(input, which))
|
||||
regions = regions.reindex(columns=["name", "geometry"]).set_index('name')
|
||||
regions = regions.reindex(columns=["name", "geometry"]).set_index("name")
|
||||
regions_c = regions.dissolve(busmap)
|
||||
regions_c.index.name = 'name'
|
||||
regions_c.index.name = "name"
|
||||
regions_c = regions_c.reset_index()
|
||||
regions_c.to_file(getattr(output, which))
|
||||
|
||||
@ -375,28 +452,35 @@ def plot_busmap_for_n_clusters(n, n_clusters, fn=None):
|
||||
cr = sns.color_palette("hls", len(cs))
|
||||
n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
|
||||
if fn is not None:
|
||||
plt.savefig(fn, bbox_inches='tight')
|
||||
plt.savefig(fn, bbox_inches="tight")
|
||||
del cs, cr
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('cluster_network', simpl='', clusters='5')
|
||||
|
||||
snakemake = mock_snakemake("cluster_network", simpl="", clusters="5")
|
||||
configure_logging(snakemake)
|
||||
|
||||
n = pypsa.Network(snakemake.input.network)
|
||||
|
||||
focus_weights = snakemake.config.get('focus_weights', None)
|
||||
focus_weights = snakemake.config.get("focus_weights", None)
|
||||
|
||||
renewable_carriers = pd.Index([tech
|
||||
renewable_carriers = pd.Index(
|
||||
[
|
||||
tech
|
||||
for tech in n.generators.carrier.unique()
|
||||
if tech in snakemake.config['renewable']])
|
||||
if tech in snakemake.config["renewable"]
|
||||
]
|
||||
)
|
||||
|
||||
if snakemake.wildcards.clusters.endswith('m'):
|
||||
if snakemake.wildcards.clusters.endswith("m"):
|
||||
n_clusters = int(snakemake.wildcards.clusters[:-1])
|
||||
aggregate_carriers = snakemake.config["electricity"].get("conventional_carriers")
|
||||
elif snakemake.wildcards.clusters == 'all':
|
||||
aggregate_carriers = snakemake.config["electricity"].get(
|
||||
"conventional_carriers"
|
||||
)
|
||||
elif snakemake.wildcards.clusters == "all":
|
||||
n_clusters = len(n.buses)
|
||||
aggregate_carriers = None # All
|
||||
else:
|
||||
@ -407,21 +491,30 @@ if __name__ == "__main__":
|
||||
# Fast-path if no clustering is necessary
|
||||
busmap = n.buses.index.to_series()
|
||||
linemap = n.lines.index.to_series()
|
||||
clustering = pypsa.networkclustering.Clustering(n, busmap, linemap, linemap, pd.Series(dtype='O'))
|
||||
clustering = pypsa.networkclustering.Clustering(
|
||||
n, busmap, linemap, linemap, pd.Series(dtype="O")
|
||||
)
|
||||
else:
|
||||
line_length_factor = snakemake.config['lines']['length_factor']
|
||||
line_length_factor = snakemake.config["lines"]["length_factor"]
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760
|
||||
|
||||
hvac_overhead_cost = (load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
|
||||
.at['HVAC overhead', 'capital_cost'])
|
||||
hvac_overhead_cost = load_costs(
|
||||
snakemake.input.tech_costs,
|
||||
snakemake.config["costs"],
|
||||
snakemake.config["electricity"],
|
||||
Nyears,
|
||||
).at["HVAC overhead", "capital_cost"]
|
||||
|
||||
def consense(x):
|
||||
v = x.iat[0]
|
||||
assert ((x == v).all() or x.isnull().all()), (
|
||||
"The `potential` configuration option must agree for all renewable carriers, for now!"
|
||||
)
|
||||
assert (
|
||||
x == v
|
||||
).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
|
||||
return v
|
||||
aggregation_strategies = snakemake.config["clustering"].get("aggregation_strategies", {})
|
||||
|
||||
aggregation_strategies = snakemake.config["clustering"].get(
|
||||
"aggregation_strategies", {}
|
||||
)
|
||||
# translate str entries of aggregation_strategies to pd.Series functions:
|
||||
aggregation_strategies = {
|
||||
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
|
||||
@ -430,23 +523,39 @@ if __name__ == "__main__":
|
||||
|
||||
custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
|
||||
if custom_busmap:
|
||||
custom_busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True)
|
||||
custom_busmap = pd.read_csv(
|
||||
snakemake.input.custom_busmap, index_col=0, squeeze=True
|
||||
)
|
||||
custom_busmap.index = custom_busmap.index.astype(str)
|
||||
logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
|
||||
|
||||
cluster_config = snakemake.config.get('clustering', {}).get('cluster_network', {})
|
||||
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers,
|
||||
line_length_factor, aggregation_strategies,
|
||||
snakemake.config['solving']['solver']['name'],
|
||||
cluster_config = snakemake.config.get("clustering", {}).get(
|
||||
"cluster_network", {}
|
||||
)
|
||||
clustering = clustering_for_n_clusters(
|
||||
n,
|
||||
n_clusters,
|
||||
custom_busmap,
|
||||
aggregate_carriers,
|
||||
line_length_factor,
|
||||
aggregation_strategies,
|
||||
snakemake.config["solving"]["solver"]["name"],
|
||||
cluster_config.get("algorithm", "hac"),
|
||||
cluster_config.get("feature", "solar+onwind-time"),
|
||||
hvac_overhead_cost, focus_weights)
|
||||
hvac_overhead_cost,
|
||||
focus_weights,
|
||||
)
|
||||
|
||||
update_p_nom_max(clustering.network)
|
||||
|
||||
clustering.network.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
|
||||
clustering.network.meta = dict(
|
||||
snakemake.config, **dict(wildcards=dict(snakemake.wildcards))
|
||||
)
|
||||
clustering.network.export_to_netcdf(snakemake.output.network)
|
||||
for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative
|
||||
for attr in (
|
||||
"busmap",
|
||||
"linemap",
|
||||
): # also available: linemap_positive, linemap_negative
|
||||
getattr(clustering, attr).to_csv(snakemake.output[attr])
|
||||
|
||||
cluster_regions((clustering.busmap,), snakemake.input, snakemake.output)
|
||||
|
@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
@ -33,9 +34,9 @@ Outputs
Description
-----------

The following rule can be used to summarize the results in seperate .csv files:
The following rule can be used to summarize the results in separate .csv files:

.. code::
.. code:: bash

snakemake results/summaries/elec_s_all_lall_Co2L-3H_all
clusters
@ -46,21 +47,19 @@ The following rule can be used to summarize the results in seperate .csv files:
the line volume/cost cap field can be set to one of the following:
* ``lv1.25`` for a particular line volume extension by 25%
* ``lc1.25`` for a line cost extension by 25 %
* ``lall`` for all evalutated caps
* ``lall`` for all evaluated caps
* ``lvall`` for all line volume caps
* ``lcall`` for all line cost caps

Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.

"""

import logging
from _helpers import configure_logging

import os
import pypsa
import pandas as pd

import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, update_transmission_costs

idx = pd.IndexSlice
@ -79,34 +78,42 @@ def _add_indexed_rows(df, raw_index):
|
||||
|
||||
|
||||
def assign_carriers(n):
|
||||
|
||||
if "carrier" not in n.loads:
|
||||
n.loads["carrier"] = "electricity"
|
||||
for carrier in ["transport", "heat", "urban heat"]:
|
||||
n.loads.loc[n.loads.index.str.contains(carrier), "carrier"] = carrier
|
||||
|
||||
n.storage_units['carrier'].replace({'hydro': 'hydro+PHS', 'PHS': 'hydro+PHS'}, inplace=True)
|
||||
n.storage_units["carrier"].replace(
|
||||
{"hydro": "hydro+PHS", "PHS": "hydro+PHS"}, inplace=True
|
||||
)
|
||||
|
||||
if "carrier" not in n.lines:
|
||||
n.lines["carrier"] = "AC"
|
||||
|
||||
n.lines["carrier"].replace({"AC": "lines"}, inplace=True)
|
||||
|
||||
if n.links.empty: n.links["carrier"] = pd.Series(dtype=str)
|
||||
if n.links.empty:
|
||||
n.links["carrier"] = pd.Series(dtype=str)
|
||||
n.links["carrier"].replace({"DC": "lines"}, inplace=True)
|
||||
|
||||
if "EU gas store" in n.stores.index and n.stores.loc["EU gas Store","carrier"] == "":
|
||||
if (
|
||||
"EU gas store" in n.stores.index
|
||||
and n.stores.loc["EU gas Store", "carrier"] == ""
|
||||
):
|
||||
n.stores.loc["EU gas Store", "carrier"] = "gas Store"
|
||||
|
||||
|
||||
def calculate_costs(n, label, costs):
|
||||
|
||||
for c in n.iterate_components(n.branch_components|n.controllable_one_port_components^{"Load"}):
|
||||
for c in n.iterate_components(
|
||||
n.branch_components | n.controllable_one_port_components ^ {"Load"}
|
||||
):
|
||||
capital_costs = c.df.capital_cost * c.df[opt_name.get(c.name, "p") + "_nom_opt"]
|
||||
capital_costs_grouped = capital_costs.groupby(c.df.carrier).sum()
|
||||
|
||||
# Index tuple(s) indicating the newly to-be-added row(s)
|
||||
raw_index = tuple([[c.list_name],["capital"],list(capital_costs_grouped.index)])
|
||||
raw_index = tuple(
|
||||
[[c.list_name], ["capital"], list(capital_costs_grouped.index)]
|
||||
)
|
||||
costs = _add_indexed_rows(costs, raw_index)
|
||||
|
||||
costs.loc[idx[raw_index], label] = capital_costs_grouped.values
|
||||
@ -117,7 +124,7 @@ def calculate_costs(n, label, costs):
|
||||
continue
|
||||
elif c.name == "StorageUnit":
|
||||
p_all = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
|
||||
p_all[p_all < 0.] = 0.
|
||||
p_all[p_all < 0.0] = 0.0
|
||||
p = p_all.sum()
|
||||
else:
|
||||
p = c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0).sum()
|
||||
@ -126,38 +133,70 @@ def calculate_costs(n, label, costs):
|
||||
|
||||
marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()
|
||||
|
||||
costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])))
|
||||
costs = costs.reindex(
|
||||
costs.index.union(
|
||||
pd.MultiIndex.from_product(
|
||||
[[c.list_name], ["marginal"], marginal_costs_grouped.index]
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values
|
||||
costs.loc[
|
||||
idx[c.list_name, "marginal", list(marginal_costs_grouped.index)], label
|
||||
] = marginal_costs_grouped.values
|
||||
|
||||
return costs
|
||||
|
||||
def calculate_curtailment(n, label, curtailment):
|
||||
|
||||
avail = n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt).sum().groupby(n.generators.carrier).sum()
|
||||
def calculate_curtailment(n, label, curtailment):
|
||||
avail = (
|
||||
n.generators_t.p_max_pu.multiply(n.generators.p_nom_opt)
|
||||
.sum()
|
||||
.groupby(n.generators.carrier)
|
||||
.sum()
|
||||
)
|
||||
used = n.generators_t.p.sum().groupby(n.generators.carrier).sum()
|
||||
|
||||
curtailment[label] = (((avail - used) / avail) * 100).round(3)
|
||||
|
||||
return curtailment
|
||||
|
||||
def calculate_energy(n, label, energy):
|
||||
|
||||
def calculate_energy(n, label, energy):
|
||||
for c in n.iterate_components(n.one_port_components | n.branch_components):
|
||||
|
||||
if c.name in {'Generator', 'Load', 'ShuntImpedance'}:
|
||||
c_energies = c.pnl.p.multiply(n.snapshot_weightings.generators,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
|
||||
elif c.name in {'StorageUnit', 'Store'}:
|
||||
c_energies = c.pnl.p.multiply(n.snapshot_weightings.stores,axis=0).sum().multiply(c.df.sign).groupby(c.df.carrier).sum()
|
||||
if c.name in {"Generator", "Load", "ShuntImpedance"}:
|
||||
c_energies = (
|
||||
c.pnl.p.multiply(n.snapshot_weightings.generators, axis=0)
|
||||
.sum()
|
||||
.multiply(c.df.sign)
|
||||
.groupby(c.df.carrier)
|
||||
.sum()
|
||||
)
|
||||
elif c.name in {"StorageUnit", "Store"}:
|
||||
c_energies = (
|
||||
c.pnl.p.multiply(n.snapshot_weightings.stores, axis=0)
|
||||
.sum()
|
||||
.multiply(c.df.sign)
|
||||
.groupby(c.df.carrier)
|
||||
.sum()
|
||||
)
|
||||
else:
|
||||
c_energies = (-c.pnl.p1.multiply(n.snapshot_weightings.generators,axis=0).sum() - c.pnl.p0.multiply(n.snapshot_weightings.generators,axis=0).sum()).groupby(c.df.carrier).sum()
|
||||
c_energies = (
|
||||
(
|
||||
-c.pnl.p1.multiply(n.snapshot_weightings.generators, axis=0).sum()
|
||||
- c.pnl.p0.multiply(n.snapshot_weightings.generators, axis=0).sum()
|
||||
)
|
||||
.groupby(c.df.carrier)
|
||||
.sum()
|
||||
)
|
||||
|
||||
energy = include_in_summary(energy, [c.list_name], label, c_energies)
|
||||
|
||||
return energy
|
||||
|
||||
def include_in_summary(summary, multiindexprefix, label, item):
|
||||
|
||||
def include_in_summary(summary, multiindexprefix, label, item):
|
||||
# Index tuple(s) indicating the newly to-be-added row(s)
|
||||
raw_index = tuple([multiindexprefix, list(item.index)])
|
||||
summary = _add_indexed_rows(summary, raw_index)
|
||||
@ -166,18 +205,22 @@ def include_in_summary(summary, multiindexprefix, label, item):
|
||||
|
||||
return summary
|
||||
|
||||
def calculate_capacity(n,label,capacity):
|
||||
|
||||
def calculate_capacity(n, label, capacity):
|
||||
for c in n.iterate_components(n.one_port_components):
|
||||
if 'p_nom_opt' in c.df.columns:
|
||||
c_capacities = abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
|
||||
if "p_nom_opt" in c.df.columns:
|
||||
c_capacities = (
|
||||
abs(c.df.p_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
|
||||
)
|
||||
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
|
||||
elif 'e_nom_opt' in c.df.columns:
|
||||
c_capacities = abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
|
||||
elif "e_nom_opt" in c.df.columns:
|
||||
c_capacities = (
|
||||
abs(c.df.e_nom_opt.multiply(c.df.sign)).groupby(c.df.carrier).sum()
|
||||
)
|
||||
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
|
||||
|
||||
for c in n.iterate_components(n.passive_branch_components):
|
||||
c_capacities = c.df['s_nom_opt'].groupby(c.df.carrier).sum()
|
||||
c_capacities = c.df["s_nom_opt"].groupby(c.df.carrier).sum()
|
||||
capacity = include_in_summary(capacity, [c.list_name], label, c_capacities)
|
||||
|
||||
for c in n.iterate_components(n.controllable_branch_components):
|
||||
@ -186,8 +229,12 @@ def calculate_capacity(n,label,capacity):
|
||||
|
||||
return capacity
|
||||
|
||||
|
||||
def calculate_supply(n, label, supply):
|
||||
"""calculate the max dispatch of each component at the buses where the loads are attached"""
|
||||
"""
|
||||
calculate the max dispatch of each component at the buses where the loads
|
||||
are attached.
|
||||
"""
|
||||
|
||||
load_types = n.buses.carrier.unique()
|
||||
|
||||
@ -206,7 +253,13 @@ def calculate_supply(n, label, supply):
|
||||
if len(items) == 0 or c.pnl.p.empty:
|
||||
continue
|
||||
|
||||
s = c.pnl.p[items].max().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
|
||||
s = (
|
||||
c.pnl.p[items]
|
||||
.max()
|
||||
.multiply(c.df.loc[items, "sign"])
|
||||
.groupby(c.df.loc[items, "carrier"])
|
||||
.sum()
|
||||
)
|
||||
|
||||
# Index tuple(s) indicating the newly to-be-added row(s)
|
||||
raw_index = tuple([[i], [c.list_name], list(s.index)])
|
||||
@ -214,7 +267,6 @@ def calculate_supply(n, label, supply):
|
||||
|
||||
supply.loc[idx[raw_index], label] = s.values
|
||||
|
||||
|
||||
for c in n.iterate_components(n.branch_components):
|
||||
|
||||
for end in ["0", "1"]:
|
||||
@ -225,16 +277,25 @@ def calculate_supply(n, label, supply):
|
||||
continue
|
||||
|
||||
# lots of sign compensation for direction and to do maximums
|
||||
s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum()
|
||||
s = (-1) ** (1 - int(end)) * (
|
||||
(-1) ** int(end) * c.pnl["p" + end][items]
|
||||
).max().groupby(c.df.loc[items, "carrier"]).sum()
|
||||
|
||||
supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
|
||||
supply = supply.reindex(
|
||||
supply.index.union(
|
||||
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
|
||||
)
|
||||
)
|
||||
supply.loc[idx[i, c.list_name, list(s.index)], label] = s.values
|
||||
|
||||
return supply
|
||||
|
||||
|
||||
def calculate_supply_energy(n, label, supply_energy):
|
||||
"""calculate the total dispatch of each component at the buses where the loads are attached"""
|
||||
"""
|
||||
calculate the total dispatch of each component at the buses where the loads
|
||||
are attached.
|
||||
"""
|
||||
|
||||
load_types = n.buses.carrier.unique()
|
||||
|
||||
@ -253,7 +314,13 @@ def calculate_supply_energy(n, label, supply_energy):
|
||||
if len(items) == 0 or c.pnl.p.empty:
|
||||
continue
|
||||
|
||||
s = c.pnl.p[items].sum().multiply(c.df.loc[items,'sign']).groupby(c.df.loc[items,'carrier']).sum()
|
||||
s = (
|
||||
c.pnl.p[items]
|
||||
.sum()
|
||||
.multiply(c.df.loc[items, "sign"])
|
||||
.groupby(c.df.loc[items, "carrier"])
|
||||
.sum()
|
||||
)
|
||||
|
||||
# Index tuple(s) indicating the newly to-be-added row(s)
|
||||
raw_index = tuple([[i], [c.list_name], list(s.index)])
|
||||
@ -261,31 +328,52 @@ def calculate_supply_energy(n, label, supply_energy):
|
||||
|
||||
supply_energy.loc[idx[raw_index], label] = s.values
|
||||
|
||||
|
||||
for c in n.iterate_components(n.branch_components):
|
||||
|
||||
for end in ["0", "1"]:
|
||||
|
||||
items = c.df.index[c.df["bus" + end].map(bus_map)]
|
||||
|
||||
if len(items) == 0 or c.pnl['p' + end].empty:
|
||||
if len(items) == 0 or c.pnl["p" + end].empty:
|
||||
continue
|
||||
|
||||
s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum()
|
||||
s = (-1) * c.pnl["p" + end][items].sum().groupby(
|
||||
c.df.loc[items, "carrier"]
|
||||
).sum()
|
||||
|
||||
supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
|
||||
supply_energy = supply_energy.reindex(
|
||||
supply_energy.index.union(
|
||||
pd.MultiIndex.from_product([[i], [c.list_name], s.index])
|
||||
)
|
||||
)
|
||||
supply_energy.loc[idx[i, c.list_name, list(s.index)], label] = s.values
|
||||
|
||||
return supply_energy
|
||||
|
||||
|
||||
def calculate_metrics(n, label, metrics):
|
||||
metrics = metrics.reindex(
|
||||
metrics.index.union(
|
||||
pd.Index(
|
||||
[
|
||||
"line_volume",
|
||||
"line_volume_limit",
|
||||
"line_volume_AC",
|
||||
"line_volume_DC",
|
||||
"line_volume_shadow",
|
||||
"co2_shadow",
|
||||
]
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])))
|
||||
|
||||
metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
|
||||
metrics.at["line_volume_DC", label] = (n.links.length * n.links.p_nom_opt)[
|
||||
n.links.carrier == "DC"
|
||||
].sum()
|
||||
metrics.at["line_volume_AC", label] = (n.lines.length * n.lines.s_nom_opt).sum()
|
||||
metrics.at["line_volume",label] = metrics.loc[["line_volume_AC","line_volume_DC"],label].sum()
|
||||
metrics.at["line_volume", label] = metrics.loc[
|
||||
["line_volume_AC", "line_volume_DC"], label
|
||||
].sum()
|
||||
|
||||
if hasattr(n, "line_volume_limit"):
|
||||
metrics.at["line_volume_limit", label] = n.line_volume_limit
|
||||
@ -300,8 +388,9 @@ def calculate_metrics(n,label,metrics):
|
||||
|
||||
|
||||
def calculate_prices(n, label, prices):
|
||||
|
||||
bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity")
|
||||
bus_type = pd.Series(n.buses.index.str[3:], n.buses.index).replace(
|
||||
"", "electricity"
|
||||
)
|
||||
|
||||
prices = prices.reindex(prices.index.union(bus_type.value_counts().index))
|
||||
|
||||
@ -312,18 +401,36 @@ def calculate_prices(n,label,prices):
|
||||
|
||||
|
||||
def calculate_weighted_prices(n, label, weighted_prices):
|
||||
|
||||
logger.warning("Weighted prices don't include storage units as loads")
|
||||
|
||||
weighted_prices = weighted_prices.reindex(pd.Index(["electricity","heat","space heat","urban heat","space urban heat","gas","H2"]))
|
||||
weighted_prices = weighted_prices.reindex(
|
||||
pd.Index(
|
||||
[
|
||||
"electricity",
|
||||
"heat",
|
||||
"space heat",
|
||||
"urban heat",
|
||||
"space urban heat",
|
||||
"gas",
|
||||
"H2",
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
link_loads = {"electricity" : ["heat pump", "resistive heater", "battery charger", "H2 Electrolysis"],
|
||||
link_loads = {
|
||||
"electricity": [
|
||||
"heat pump",
|
||||
"resistive heater",
|
||||
"battery charger",
|
||||
"H2 Electrolysis",
|
||||
],
|
||||
"heat": ["water tanks charger"],
|
||||
"urban heat": ["water tanks charger"],
|
||||
"space heat": [],
|
||||
"space urban heat": [],
|
||||
"gas": ["OCGT", "gas boiler", "CHP electric", "CHP heat"],
|
||||
"H2" : ["Sabatier", "H2 Fuel Cell"]}
|
||||
"H2": ["Sabatier", "H2 Fuel Cell"],
|
||||
}
|
||||
|
||||
for carrier in link_loads:
|
||||
|
||||
@ -340,13 +447,14 @@ def calculate_weighted_prices(n,label,weighted_prices):
|
||||
continue
|
||||
|
||||
if carrier in ["H2", "gas"]:
|
||||
load = pd.DataFrame(index=n.snapshots,columns=buses,data=0.)
|
||||
load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
|
||||
elif carrier[:5] == "space":
|
||||
load = heat_demand_df[buses.str[:2]].rename(columns=lambda i: str(i)+suffix)
|
||||
load = heat_demand_df[buses.str[:2]].rename(
|
||||
columns=lambda i: str(i) + suffix
|
||||
)
|
||||
else:
|
||||
load = n.loads_t.p_set[buses]
|
||||
|
||||
|
||||
for tech in link_loads[carrier]:
|
||||
|
||||
names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]
|
||||
@ -354,15 +462,25 @@ def calculate_weighted_prices(n,label,weighted_prices):
|
||||
if names.empty:
|
||||
continue
|
||||
|
||||
load += n.links_t.p0[names].groupby(n.links.loc[names,"bus0"],axis=1).sum(axis=1)
|
||||
load += (
|
||||
n.links_t.p0[names]
|
||||
.groupby(n.links.loc[names, "bus0"], axis=1)
|
||||
.sum(axis=1)
|
||||
)
|
||||
|
||||
# Add H2 Store when charging
|
||||
if carrier == "H2":
|
||||
stores = n.stores_t.p[buses+ " Store"].groupby(n.stores.loc[buses+ " Store","bus"],axis=1).sum(axis=1)
|
||||
stores[stores > 0.] = 0.
|
||||
stores = (
|
||||
n.stores_t.p[buses + " Store"]
|
||||
.groupby(n.stores.loc[buses + " Store", "bus"], axis=1)
|
||||
.sum(axis=1)
|
||||
)
|
||||
stores[stores > 0.0] = 0.0
|
||||
load += -stores
|
||||
|
||||
weighted_prices.loc[carrier,label] = (load*n.buses_t.marginal_price[buses]).sum().sum()/load.sum().sum()
|
||||
weighted_prices.loc[carrier, label] = (
|
||||
load * n.buses_t.marginal_price[buses]
|
||||
).sum().sum() / load.sum().sum()
|
||||
|
||||
if carrier[:5] == "space":
|
||||
print(load * n.buses_t.marginal_price[buses])
|
||||
@ -370,7 +488,8 @@ def calculate_weighted_prices(n,label,weighted_prices):
|
||||
return weighted_prices
|
||||
|
||||
|
||||
outputs = ["costs",
|
||||
outputs = [
|
||||
"costs",
|
||||
"curtailment",
|
||||
"energy",
|
||||
"capacity",
|
||||
@ -382,9 +501,10 @@ outputs = ["costs",
|
||||
]
|
||||
|
||||
|
||||
def make_summaries(networks_dict, paths, config, country='all'):
|
||||
|
||||
columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"])
|
||||
def make_summaries(networks_dict, paths, config, country="all"):
|
||||
columns = pd.MultiIndex.from_tuples(
|
||||
networks_dict.keys(), names=["simpl", "clusters", "ll", "opts"]
|
||||
)
|
||||
|
||||
dfs = {}
|
||||
|
||||
@ -403,11 +523,11 @@ def make_summaries(networks_dict, paths, config, country='all'):
|
||||
logger.warning("Skipping {filename}".format(filename=filename))
|
||||
continue
|
||||
|
||||
if country != 'all':
|
||||
if country != "all":
|
||||
n = n[n.buses.country == country]
|
||||
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.
|
||||
costs = load_costs(paths[0], config['costs'], config['electricity'], Nyears)
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
|
||||
costs = load_costs(paths[0], config["costs"], config["electricity"], Nyears)
|
||||
update_transmission_costs(n, costs)
|
||||
|
||||
assign_carriers(n)
|
||||
@ -425,13 +545,20 @@ def to_csv(dfs, dir):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('make_summary', simpl='',
|
||||
clusters='5', ll='copt', opts='Co2L-24H', country='all')
|
||||
network_dir = os.path.join('..', 'results', 'networks')
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"make_summary",
|
||||
simpl="",
|
||||
clusters="5",
|
||||
ll="copt",
|
||||
opts="Co2L-24H",
|
||||
country="all",
|
||||
)
|
||||
network_dir = os.path.join("..", "results", "networks")
|
||||
else:
|
||||
network_dir = os.path.join('results', 'networks')
|
||||
network_dir = os.path.join("results", "networks")
|
||||
configure_logging(snakemake)
|
||||
|
||||
config = snakemake.config
|
||||
@ -448,14 +575,18 @@ if __name__ == "__main__":
|
||||
else:
|
||||
ll = [wildcards.ll]
|
||||
|
||||
networks_dict = {(simpl,clusters,l,opts) :
|
||||
os.path.join(network_dir, f'elec_s{simpl}_'
|
||||
f'{clusters}_ec_l{l}_{opts}.nc')
|
||||
networks_dict = {
|
||||
(simpl, clusters, l, opts): os.path.join(
|
||||
network_dir, f"elec_s{simpl}_" f"{clusters}_ec_l{l}_{opts}.nc"
|
||||
)
|
||||
for simpl in expand_from_wildcard("simpl", config)
|
||||
for clusters in expand_from_wildcard("clusters", config)
|
||||
for l in ll
|
||||
for opts in expand_from_wildcard("opts", config)}
|
||||
for opts in expand_from_wildcard("opts", config)
|
||||
}
|
||||
|
||||
dfs = make_summaries(networks_dict, snakemake.input, config, country=wildcards.country)
|
||||
dfs = make_summaries(
|
||||
networks_dict, snakemake.input, config, country=wildcards.country
|
||||
)
|
||||
|
||||
to_csv(dfs, snakemake.output[0])
|
||||
|
@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
@ -16,20 +17,24 @@ Outputs

Description
-----------

"""

import logging
from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs, configure_logging)

import pandas as pd
import numpy as np

import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import Circle, Ellipse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _helpers import (
aggregate_costs,
aggregate_p,
configure_logging,
load_network_for_plots,
)
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle, Ellipse

to_rgba = mpl.colors.colorConverter.to_rgba

logger = logging.getLogger(__name__)
@ -37,24 +42,36 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False):
|
||||
fig = ax.get_figure()
|
||||
|
||||
def axes2pt():
|
||||
return np.diff(ax.transData.transform([(0,0), (1,1)]), axis=0)[0] * (72./fig.dpi)
|
||||
return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * (
|
||||
72.0 / fig.dpi
|
||||
)
|
||||
|
||||
ellipses = []
|
||||
if not dont_resize_actively:
|
||||
|
||||
def update_width_height(event):
|
||||
dist = axes2pt()
|
||||
for e, radius in ellipses: e.width, e.height = 2. * radius * dist
|
||||
fig.canvas.mpl_connect('resize_event', update_width_height)
|
||||
ax.callbacks.connect('xlim_changed', update_width_height)
|
||||
ax.callbacks.connect('ylim_changed', update_width_height)
|
||||
for e, radius in ellipses:
|
||||
e.width, e.height = 2.0 * radius * dist
|
||||
|
||||
def legend_circle_handler(legend, orig_handle, xdescent, ydescent,
|
||||
width, height, fontsize):
|
||||
w, h = 2. * orig_handle.get_radius() * axes2pt()
|
||||
e = Ellipse(xy=(0.5*width-0.5*xdescent, 0.5*height-0.5*ydescent), width=w, height=w)
|
||||
fig.canvas.mpl_connect("resize_event", update_width_height)
|
||||
ax.callbacks.connect("xlim_changed", update_width_height)
|
||||
ax.callbacks.connect("ylim_changed", update_width_height)
|
||||
|
||||
def legend_circle_handler(
|
||||
legend, orig_handle, xdescent, ydescent, width, height, fontsize
|
||||
):
|
||||
w, h = 2.0 * orig_handle.get_radius() * axes2pt()
|
||||
e = Ellipse(
|
||||
xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent),
|
||||
width=w,
|
||||
height=w,
|
||||
)
|
||||
ellipses.append((e, orig_handle.get_radius()))
|
||||
return e
|
||||
|
||||
return {Circle: HandlerPatch(patch_func=legend_circle_handler)}
|
||||
|
||||
|
||||
@ -63,214 +80,314 @@ def make_legend_circles_for(sizes, scale=1.0, **kw):
|
||||
|
||||
|
||||
def set_plot_style():
|
||||
plt.style.use(['classic', 'seaborn-white',
|
||||
{'axes.grid': False, 'grid.linestyle': '--', 'grid.color': u'0.6',
|
||||
'hatch.color': 'white',
|
||||
'patch.linewidth': 0.5,
|
||||
'font.size': 12,
|
||||
'legend.fontsize': 'medium',
|
||||
'lines.linewidth': 1.5,
|
||||
'pdf.fonttype': 42,
|
||||
}])
|
||||
plt.style.use(
|
||||
[
|
||||
"classic",
|
||||
"seaborn-white",
|
||||
{
|
||||
"axes.grid": False,
|
||||
"grid.linestyle": "--",
|
||||
"grid.color": "0.6",
|
||||
"hatch.color": "white",
|
||||
"patch.linewidth": 0.5,
|
||||
"font.size": 12,
|
||||
"legend.fontsize": "medium",
|
||||
"lines.linewidth": 1.5,
|
||||
"pdf.fonttype": 42,
|
||||
},
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def plot_map(n, opts, ax=None, attribute='p_nom'):
|
||||
def plot_map(n, opts, ax=None, attribute="p_nom"):
|
||||
if ax is None:
|
||||
ax = plt.gca()
|
||||
|
||||
## DATA
|
||||
line_colors = {'cur': "purple",
|
||||
'exp': mpl.colors.rgb2hex(to_rgba("red", 0.7), True)}
|
||||
tech_colors = opts['tech_colors']
|
||||
line_colors = {
|
||||
"cur": "purple",
|
||||
"exp": mpl.colors.rgb2hex(to_rgba("red", 0.7), True),
|
||||
}
|
||||
tech_colors = opts["tech_colors"]
|
||||
|
||||
if attribute == 'p_nom':
|
||||
if attribute == "p_nom":
|
||||
# bus_sizes = n.generators_t.p.sum().loc[n.generators.carrier == "load"].groupby(n.generators.bus).sum()
|
||||
bus_sizes = pd.concat((n.generators.query('carrier != "load"').groupby(['bus', 'carrier']).p_nom_opt.sum(),
|
||||
n.storage_units.groupby(['bus', 'carrier']).p_nom_opt.sum()))
|
||||
bus_sizes = pd.concat(
|
||||
(
|
||||
n.generators.query('carrier != "load"')
|
||||
.groupby(["bus", "carrier"])
|
||||
.p_nom_opt.sum(),
|
||||
n.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum(),
|
||||
)
|
||||
)
|
||||
line_widths_exp = n.lines.s_nom_opt
|
||||
line_widths_cur = n.lines.s_nom_min
|
||||
link_widths_exp = n.links.p_nom_opt
|
||||
link_widths_cur = n.links.p_nom_min
|
||||
else:
|
||||
raise 'plotting of {} has not been implemented yet'.format(attribute)
|
||||
|
||||
|
||||
line_colors_with_alpha = \
|
||||
((line_widths_cur / n.lines.s_nom > 1e-3)
|
||||
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
|
||||
link_colors_with_alpha = \
|
||||
((link_widths_cur / n.links.p_nom > 1e-3)
|
||||
.map({True: line_colors['cur'], False: to_rgba(line_colors['cur'], 0.)}))
|
||||
raise "plotting of {} has not been implemented yet".format(attribute)
|
||||
|
||||
line_colors_with_alpha = (line_widths_cur / n.lines.s_nom > 1e-3).map(
|
||||
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
|
||||
)
|
||||
link_colors_with_alpha = (link_widths_cur / n.links.p_nom > 1e-3).map(
|
||||
{True: line_colors["cur"], False: to_rgba(line_colors["cur"], 0.0)}
|
||||
)
|
||||
|
||||
## FORMAT
|
||||
linewidth_factor = opts['map'][attribute]['linewidth_factor']
|
||||
bus_size_factor = opts['map'][attribute]['bus_size_factor']
|
||||
linewidth_factor = opts["map"][attribute]["linewidth_factor"]
|
||||
bus_size_factor = opts["map"][attribute]["bus_size_factor"]
|
||||
|
||||
## PLOT
|
||||
n.plot(line_widths=line_widths_exp/linewidth_factor,
|
||||
n.plot(
|
||||
line_widths=line_widths_exp / linewidth_factor,
|
||||
link_widths=link_widths_exp / linewidth_factor,
|
||||
line_colors=line_colors['exp'],
|
||||
link_colors=line_colors['exp'],
|
||||
line_colors=line_colors["exp"],
|
||||
link_colors=line_colors["exp"],
|
||||
bus_sizes=bus_sizes / bus_size_factor,
|
||||
bus_colors=tech_colors,
|
||||
boundaries=map_boundaries,
|
||||
color_geomap=True, geomap=True,
|
||||
ax=ax)
|
||||
n.plot(line_widths=line_widths_cur/linewidth_factor,
|
||||
color_geomap=True,
|
||||
geomap=True,
|
||||
ax=ax,
|
||||
)
|
||||
n.plot(
|
||||
line_widths=line_widths_cur / linewidth_factor,
|
||||
link_widths=link_widths_cur / linewidth_factor,
|
||||
line_colors=line_colors_with_alpha,
|
||||
link_colors=link_colors_with_alpha,
|
||||
bus_sizes=0,
|
||||
boundaries=map_boundaries,
|
||||
color_geomap=True, geomap=True,
|
||||
ax=ax)
|
||||
ax.set_aspect('equal')
|
||||
ax.axis('off')
|
||||
color_geomap=True,
|
||||
geomap=True,
|
||||
ax=ax,
|
||||
)
|
||||
ax.set_aspect("equal")
|
||||
ax.axis("off")
|
||||
|
||||
# Rasterize basemap
|
||||
# TODO : Check if this also works with cartopy
|
||||
for c in ax.collections[:2]: c.set_rasterized(True)
|
||||
for c in ax.collections[:2]:
|
||||
c.set_rasterized(True)
|
||||
|
||||
# LEGEND
|
||||
handles = []
|
||||
labels = []
|
||||
|
||||
for s in (10, 1):
|
||||
handles.append(plt.Line2D([0],[0],color=line_colors['exp'],
|
||||
linewidth=s*1e3/linewidth_factor))
|
||||
handles.append(
|
||||
plt.Line2D(
|
||||
[0], [0], color=line_colors["exp"], linewidth=s * 1e3 / linewidth_factor
|
||||
)
|
||||
)
|
||||
labels.append("{} GW".format(s))
|
||||
l1_1 = ax.legend(handles, labels,
|
||||
loc="upper left", bbox_to_anchor=(0.24, 1.01),
|
||||
l1_1 = ax.legend(
|
||||
handles,
|
||||
labels,
|
||||
loc="upper left",
|
||||
bbox_to_anchor=(0.24, 1.01),
|
||||
frameon=False,
|
||||
labelspacing=0.8, handletextpad=1.5,
|
||||
title='Transmission Exp./Exist. ')
|
||||
labelspacing=0.8,
|
||||
handletextpad=1.5,
|
||||
title="Transmission Exp./Exist. ",
|
||||
)
|
||||
ax.add_artist(l1_1)
|
||||
|
||||
handles = []
|
||||
labels = []
|
||||
for s in (10, 5):
|
||||
handles.append(plt.Line2D([0],[0],color=line_colors['cur'],
|
||||
linewidth=s*1e3/linewidth_factor))
|
||||
handles.append(
|
||||
plt.Line2D(
|
||||
[0], [0], color=line_colors["cur"], linewidth=s * 1e3 / linewidth_factor
|
||||
)
|
||||
)
|
||||
labels.append("/")
|
||||
l1_2 = ax.legend(handles, labels,
|
||||
loc="upper left", bbox_to_anchor=(0.26, 1.01),
|
||||
l1_2 = ax.legend(
|
||||
handles,
|
||||
labels,
|
||||
loc="upper left",
|
||||
bbox_to_anchor=(0.26, 1.01),
|
||||
frameon=False,
|
||||
labelspacing=0.8, handletextpad=0.5,
|
||||
title=' ')
|
||||
labelspacing=0.8,
|
||||
handletextpad=0.5,
|
||||
title=" ",
|
||||
)
|
||||
ax.add_artist(l1_2)
|
||||
|
||||
handles = make_legend_circles_for([10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w")
|
||||
handles = make_legend_circles_for(
|
||||
[10e3, 5e3, 1e3], scale=bus_size_factor, facecolor="w"
|
||||
)
|
||||
labels = ["{} GW".format(s) for s in (10, 5, 3)]
|
||||
l2 = ax.legend(handles, labels,
|
||||
loc="upper left", bbox_to_anchor=(0.01, 1.01),
|
||||
frameon=False, labelspacing=1.0,
|
||||
title='Generation',
|
||||
handler_map=make_handler_map_to_scale_circles_as_in(ax))
|
||||
l2 = ax.legend(
|
||||
handles,
|
||||
labels,
|
||||
loc="upper left",
|
||||
bbox_to_anchor=(0.01, 1.01),
|
||||
frameon=False,
|
||||
labelspacing=1.0,
|
||||
title="Generation",
|
||||
handler_map=make_handler_map_to_scale_circles_as_in(ax),
|
||||
)
|
||||
ax.add_artist(l2)
|
||||
|
||||
techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']))
|
||||
techs = (bus_sizes.index.levels[1]).intersection(
|
||||
pd.Index(opts["vre_techs"] + opts["conv_techs"] + opts["storage_techs"])
|
||||
)
|
||||
handles = []
|
||||
labels = []
|
||||
for t in techs:
|
||||
handles.append(plt.Line2D([0], [0], color=tech_colors[t], marker='o', markersize=8, linewidth=0))
|
||||
labels.append(opts['nice_names'].get(t, t))
|
||||
l3 = ax.legend(handles, labels, loc="upper center", bbox_to_anchor=(0.5, -0.), # bbox_to_anchor=(0.72, -0.05),
|
||||
handletextpad=0., columnspacing=0.5, ncol=4, title='Technology')
|
||||
handles.append(
|
||||
plt.Line2D(
|
||||
[0], [0], color=tech_colors[t], marker="o", markersize=8, linewidth=0
|
||||
)
|
||||
)
|
||||
labels.append(opts["nice_names"].get(t, t))
|
||||
l3 = ax.legend(
|
||||
handles,
|
||||
labels,
|
||||
loc="upper center",
|
||||
bbox_to_anchor=(0.5, -0.0), # bbox_to_anchor=(0.72, -0.05),
|
||||
handletextpad=0.0,
|
||||
columnspacing=0.5,
|
||||
ncol=4,
|
||||
title="Technology",
|
||||
)
|
||||
|
||||
return fig
|
||||
|
||||
|
||||
def plot_total_energy_pie(n, opts, ax=None):
|
||||
if ax is None: ax = plt.gca()
|
||||
if ax is None:
|
||||
ax = plt.gca()
|
||||
|
||||
ax.set_title('Energy per technology', fontdict=dict(fontsize="medium"))
|
||||
ax.set_title("Energy per technology", fontdict=dict(fontsize="medium"))
|
||||
|
||||
e_primary = aggregate_p(n).drop('load', errors='ignore').loc[lambda s: s>0]
|
||||
e_primary = aggregate_p(n).drop("load", errors="ignore").loc[lambda s: s > 0]
|
||||
|
||||
patches, texts, autotexts = ax.pie(e_primary,
|
||||
patches, texts, autotexts = ax.pie(
|
||||
e_primary,
|
||||
startangle=90,
|
||||
labels = e_primary.rename(opts['nice_names']).index,
|
||||
autopct='%.0f%%',
|
||||
labels=e_primary.rename(opts["nice_names"]).index,
|
||||
autopct="%.0f%%",
|
||||
shadow=False,
|
||||
colors = [opts['tech_colors'][tech] for tech in e_primary.index])
|
||||
colors=[opts["tech_colors"][tech] for tech in e_primary.index],
|
||||
)
|
||||
for t1, t2, i in zip(texts, autotexts, e_primary.index):
|
||||
if e_primary.at[i] < 0.04 * e_primary.sum():
|
||||
t1.remove()
|
||||
t2.remove()
|
||||
|
||||
|
||||
def plot_total_cost_bar(n, opts, ax=None):
|
||||
if ax is None: ax = plt.gca()
|
||||
if ax is None:
|
||||
ax = plt.gca()
|
||||
|
||||
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
|
||||
tech_colors = opts['tech_colors']
|
||||
tech_colors = opts["tech_colors"]
|
||||
|
||||
def split_costs(n):
|
||||
costs = aggregate_costs(n).reset_index(level=0, drop=True)
|
||||
costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True)
|
||||
return (costs['capital'].add(costs['marginal'], fill_value=0.),
|
||||
costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal'])
|
||||
costs_ex = aggregate_costs(n, existing_only=True).reset_index(
|
||||
level=0, drop=True
|
||||
)
|
||||
return (
|
||||
costs["capital"].add(costs["marginal"], fill_value=0.0),
|
||||
costs_ex["capital"],
|
||||
costs["capital"] - costs_ex["capital"],
|
||||
costs["marginal"],
|
||||
)
|
||||
|
||||
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
|
||||
|
||||
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')),
|
||||
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac',
|
||||
'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna()
|
||||
bottom = np.array([0., 0.])
|
||||
costs_graph = pd.DataFrame(
|
||||
dict(a=costs.drop("load", errors="ignore")),
|
||||
index=[
|
||||
"AC-AC",
|
||||
"AC line",
|
||||
"onwind",
|
||||
"offwind-ac",
|
||||
"offwind-dc",
|
||||
"solar",
|
||||
"OCGT",
|
||||
"CCGT",
|
||||
"battery",
|
||||
"H2",
|
||||
],
|
||||
).dropna()
|
||||
bottom = np.array([0.0, 0.0])
|
||||
texts = []
|
||||
|
||||
for i, ind in enumerate(costs_graph.index):
|
||||
data = np.asarray(costs_graph.loc[ind]) / total_load
|
||||
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind],
|
||||
width=0.7, zorder=-1)
|
||||
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind], width=0.7, zorder=-1)
|
||||
bottom_sub = bottom
|
||||
bottom = bottom + data
|
||||
|
||||
if ind in opts['conv_techs'] + ['AC line']:
|
||||
if ind in opts["conv_techs"] + ["AC line"]:
|
||||
for c in [costs_cap_ex, costs_marg]:
|
||||
if ind in c:
|
||||
data_sub = np.asarray([c.loc[ind]]) / total_load
|
||||
ax.bar([0.5], data_sub, linewidth=0,
|
||||
bottom=bottom_sub, color=tech_colors[ind],
|
||||
width=0.7, zorder=-1, alpha=0.8)
|
||||
ax.bar(
|
||||
[0.5],
|
||||
data_sub,
|
||||
linewidth=0,
|
||||
bottom=bottom_sub,
|
||||
color=tech_colors[ind],
|
||||
width=0.7,
|
||||
zorder=-1,
|
||||
alpha=0.8,
|
||||
)
|
||||
bottom_sub += data_sub
|
||||
|
||||
if abs(data[-1]) < 5:
continue

text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind))
text = ax.text(
1.1, (bottom - 0.5 * data)[-1] - 3, opts["nice_names"].get(ind, ind)
)
texts.append(text)

ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get('costs_max', 80)])
ax.set_ylim([0, opts.get("costs_max", 80)])
ax.set_xlim([0, 1])
ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted')
ax.grid(True, axis="y", color="k", linestyle="dotted")
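Note: the bar plotted above is the total annualised system cost normalised by total annual demand, which is why the axis reads Eur/MWh. A minimal sketch of that normalisation with purely illustrative numbers (not taken from any PyPSA-Eur result):

# illustrative sketch only -- both numbers are made up
annualised_system_cost = 180e9  # EUR per year (hypothetical)
annual_demand = 3.2e9           # MWh per year (hypothetical)
average_system_cost = annualised_system_cost / annual_demand
print(f"{average_system_cost:.2f} Eur/MWh")  # 56.25 Eur/MWh
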
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('plot_network', simpl='',
|
||||
clusters='5', ll='copt', opts='Co2L-24H',
|
||||
attr='p_nom', ext="pdf")
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"plot_network",
|
||||
simpl="",
|
||||
clusters="5",
|
||||
ll="copt",
|
||||
opts="Co2L-24H",
|
||||
attr="p_nom",
|
||||
ext="pdf",
|
||||
)
|
||||
configure_logging(snakemake)
|
||||
|
||||
set_plot_style()
|
||||
|
||||
config, wildcards = snakemake.config, snakemake.wildcards
|
||||
|
||||
map_figsize = config["plotting"]['map']['figsize']
|
||||
map_boundaries = config["plotting"]['map']['boundaries']
|
||||
map_figsize = config["plotting"]["map"]["figsize"]
|
||||
map_boundaries = config["plotting"]["map"]["boundaries"]
|
||||
|
||||
n = load_network_for_plots(snakemake.input.network, snakemake.input.tech_costs, config)
|
||||
n = load_network_for_plots(
|
||||
snakemake.input.network, snakemake.input.tech_costs, config
|
||||
)
|
||||
|
||||
scenario_opts = wildcards.opts.split('-')
|
||||
scenario_opts = wildcards.opts.split("-")
|
||||
|
||||
fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()})
|
||||
fig, ax = plt.subplots(
|
||||
figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()}
|
||||
)
|
||||
plot_map(n, config["plotting"], ax=ax, attribute=wildcards.attr)
|
||||
|
||||
fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches='tight')
|
||||
fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches="tight")
|
||||
|
||||
ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
|
||||
plot_total_energy_pie(n, config["plotting"], ax=ax1)
|
||||
@ -281,9 +398,12 @@ if __name__ == "__main__":
|
||||
ll = wildcards.ll
|
||||
ll_type = ll[0]
|
||||
ll_factor = ll[1:]
|
||||
lbl = dict(c='line cost', v='line volume')[ll_type]
|
||||
amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal'
|
||||
fig.suptitle('Expansion to {amount} {label} at {clusters} clusters'
|
||||
.format(amount=amnt, label=lbl, clusters=wildcards.clusters))
|
||||
lbl = dict(c="line cost", v="line volume")[ll_type]
|
||||
amnt = "{ll} x today's".format(ll=ll_factor) if ll_factor != "opt" else "optimal"
|
||||
fig.suptitle(
|
||||
"Expansion to {amount} {label} at {clusters} clusters".format(
|
||||
amount=amnt, label=lbl, clusters=wildcards.clusters
|
||||
)
|
||||
)
|
||||
|
||||
fig.savefig(snakemake.output.ext, transparent=True, bbox_inches='tight')
|
||||
fig.savefig(snakemake.output.ext, transparent=True, bbox_inches="tight")
|
||||
|
@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
@ -16,14 +17,13 @@ Outputs
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
"""
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pypsa
|
||||
import pandas as pd
|
||||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
import pypsa
|
||||
from _helpers import configure_logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -31,11 +31,13 @@ logger = logging.getLogger(__name__)
|
||||
def cum_p_nom_max(net, tech, country=None):
|
||||
carrier_b = net.generators.carrier == tech
|
||||
|
||||
generators = pd.DataFrame(dict(
|
||||
p_nom_max=net.generators.loc[carrier_b, 'p_nom_max'],
|
||||
generators = pd.DataFrame(
|
||||
dict(
|
||||
p_nom_max=net.generators.loc[carrier_b, "p_nom_max"],
|
||||
p_max_pu=net.generators_t.p_max_pu.loc[:, carrier_b].mean(),
|
||||
country=net.generators.loc[carrier_b, 'bus'].map(net.buses.country)
|
||||
)).sort_values("p_max_pu", ascending=False)
|
||||
country=net.generators.loc[carrier_b, "bus"].map(net.buses.country),
|
||||
)
|
||||
).sort_values("p_max_pu", ascending=False)
|
||||
|
||||
if country is not None:
|
||||
generators = generators.loc[generators.country == country]
|
||||
@ -46,22 +48,28 @@ def cum_p_nom_max(net, tech, country=None):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('plot_p_nom_max', simpl='',
|
||||
techs='solar,onwind,offwind-dc', ext='png',
|
||||
clusts= '5,full', country= 'all')
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"plot_p_nom_max",
|
||||
simpl="",
|
||||
techs="solar,onwind,offwind-dc",
|
||||
ext="png",
|
||||
clusts="5,full",
|
||||
country="all",
|
||||
)
|
||||
configure_logging(snakemake)
|
||||
|
||||
plot_kwds = dict(drawstyle="steps-post")
|
||||
|
||||
clusters = snakemake.wildcards.clusts.split(',')
|
||||
techs = snakemake.wildcards.techs.split(',')
|
||||
clusters = snakemake.wildcards.clusts.split(",")
|
||||
techs = snakemake.wildcards.techs.split(",")
|
||||
country = snakemake.wildcards.country
|
||||
if country == 'all':
|
||||
if country == "all":
|
||||
country = None
|
||||
else:
|
||||
plot_kwds['marker'] = 'x'
|
||||
plot_kwds["marker"] = "x"
|
||||
|
||||
fig, axes = plt.subplots(1, len(techs))
|
||||
|
||||
@ -69,8 +77,9 @@ if __name__ == "__main__":
|
||||
net = pypsa.Network(snakemake.input[j])
|
||||
|
||||
for i, tech in enumerate(techs):
|
||||
cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max",
|
||||
label=cluster, ax=axes[i], **plot_kwds)
|
||||
cum_p_nom_max(net, tech, country).plot(
|
||||
x="p_max_pu", y="cum_p_nom_max", label=cluster, ax=axes[i], **plot_kwds
|
||||
)
|
||||
|
||||
for i, tech in enumerate(techs):
|
||||
ax = axes[i]
|
||||
@ -79,4 +88,4 @@ if __name__ == "__main__":
|
||||
|
||||
plt.legend(title="Cluster level")
|
||||
|
||||
fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight')
|
||||
fig.savefig(snakemake.output[0], transparent=True, bbox_inches="tight")
|
||||
|
@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
@ -16,15 +17,14 @@ Outputs
|
||||
|
||||
Description
|
||||
-----------
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
import os
|
||||
|
||||
import pandas as pd
|
||||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
from _helpers import configure_logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -52,11 +52,26 @@ def rename_techs(label):
|
||||
return label
|
||||
|
||||
|
||||
preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"])
|
||||
preferred_order = pd.Index(
|
||||
[
|
||||
"transmission lines",
|
||||
"hydroelectricity",
|
||||
"hydro reservoir",
|
||||
"run of river",
|
||||
"pumped hydro storage",
|
||||
"onshore wind",
|
||||
"offshore wind ac",
|
||||
"offshore wind dc",
|
||||
"solar PV",
|
||||
"solar thermal",
|
||||
"OCGT",
|
||||
"hydrogen storage",
|
||||
"battery storage",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def plot_costs(infn, config, fn=None):
|
||||
|
||||
## For now ignore the simpl header
|
||||
cost_df = pd.read_csv(infn, index_col=list(range(3)), header=[1, 2, 3])
|
||||
|
||||
@ -67,7 +82,7 @@ def plot_costs(infn, config, fn=None):
|
||||
|
||||
df = df.groupby(df.index.map(rename_techs)).sum()
|
||||
|
||||
to_drop = df.index[df.max(axis=1) < config['plotting']['costs_threshold']]
|
||||
to_drop = df.index[df.max(axis=1) < config["plotting"]["costs_threshold"]]
|
||||
|
||||
print("dropping")
|
||||
|
||||
@ -77,22 +92,28 @@ def plot_costs(infn, config, fn=None):
|
||||
|
||||
print(df.sum())
|
||||
|
||||
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
|
||||
new_index = (preferred_order & df.index).append(
|
||||
df.index.difference(preferred_order)
|
||||
)
|
||||
|
||||
new_columns = df.sum().sort_values().index
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
fig.set_size_inches((12, 8))
|
||||
|
||||
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])
|
||||
|
||||
df.loc[new_index, new_columns].T.plot(
|
||||
kind="bar",
|
||||
ax=ax,
|
||||
stacked=True,
|
||||
color=[config["plotting"]["tech_colors"][i] for i in new_index],
|
||||
)
|
||||
|
||||
handles, labels = ax.get_legend_handles_labels()
|
||||
|
||||
handles.reverse()
|
||||
labels.reverse()
|
||||
|
||||
ax.set_ylim([0,config['plotting']['costs_max']])
|
||||
ax.set_ylim([0, config["plotting"]["costs_max"]])
|
||||
|
||||
ax.set_ylabel("System Cost [EUR billion per year]")
|
||||
|
||||
@ -102,7 +123,6 @@ def plot_costs(infn, config, fn=None):
|
||||
|
||||
ax.legend(handles, labels, ncol=4, loc="upper left")
|
||||
|
||||
|
||||
fig.tight_layout()
|
||||
|
||||
if fn is not None:
|
||||
@ -110,7 +130,6 @@ def plot_costs(infn, config, fn=None):
|
||||
|
||||
|
||||
def plot_energy(infn, config, fn=None):
|
||||
|
||||
energy_df = pd.read_csv(infn, index_col=list(range(2)), header=[1, 2, 3])
|
||||
|
||||
df = energy_df.groupby(energy_df.index.get_level_values(1)).sum()
|
||||
@ -120,7 +139,7 @@ def plot_energy(infn, config, fn=None):
|
||||
|
||||
df = df.groupby(df.index.map(rename_techs)).sum()
|
||||
|
||||
to_drop = df.index[df.abs().max(axis=1) < config['plotting']['energy_threshold']]
|
||||
to_drop = df.index[df.abs().max(axis=1) < config["plotting"]["energy_threshold"]]
|
||||
|
||||
print("dropping")
|
||||
|
||||
@ -130,22 +149,28 @@ def plot_energy(infn, config, fn=None):
|
||||
|
||||
print(df.sum())
|
||||
|
||||
new_index = (preferred_order&df.index).append(df.index.difference(preferred_order))
|
||||
new_index = (preferred_order & df.index).append(
|
||||
df.index.difference(preferred_order)
|
||||
)
|
||||
|
||||
new_columns = df.columns.sort_values()
|
||||
|
||||
fig, ax = plt.subplots()
|
||||
fig.set_size_inches((12, 8))
|
||||
|
||||
df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])
|
||||
|
||||
df.loc[new_index, new_columns].T.plot(
|
||||
kind="bar",
|
||||
ax=ax,
|
||||
stacked=True,
|
||||
color=[config["plotting"]["tech_colors"][i] for i in new_index],
|
||||
)
|
||||
|
||||
handles, labels = ax.get_legend_handles_labels()
|
||||
|
||||
handles.reverse()
|
||||
labels.reverse()
|
||||
|
||||
ax.set_ylim([config['plotting']['energy_min'], config['plotting']['energy_max']])
|
||||
ax.set_ylim([config["plotting"]["energy_min"], config["plotting"]["energy_max"]])
|
||||
|
||||
ax.set_ylabel("Energy [TWh/a]")
|
||||
|
||||
@ -155,7 +180,6 @@ def plot_energy(infn, config, fn=None):
|
||||
|
||||
ax.legend(handles, labels, ncol=4, loc="upper left")
|
||||
|
||||
|
||||
fig.tight_layout()
|
||||
|
||||
if fn is not None:
|
||||
@ -163,11 +187,20 @@ def plot_energy(infn, config, fn=None):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('plot_summary', summary='energy',
|
||||
simpl='', clusters=5, ll='copt', opts='Co2L-24H',
|
||||
attr='', ext='png', country='all')
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"plot_summary",
|
||||
summary="energy",
|
||||
simpl="",
|
||||
clusters=5,
|
||||
ll="copt",
|
||||
opts="Co2L-24H",
|
||||
attr="",
|
||||
ext="png",
|
||||
country="all",
|
||||
)
|
||||
configure_logging(snakemake)
|
||||
|
||||
config = snakemake.config
|
||||
@ -178,4 +211,6 @@ if __name__ == "__main__":
|
||||
except KeyError:
|
||||
raise RuntimeError(f"plotting function for {summary} has not been defined")
|
||||
|
||||
func(os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0])
|
||||
func(
|
||||
os.path.join(snakemake.input[0], f"{summary}.csv"), config, snakemake.output[0]
|
||||
)
|
||||
|
@ -1,11 +1,14 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
"""
|
||||
Extracts capacities of HVDC links from `Wikipedia <https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_.
|
||||
Extracts capacities of HVDC links from `Wikipedia.
|
||||
|
||||
<https://en.wikipedia.org/wiki/List_of_HVDC_projects>`_.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -33,13 +36,12 @@ Description
|
||||
-----------
|
||||
|
||||
*None*
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import pandas as pd
|
||||
from _helpers import configure_logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -49,29 +51,45 @@ def multiply(s):
|
||||
|
||||
|
||||
def extract_coordinates(s):
regex = (r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) "
r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)")
regex = (
r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)"
)
e = s.str.extract(regex, expand=True)
lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float)/60.)/60.)*e[3].map({'N': +1., 'S': -1.})
lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float)/60.)/60.)*e[7].map({'E': +1., 'W': -1.})
lat = (
e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0
) * e[3].map({"N": +1.0, "S": -1.0})
lon = (
e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0
) * e[7].map({"E": +1.0, "W": -1.0})
return lon, lat
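As a quick sanity check of the degree-minute-second conversion above, here is a self-contained sketch with a made-up coordinate string (the location and numbers are purely illustrative, not data from the Wikipedia table):

import pandas as pd

s = pd.Series(["55°41′13″N 12°05′56″E"])  # hypothetical converter station location
regex = (
    r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(N|S) " r"(\d{1,2})°(\d{1,2})′(\d{1,2})″(E|W)"
)
e = s.str.extract(regex, expand=True)
# decimal degrees = degrees + minutes/60 + seconds/3600, signed by hemisphere
lat = (e[0].astype(float) + (e[1].astype(float) + e[2].astype(float) / 60.0) / 60.0) * e[3].map({"N": 1.0, "S": -1.0})
lon = (e[4].astype(float) + (e[5].astype(float) + e[6].astype(float) / 60.0) / 60.0) * e[7].map({"E": 1.0, "W": -1.0})
print(lat[0], lon[0])  # roughly 55.687, 12.099
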
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake # rule must be enabled in config
|
||||
snakemake = mock_snakemake('prepare_links_p_nom', simpl='')
|
||||
|
||||
snakemake = mock_snakemake("prepare_links_p_nom", simpl="")
|
||||
configure_logging(snakemake)
|
||||
|
||||
links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0]
|
||||
links_p_nom = pd.read_html(
|
||||
"https://en.wikipedia.org/wiki/List_of_HVDC_projects", header=0, match="SwePol"
|
||||
)[0]
|
||||
|
||||
mw = "Power (MW)"
|
||||
m_b = links_p_nom[mw].str.contains('x').fillna(False)
|
||||
m_b = links_p_nom[mw].str.contains("x").fillna(False)
|
||||
|
||||
links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split('x').pipe(multiply)
|
||||
links_p_nom[mw] = links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
|
||||
links_p_nom.loc[m_b, mw] = links_p_nom.loc[m_b, mw].str.split("x").pipe(multiply)
|
||||
links_p_nom[mw] = (
|
||||
links_p_nom[mw].str.extract("[-/]?([\d.]+)", expand=False).astype(float)
|
||||
)
|
||||
|
||||
links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1'])
|
||||
links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2'])
|
||||
links_p_nom["x1"], links_p_nom["y1"] = extract_coordinates(
|
||||
links_p_nom["Converterstation 1"]
|
||||
)
|
||||
links_p_nom["x2"], links_p_nom["y2"] = extract_coordinates(
|
||||
links_p_nom["Converterstation 2"]
|
||||
)
|
||||
|
||||
links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(snakemake.output[0], index=False)
|
||||
links_p_nom.dropna(subset=["x1", "y1", "x2", "y2"]).to_csv(
|
||||
snakemake.output[0], index=False
|
||||
)
|
||||
|
@ -1,10 +1,12 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# coding: utf-8
|
||||
"""
|
||||
Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such as
|
||||
Prepare PyPSA network for solving according to :ref:`opts` and :ref:`ll`, such
|
||||
as.
|
||||
|
||||
- adding an annual **limit** of carbon-dioxide emissions,
|
||||
- adding an exogenous **price** per tonne emissions of carbon-dioxide (or other kinds),
|
||||
@ -53,17 +55,15 @@ Description
|
||||
The rule :mod:`prepare_all_networks` runs
|
||||
for all ``scenario`` s in the configuration file
|
||||
the rule :mod:`prepare_network`.
|
||||
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging
|
||||
|
||||
import re
|
||||
import pypsa
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
import pypsa
|
||||
from _helpers import configure_logging
|
||||
from add_electricity import load_costs, update_transmission_costs
|
||||
|
||||
idx = pd.IndexSlice
|
||||
@ -71,65 +71,84 @@ idx = pd.IndexSlice
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def add_co2limit(n, co2limit, Nyears=1.):
|
||||
|
||||
n.add("GlobalConstraint", "CO2Limit",
|
||||
carrier_attribute="co2_emissions", sense="<=",
|
||||
constant=co2limit * Nyears)
|
||||
def add_co2limit(n, co2limit, Nyears=1.0):
|
||||
n.add(
|
||||
"GlobalConstraint",
|
||||
"CO2Limit",
|
||||
carrier_attribute="co2_emissions",
|
||||
sense="<=",
|
||||
constant=co2limit * Nyears,
|
||||
)
|
||||
|
||||
|
||||
def add_gaslimit(n, gaslimit, Nyears=1.):
|
||||
|
||||
def add_gaslimit(n, gaslimit, Nyears=1.0):
|
||||
sel = n.carriers.index.intersection(["OCGT", "CCGT", "CHP"])
|
||||
n.carriers.loc[sel, "gas_usage"] = 1.
|
||||
n.carriers.loc[sel, "gas_usage"] = 1.0
|
||||
|
||||
n.add("GlobalConstraint", "GasLimit",
|
||||
carrier_attribute="gas_usage", sense="<=",
|
||||
constant=gaslimit * Nyears)
|
||||
n.add(
|
||||
"GlobalConstraint",
|
||||
"GasLimit",
|
||||
carrier_attribute="gas_usage",
|
||||
sense="<=",
|
||||
constant=gaslimit * Nyears,
|
||||
)
|
||||
|
||||
|
||||
def add_emission_prices(n, emission_prices={'co2': 0.}, exclude_co2=False):
if exclude_co2: emission_prices.pop('co2')
ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') *
n.carriers.filter(like='_emissions')).sum(axis=1)
def add_emission_prices(n, emission_prices={"co2": 0.0}, exclude_co2=False):
if exclude_co2:
emission_prices.pop("co2")
ep = (
pd.Series(emission_prices).rename(lambda x: x + "_emissions")
* n.carriers.filter(like="_emissions")
).sum(axis=1)
gen_ep = n.generators.carrier.map(ep) / n.generators.efficiency
n.generators['marginal_cost'] += gen_ep
n.generators["marginal_cost"] += gen_ep
su_ep = n.storage_units.carrier.map(ep) / n.storage_units.efficiency_dispatch
n.storage_units['marginal_cost'] += su_ep
n.storage_units["marginal_cost"] += su_ep
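For orientation, the marginal-cost adder computed above is the emission price times the carrier's specific emissions, divided by the plant efficiency. A back-of-the-envelope sketch with illustrative numbers (not values from the config):

co2_price = 25.0        # EUR per tCO2 (hypothetical)
co2_emissions = 0.34    # tCO2 per MWh_th for the carrier (hypothetical)
efficiency = 0.4        # MWh_el per MWh_th
marginal_cost_adder = co2_price * co2_emissions / efficiency
print(marginal_cost_adder)  # 21.25 EUR per MWh_el added to marginal_cost
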
def set_line_s_max_pu(n, s_max_pu=0.7):
|
||||
n.lines['s_max_pu'] = s_max_pu
|
||||
n.lines["s_max_pu"] = s_max_pu
|
||||
logger.info(f"N-1 security margin of lines set to {s_max_pu}")
|
||||
|
||||
|
||||
def set_transmission_limit(n, ll_type, factor, costs, Nyears=1):
links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series()
links_dc_b = n.links.carrier == "DC" if not n.links.empty else pd.Series()

_lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) *
n.lines.num_parallel * n.lines.bus0.map(n.buses.v_nom))
lines_s_nom = n.lines.s_nom.where(n.lines.type == '', _lines_s_nom)
_lines_s_nom = (
np.sqrt(3)
* n.lines.type.map(n.line_types.i_nom)
* n.lines.num_parallel
* n.lines.bus0.map(n.buses.v_nom)
)
lines_s_nom = n.lines.s_nom.where(n.lines.type == "", _lines_s_nom)
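The fallback _lines_s_nom above is the standard three-phase thermal rating S = sqrt(3) * V * I per circuit, scaled by the number of parallel circuits. A small numeric sketch (the current rating used here is illustrative, not a specific line type from the dataset):

import numpy as np

v_nom = 380.0       # kV
i_nom = 2.58        # kA per circuit (illustrative value)
num_parallel = 2.0
s_nom = np.sqrt(3) * v_nom * i_nom * num_parallel  # kV * kA = MVA
print(round(s_nom))  # about 3396 MVA
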
col = 'capital_cost' if ll_type == 'c' else 'length'
|
||||
ref = (lines_s_nom @ n.lines[col] +
|
||||
n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col])
|
||||
col = "capital_cost" if ll_type == "c" else "length"
|
||||
ref = (
|
||||
lines_s_nom @ n.lines[col]
|
||||
+ n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col]
|
||||
)
|
||||
|
||||
update_transmission_costs(n, costs)
|
||||
|
||||
if factor == 'opt' or float(factor) > 1.0:
|
||||
n.lines['s_nom_min'] = lines_s_nom
|
||||
n.lines['s_nom_extendable'] = True
|
||||
if factor == "opt" or float(factor) > 1.0:
|
||||
n.lines["s_nom_min"] = lines_s_nom
|
||||
n.lines["s_nom_extendable"] = True
|
||||
|
||||
n.links.loc[links_dc_b, 'p_nom_min'] = n.links.loc[links_dc_b, 'p_nom']
|
||||
n.links.loc[links_dc_b, 'p_nom_extendable'] = True
|
||||
n.links.loc[links_dc_b, "p_nom_min"] = n.links.loc[links_dc_b, "p_nom"]
|
||||
n.links.loc[links_dc_b, "p_nom_extendable"] = True
|
||||
|
||||
if factor != 'opt':
|
||||
con_type = 'expansion_cost' if ll_type == 'c' else 'volume_expansion'
|
||||
if factor != "opt":
|
||||
con_type = "expansion_cost" if ll_type == "c" else "volume_expansion"
|
||||
rhs = float(factor) * ref
|
||||
n.add('GlobalConstraint', f'l{ll_type}_limit',
|
||||
type=f'transmission_{con_type}_limit',
|
||||
sense='<=', constant=rhs, carrier_attribute='AC, DC')
|
||||
n.add(
"GlobalConstraint",
f"l{ll_type}_limit",
type=f"transmission_{con_type}_limit",
sense="<=",
constant=rhs,
carrier_attribute="AC, DC",
)

return n
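Put differently, the constraint added above caps total transmission volume or cost at a multiple of today's level, so a wildcard like ll=v1.25 allows at most 25% expansion. A tiny sketch of how the right-hand side comes together, with an illustrative reference volume:

# illustrative only -- in the rule, ref is computed from the network as above
ll_type, factor = "v", "1.25"   # e.g. wildcard ll = "v1.25" (volume limit)
ref = 250e3                     # MW*km of existing line volume (hypothetical)
rhs = float(factor) * ref
print(rhs)  # 312500.0 -> GlobalConstraint caps total volume at 1.25x today's
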
@ -156,8 +175,9 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
|
||||
try:
|
||||
import tsam.timeseriesaggregation as tsam
|
||||
except:
|
||||
raise ModuleNotFoundError("Optional dependency 'tsam' not found."
|
||||
"Install via 'pip install tsam'")
|
||||
raise ModuleNotFoundError(
|
||||
"Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
|
||||
)
|
||||
|
||||
p_max_pu_norm = n.generators_t.p_max_pu.max()
|
||||
p_max_pu = n.generators_t.p_max_pu / p_max_pu_norm
|
||||
@ -170,9 +190,14 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
|
||||
|
||||
raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False)
|
||||
|
||||
agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
|
||||
noTypicalPeriods=1, noSegments=int(segments),
|
||||
segmentation=True, solver=solver_name)
|
||||
agg = tsam.TimeSeriesAggregation(
|
||||
raw,
|
||||
hoursPerPeriod=len(raw),
|
||||
noTypicalPeriods=1,
|
||||
noSegments=int(segments),
|
||||
segmentation=True,
|
||||
solver=solver_name,
|
||||
)
|
||||
|
||||
segmented = agg.createTypicalPeriods()
|
||||
|
||||
@ -180,8 +205,10 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)
snapshots = [n.snapshots[0] + pd.Timedelta(f"{offset}h") for offset in offsets]

n.set_snapshots(pd.DatetimeIndex(snapshots, name='name'))
n.snapshot_weightings = pd.Series(weightings, index=snapshots, name="weightings", dtype="float64")
n.set_snapshots(pd.DatetimeIndex(snapshots, name="name"))
n.snapshot_weightings = pd.Series(
weightings, index=snapshots, name="weightings", dtype="float64"
)

segmented.index = snapshots
n.generators_t.p_max_pu = segmented[n.generators_t.p_max_pu.columns] * p_max_pu_norm
@ -190,15 +217,14 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):

return n
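To illustrate how the segment weightings above become the new snapshot index: each segment's length in hours is both the offset to the next representative snapshot and that snapshot's weighting. A sketch with a made-up three-segment result (not actual tsam output):

import numpy as np
import pandas as pd

weightings = np.array([4, 2, 6])  # hours represented by each segment (hypothetical)
offsets = np.insert(np.cumsum(weightings[:-1]), 0, 0)  # [0, 4, 6]
start = pd.Timestamp("2013-01-01 00:00")
snapshots = [start + pd.Timedelta(f"{o}h") for o in offsets]
print(snapshots)  # 00:00, 04:00 and 06:00, weighted by 4, 2 and 6 hours
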
def enforce_autarky(n, only_crossborder=False):
|
||||
if only_crossborder:
|
||||
lines_rm = n.lines.loc[
|
||||
n.lines.bus0.map(n.buses.country) !=
|
||||
n.lines.bus1.map(n.buses.country)
|
||||
n.lines.bus0.map(n.buses.country) != n.lines.bus1.map(n.buses.country)
|
||||
].index
|
||||
links_rm = n.links.loc[
|
||||
n.links.bus0.map(n.buses.country) !=
|
||||
n.links.bus1.map(n.buses.country)
|
||||
n.links.bus0.map(n.buses.country) != n.links.bus1.map(n.buses.country)
|
||||
].index
|
||||
else:
|
||||
lines_rm = n.lines.index
|
||||
@ -206,33 +232,42 @@ def enforce_autarky(n, only_crossborder=False):
|
||||
n.mremove("Line", lines_rm)
|
||||
n.mremove("Link", links_rm)
|
||||
|
||||
|
||||
def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf):
|
||||
n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
|
||||
n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('prepare_network', simpl='',
|
||||
clusters='40', ll='v0.3', opts='Co2L-24H')
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"prepare_network", simpl="", clusters="40", ll="v0.3", opts="Co2L-24H"
|
||||
)
|
||||
configure_logging(snakemake)
|
||||
|
||||
opts = snakemake.wildcards.opts.split('-')
|
||||
opts = snakemake.wildcards.opts.split("-")
|
||||
|
||||
n = pypsa.Network(snakemake.input[0])
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.
|
||||
costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760.0
|
||||
costs = load_costs(
|
||||
snakemake.input.tech_costs,
|
||||
snakemake.config["costs"],
|
||||
snakemake.config["electricity"],
|
||||
Nyears,
|
||||
)
|
||||
|
||||
set_line_s_max_pu(n, snakemake.config['lines']['s_max_pu'])
|
||||
set_line_s_max_pu(n, snakemake.config["lines"]["s_max_pu"])
|
||||
|
||||
for o in opts:
|
||||
m = re.match(r'^\d+h$', o, re.IGNORECASE)
|
||||
m = re.match(r"^\d+h$", o, re.IGNORECASE)
|
||||
if m is not None:
|
||||
n = average_every_nhours(n, m.group(0))
|
||||
break
|
||||
|
||||
for o in opts:
|
||||
m = re.match(r'^\d+seg$', o, re.IGNORECASE)
|
||||
m = re.match(r"^\d+seg$", o, re.IGNORECASE)
|
||||
if m is not None:
|
||||
solver_name = snakemake.config["solving"]["solver"]["name"]
|
||||
n = apply_time_segmentation(n, m.group(0)[:-3], solver_name)
|
||||
@ -242,11 +277,11 @@ if __name__ == "__main__":
|
||||
if "Co2L" in o:
|
||||
m = re.findall("[0-9]*\.?[0-9]+$", o)
|
||||
if len(m) > 0:
|
||||
co2limit = float(m[0]) * snakemake.config['electricity']['co2base']
|
||||
co2limit = float(m[0]) * snakemake.config["electricity"]["co2base"]
|
||||
add_co2limit(n, co2limit, Nyears)
|
||||
logger.info("Setting CO2 limit according to wildcard value.")
|
||||
else:
|
||||
add_co2limit(n, snakemake.config['electricity']['co2limit'], Nyears)
|
||||
add_co2limit(n, snakemake.config["electricity"]["co2limit"], Nyears)
|
||||
logger.info("Setting CO2 limit according to config value.")
|
||||
break
|
||||
|
||||
@ -280,21 +315,24 @@ if __name__ == "__main__":
|
||||
c.df.loc[sel, attr] *= factor
|
||||
|
||||
for o in opts:
|
||||
if 'Ep' in o:
|
||||
if "Ep" in o:
|
||||
m = re.findall("[0-9]*\.?[0-9]+$", o)
|
||||
if len(m) > 0:
|
||||
logger.info("Setting emission prices according to wildcard value.")
|
||||
add_emission_prices(n, dict(co2=float(m[0])))
|
||||
else:
|
||||
logger.info("Setting emission prices according to config value.")
|
||||
add_emission_prices(n, snakemake.config['costs']['emission_prices'])
|
||||
add_emission_prices(n, snakemake.config["costs"]["emission_prices"])
|
||||
break
|
||||
|
||||
ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
|
||||
set_transmission_limit(n, ll_type, factor, costs, Nyears)
|
||||
|
||||
set_line_nom_max(n, s_nom_max_set=snakemake.config["lines"].get("s_nom_max,", np.inf),
|
||||
p_nom_max_set=snakemake.config["links"].get("p_nom_max,", np.inf))
|
||||
set_line_nom_max(
|
||||
n,
|
||||
s_nom_max_set=snakemake.config["lines"].get("s_nom_max,", np.inf),
|
||||
p_nom_max_set=snakemake.config["links"].get("p_nom_max,", np.inf),
|
||||
)
|
||||
|
||||
if "ATK" in opts:
|
||||
enforce_autarky(n)
|
||||
|
@ -1,3 +1,4 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2019-2022 Fabian Hofmann (TUB, FIAS)
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
@ -33,24 +34,27 @@ The :ref:`tutorial` uses a smaller `data bundle <https://zenodo.org/record/35179
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import progress_retrieve, configure_logging
|
||||
|
||||
import tarfile
|
||||
from pathlib import Path
|
||||
|
||||
from _helpers import configure_logging, progress_retrieve
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('retrieve_databundle')
|
||||
rootpath = '..'
|
||||
else:
|
||||
rootpath = '.'
|
||||
configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102)
|
||||
|
||||
if snakemake.config['tutorial']:
|
||||
snakemake = mock_snakemake("retrieve_databundle")
|
||||
rootpath = ".."
|
||||
else:
|
||||
rootpath = "."
|
||||
configure_logging(
|
||||
snakemake
|
||||
) # TODO Make logging compatible with progressbar (see PR #102)
|
||||
|
||||
if snakemake.config["tutorial"]:
|
||||
url = "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz"
|
||||
else:
|
||||
url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"
|
||||
|
@ -1,12 +1,13 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
|
||||
#
|
||||
# SPDX-License-Identifier: MIT
|
||||
|
||||
# coding: utf-8
|
||||
"""
|
||||
Lifts electrical transmission network to a single 380 kV voltage layer,
|
||||
removes dead-ends of the network,
|
||||
and reduces multi-hop HVDC connections to a single link.
|
||||
Lifts electrical transmission network to a single 380 kV voltage layer, removes
|
||||
dead-ends of the network, and reduces multi-hop HVDC connections to a single
|
||||
link.
|
||||
|
||||
Relevant Settings
|
||||
-----------------
|
||||
@ -77,7 +78,7 @@ The rule :mod:`simplify_network` does up to four things:
|
||||
|
||||
1. Create an equivalent transmission network in which all voltage levels are mapped to the 380 kV level by the function ``simplify_network(...)``.
|
||||
|
||||
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the captial costs of the generator.
|
||||
2. DC only sub-networks that are connected at only two buses to the AC network are reduced to a single representative link in the function ``simplify_links(...)``. The components attached to buses in between are moved to the nearest endpoint. The grid connection cost of offshore wind generators are added to the capital costs of the generator.
|
||||
|
||||
3. Stub lines and links, i.e. dead-ends of the network, are sequentially removed from the network in the function ``remove_stubs(...)``. Components are moved along.
|
||||
|
||||
@ -85,21 +86,23 @@ The rule :mod:`simplify_network` does up to four things:
|
||||
"""
|
||||
|
||||
import logging
|
||||
from _helpers import configure_logging, update_p_nom_max, get_aggregation_strategies
|
||||
|
||||
from cluster_network import clustering_for_n_clusters, cluster_regions
|
||||
from add_electricity import load_costs
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
import scipy as sp
|
||||
from scipy.sparse.csgraph import connected_components, dijkstra
|
||||
|
||||
from functools import reduce
|
||||
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
import pypsa
|
||||
import scipy as sp
|
||||
from _helpers import configure_logging, get_aggregation_strategies, update_p_nom_max
|
||||
from add_electricity import load_costs
|
||||
from cluster_network import cluster_regions, clustering_for_n_clusters
|
||||
from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
|
||||
from pypsa.networkclustering import busmap_by_stubs, aggregategenerators, aggregateoneport, get_clustering_from_busmap
|
||||
from pypsa.networkclustering import (
|
||||
aggregategenerators,
|
||||
aggregateoneport,
|
||||
busmap_by_stubs,
|
||||
get_clustering_from_busmap,
|
||||
)
|
||||
from scipy.sparse.csgraph import connected_components, dijkstra
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -112,21 +115,21 @@ def simplify_network_to_380(n):
|
||||
its voltage level, line type and number of parallel bundles (num_parallel).
|
||||
|
||||
Transformers are removed and connected components are moved from their
|
||||
starting bus to their ending bus. The corresponing starting buses are
|
||||
starting bus to their ending bus. The corresponding starting buses are
|
||||
removed as well.
|
||||
"""
|
||||
logger.info("Mapping all network lines onto a single 380kV layer")
|
||||
|
||||
n.buses['v_nom'] = 380.
|
||||
n.buses["v_nom"] = 380.0
|
||||
|
||||
linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique()
|
||||
n.lines['type'] = linetype_380
|
||||
(linetype_380,) = n.lines.loc[n.lines.v_nom == 380.0, "type"].unique()
|
||||
n.lines["type"] = linetype_380
|
||||
n.lines["v_nom"] = 380
|
||||
n.lines["i_nom"] = n.line_types.i_nom[linetype_380]
|
||||
n.lines['num_parallel'] = n.lines.eval("s_nom / (sqrt(3) * v_nom * i_nom)")
|
||||
n.lines["num_parallel"] = n.lines.eval("s_nom / (sqrt(3) * v_nom * i_nom)")
|
||||
|
||||
trafo_map = pd.Series(n.transformers.bus1.values, n.transformers.bus0.values)
|
||||
trafo_map = trafo_map[~trafo_map.index.duplicated(keep='first')]
|
||||
trafo_map = trafo_map[~trafo_map.index.duplicated(keep="first")]
|
||||
several_trafo_b = trafo_map.isin(trafo_map.index)
|
||||
trafo_map[several_trafo_b] = trafo_map[several_trafo_b].map(trafo_map)
|
||||
missing_buses_i = n.buses.index.difference(trafo_map.index)
|
||||
@ -136,7 +139,7 @@ def simplify_network_to_380(n):
|
||||
for c in n.one_port_components | n.branch_components:
|
||||
df = n.df(c)
|
||||
for col in df.columns:
|
||||
if col.startswith('bus'):
|
||||
if col.startswith("bus"):
|
||||
df[col] = df[col].map(trafo_map)
|
||||
|
||||
n.mremove("Transformer", n.transformers.index)
|
||||
@ -146,22 +149,30 @@ def simplify_network_to_380(n):
|
||||
|
||||
|
||||
def _prepare_connection_costs_per_link(n, costs, config):
|
||||
if n.links.empty: return {}
|
||||
if n.links.empty:
|
||||
return {}
|
||||
|
||||
connection_costs_per_link = {}
|
||||
|
||||
for tech in config['renewable']:
|
||||
if tech.startswith('offwind'):
|
||||
for tech in config["renewable"]:
|
||||
if tech.startswith("offwind"):
|
||||
connection_costs_per_link[tech] = (
|
||||
n.links.length * config['lines']['length_factor'] *
|
||||
(n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
|
||||
(1. - n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])
|
||||
n.links.length
|
||||
* config["lines"]["length_factor"]
|
||||
* (
|
||||
n.links.underwater_fraction
|
||||
* costs.at[tech + "-connection-submarine", "capital_cost"]
|
||||
+ (1.0 - n.links.underwater_fraction)
|
||||
* costs.at[tech + "-connection-underground", "capital_cost"]
|
||||
)
|
||||
)
|
||||
|
||||
return connection_costs_per_link
|
||||
|
||||
|
||||
def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link=None, buses=None):
def _compute_connection_costs_to_bus(
n, busmap, costs, config, connection_costs_per_link=None, buses=None
):
if connection_costs_per_link is None:
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)

@ -171,12 +182,21 @@ def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_
connection_costs_to_bus = pd.DataFrame(index=buses)

for tech in connection_costs_per_link:
adj = n.adjacency_matrix(weights=pd.concat(dict(Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0., n.lines.index))))
adj = n.adjacency_matrix(
weights=pd.concat(
dict(
Link=connection_costs_per_link[tech].reindex(n.links.index),
Line=pd.Series(0.0, n.lines.index),
)
)
)

costs_between_buses = dijkstra(adj, directed=False, indices=n.buses.index.get_indexer(buses))
connection_costs_to_bus[tech] = costs_between_buses[np.arange(len(buses)),
n.buses.index.get_indexer(busmap.loc[buses])]
costs_between_buses = dijkstra(
adj, directed=False, indices=n.buses.index.get_indexer(buses)
)
connection_costs_to_bus[tech] = costs_between_buses[
np.arange(len(buses)), n.buses.index.get_indexer(busmap.loc[buses])
]

return connection_costs_to_bus
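The cheapest-path logic above relies on scipy's dijkstra over a weighted adjacency matrix; the connection cost attributed to a bus is the cost of the cheapest chain of links towards the bus it is mapped to. A self-contained toy sketch of that pattern (three buses and made-up edge costs, unrelated to the network data):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import dijkstra

# weights: bus0-bus1 costs 5, bus1-bus2 costs 3, no direct bus0-bus2 edge
adj = csr_matrix(np.array([[0.0, 5.0, 0.0], [0.0, 0.0, 3.0], [0.0, 0.0, 0.0]]))
dist = dijkstra(adj, directed=False, indices=[0, 2])
print(dist)  # [[0. 5. 8.] [8. 3. 0.]] -- one row per source bus
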
@ -185,20 +205,34 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, out
|
||||
connection_costs = {}
|
||||
for tech in connection_costs_to_bus:
|
||||
tech_b = n.generators.carrier == tech
|
||||
costs = n.generators.loc[tech_b, "bus"].map(connection_costs_to_bus[tech]).loc[lambda s: s>0]
|
||||
costs = (
|
||||
n.generators.loc[tech_b, "bus"]
|
||||
.map(connection_costs_to_bus[tech])
|
||||
.loc[lambda s: s > 0]
|
||||
)
|
||||
if not costs.empty:
|
||||
n.generators.loc[costs.index, "capital_cost"] += costs
|
||||
logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
|
||||
.format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
|
||||
logger.info(
|
||||
"Displacing {} generator(s) and adding connection costs to capital_costs: {} ".format(
|
||||
tech,
|
||||
", ".join(
|
||||
"{:.0f} Eur/MW/a for `{}`".format(d, b)
|
||||
for b, d in costs.iteritems()
|
||||
),
|
||||
)
|
||||
)
|
||||
connection_costs[tech] = costs
|
||||
pd.DataFrame(connection_costs).to_csv(output.connection_costs)
|
||||
|
||||
|
||||
|
||||
def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
|
||||
def _aggregate_and_move_components(
|
||||
n,
|
||||
busmap,
|
||||
connection_costs_to_bus,
|
||||
output,
|
||||
aggregate_one_ports={"Load", "StorageUnit"},
|
||||
aggregation_strategies=dict()):
|
||||
|
||||
aggregation_strategies=dict(),
|
||||
):
|
||||
def replace_components(n, c, df, pnl):
|
||||
n.mremove(c, n.df(c).index)
|
||||
|
||||
@ -236,8 +270,10 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
|
||||
return n, n.buses.index.to_series()
|
||||
|
||||
# Determine connected link components, ignore all links but DC
|
||||
adjacency_matrix = n.adjacency_matrix(branch_components=['Link'],
|
||||
weights=dict(Link=(n.links.carrier == 'DC').astype(float)))
|
||||
adjacency_matrix = n.adjacency_matrix(
|
||||
branch_components=["Link"],
|
||||
weights=dict(Link=(n.links.carrier == "DC").astype(float)),
|
||||
)
|
||||
|
||||
_, labels = connected_components(adjacency_matrix, directed=False)
|
||||
labels = pd.Series(labels, n.buses.index)
|
||||
@ -248,12 +284,12 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
|
||||
nodes = frozenset(nodes)
|
||||
|
||||
seen = set()
|
||||
supernodes = {m for m in nodes
|
||||
if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
|
||||
supernodes = {m for m in nodes if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}
|
||||
|
||||
for u in supernodes:
|
||||
for m, ls in G.adj[u].items():
|
||||
if m not in nodes or m in seen: continue
|
||||
if m not in nodes or m in seen:
|
||||
continue
|
||||
|
||||
buses = [u, m]
|
||||
links = [list(ls)] # [name for name in ls]]
|
||||
@ -261,7 +297,8 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
|
||||
while m not in (supernodes | seen):
|
||||
seen.add(m)
|
||||
for m2, ls in G.adj[m].items():
|
||||
if m2 in seen or m2 == u: continue
|
||||
if m2 in seen or m2 == u:
|
||||
continue
|
||||
buses.append(m2)
|
||||
links.append(list(ls)) # [name for name in ls])
|
||||
break
|
||||
@ -276,54 +313,78 @@ def simplify_links(n, costs, config, output, aggregation_strategies=dict()):
|
||||
busmap = n.buses.index.to_series()
|
||||
|
||||
connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
|
||||
connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link))
|
||||
connection_costs_to_bus = pd.DataFrame(
|
||||
0.0, index=n.buses.index, columns=list(connection_costs_per_link)
|
||||
)
|
||||
|
||||
for lbl in labels.value_counts().loc[lambda s: s > 2].index:
|
||||
|
||||
for b, buses, links in split_links(labels.index[labels == lbl]):
|
||||
if len(buses) <= 2: continue
|
||||
if len(buses) <= 2:
|
||||
continue
|
||||
|
||||
logger.debug('nodes = {}'.format(labels.index[labels == lbl]))
|
||||
logger.debug('b = {}\nbuses = {}\nlinks = {}'.format(b, buses, links))
|
||||
logger.debug("nodes = {}".format(labels.index[labels == lbl]))
|
||||
logger.debug("b = {}\nbuses = {}\nlinks = {}".format(b, buses, links))
|
||||
|
||||
m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']],
|
||||
n.buses.loc[buses[1:-1], ['x', 'y']])
|
||||
m = sp.spatial.distance_matrix(
|
||||
n.buses.loc[b, ["x", "y"]], n.buses.loc[buses[1:-1], ["x", "y"]]
|
||||
)
|
||||
busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
|
||||
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses)
|
||||
connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(
|
||||
n, busmap, costs, config, connection_costs_per_link, buses
|
||||
)
|
||||
|
||||
all_links = [i for _, i in sum(links, [])]
|
||||
|
||||
p_max_pu = config['links'].get('p_max_pu', 1.)
|
||||
lengths = n.links.loc[all_links, 'length']
|
||||
name = lengths.idxmax() + '+{}'.format(len(links) - 1)
|
||||
p_max_pu = config["links"].get("p_max_pu", 1.0)
|
||||
lengths = n.links.loc[all_links, "length"]
|
||||
name = lengths.idxmax() + "+{}".format(len(links) - 1)
|
||||
params = dict(
|
||||
carrier='DC',
|
||||
bus0=b[0], bus1=b[1],
|
||||
length=sum(n.links.loc[[i for _, i in l], 'length'].mean() for l in links),
|
||||
p_nom=min(n.links.loc[[i for _, i in l], 'p_nom'].sum() for l in links),
|
||||
underwater_fraction=sum(lengths/lengths.sum() * n.links.loc[all_links, 'underwater_fraction']),
|
||||
carrier="DC",
|
||||
bus0=b[0],
|
||||
bus1=b[1],
|
||||
length=sum(
|
||||
n.links.loc[[i for _, i in l], "length"].mean() for l in links
|
||||
),
|
||||
p_nom=min(n.links.loc[[i for _, i in l], "p_nom"].sum() for l in links),
|
||||
underwater_fraction=sum(
|
||||
lengths
|
||||
/ lengths.sum()
|
||||
* n.links.loc[all_links, "underwater_fraction"]
|
||||
),
|
||||
p_max_pu=p_max_pu,
|
||||
p_min_pu=-p_max_pu,
|
||||
underground=False,
|
||||
under_construction=False
|
||||
under_construction=False,
|
||||
)
|
||||
|
||||
logger.info("Joining the links {} connecting the buses {} to simple link {}".format(", ".join(all_links), ", ".join(buses), name))
|
||||
logger.info(
|
||||
"Joining the links {} connecting the buses {} to simple link {}".format(
|
||||
", ".join(all_links), ", ".join(buses), name
|
||||
)
|
||||
)
|
||||
|
||||
n.mremove("Link", all_links)
|
||||
|
||||
static_attrs = n.components["Link"]["attrs"].loc[lambda df: df.static]
|
||||
for attr, default in static_attrs.default.iteritems(): params.setdefault(attr, default)
|
||||
for attr, default in static_attrs.default.iteritems():
|
||||
params.setdefault(attr, default)
|
||||
n.links.loc[name] = pd.Series(params)
|
||||
|
||||
# n.add("Link", **params)
|
||||
|
||||
logger.debug("Collecting all components using the busmap")
|
||||
|
||||
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
|
||||
aggregation_strategies=aggregation_strategies)
|
||||
_aggregate_and_move_components(
|
||||
n,
|
||||
busmap,
|
||||
connection_costs_to_bus,
|
||||
output,
|
||||
aggregation_strategies=aggregation_strategies,
|
||||
)
|
||||
return n, busmap
|
||||
|
||||
|
||||
def remove_stubs(n, costs, config, output, aggregation_strategies=dict()):
|
||||
logger.info("Removing stubs")
|
||||
|
||||
@ -331,28 +392,44 @@ def remove_stubs(n, costs, config, output, aggregation_strategies=dict()):
|
||||
|
||||
connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config)
|
||||
|
||||
_aggregate_and_move_components(n, busmap, connection_costs_to_bus, output,
|
||||
aggregation_strategies=aggregation_strategies)
|
||||
_aggregate_and_move_components(
|
||||
n,
|
||||
busmap,
|
||||
connection_costs_to_bus,
|
||||
output,
|
||||
aggregation_strategies=aggregation_strategies,
|
||||
)
|
||||
|
||||
return n, busmap
|
||||
|
||||
|
||||
def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
|
||||
# can be used to aggregate a selection of buses to electrically closest neighbors
|
||||
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
|
||||
|
||||
if buses_i is None:
|
||||
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
|
||||
logger.info(
|
||||
"Aggregating buses that are no substations or have no valid offshore connection"
|
||||
)
|
||||
buses_i = list(set(n.buses.index) - set(n.generators.bus) - set(n.loads.bus))
|
||||
|
||||
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3),
|
||||
'Link': n.links.length/n.links.p_nom.clip(1e-3)})
|
||||
weight = pd.concat(
|
||||
{
|
||||
"Line": n.lines.length / n.lines.s_nom.clip(1e-3),
|
||||
"Link": n.links.length / n.links.p_nom.clip(1e-3),
|
||||
}
|
||||
)
|
||||
|
||||
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
|
||||
adj = n.adjacency_matrix(branch_components=["Line", "Link"], weights=weight)
|
||||
|
||||
bus_indexer = n.buses.index.get_indexer(buses_i)
|
||||
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index)
|
||||
dist = pd.DataFrame(
|
||||
dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index
|
||||
)
|
||||
|
||||
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
|
||||
dist[
|
||||
buses_i
|
||||
] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
|
||||
|
||||
for c in n.buses.country.unique():
|
||||
incountry_b = n.buses.country == c
|
||||
@ -361,46 +438,65 @@ def aggregate_to_substations(n, aggregation_strategies=dict(), buses_i=None):
|
||||
busmap = n.buses.index.to_series()
|
||||
busmap.loc[buses_i] = dist.idxmin(1)
|
||||
|
||||
bus_strategies, generator_strategies = get_aggregation_strategies(aggregation_strategies)
|
||||
bus_strategies, generator_strategies = get_aggregation_strategies(
|
||||
aggregation_strategies
|
||||
)
|
||||
|
||||
clustering = get_clustering_from_busmap(n, busmap,
|
||||
clustering = get_clustering_from_busmap(
|
||||
n,
|
||||
busmap,
|
||||
bus_strategies=bus_strategies,
|
||||
aggregate_generators_weighted=True,
|
||||
aggregate_generators_carriers=None,
|
||||
aggregate_one_ports=["Load", "StorageUnit"],
|
||||
line_length_factor=1.0,
|
||||
generator_strategies=generator_strategies,
|
||||
scale_link_capital_costs=False)
|
||||
scale_link_capital_costs=False,
|
||||
)
|
||||
return clustering.network, busmap
|
||||
|
||||
|
||||
def cluster(n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict()):
|
||||
def cluster(
|
||||
n, n_clusters, config, algorithm="hac", feature=None, aggregation_strategies=dict()
|
||||
):
|
||||
logger.info(f"Clustering to {n_clusters} buses")
|
||||
|
||||
focus_weights = config.get('focus_weights', None)
|
||||
focus_weights = config.get("focus_weights", None)
|
||||
|
||||
renewable_carriers = pd.Index([tech
|
||||
renewable_carriers = pd.Index(
|
||||
[
|
||||
tech
|
||||
for tech in n.generators.carrier.unique()
|
||||
if tech.split('-', 2)[0] in config['renewable']])
|
||||
if tech.split("-", 2)[0] in config["renewable"]
|
||||
]
|
||||
)
|
||||
|
||||
clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False,
|
||||
clustering = clustering_for_n_clusters(
|
||||
n,
|
||||
n_clusters,
|
||||
custom_busmap=False,
|
||||
aggregation_strategies=aggregation_strategies,
|
||||
solver_name=config['solving']['solver']['name'],
|
||||
algorithm=algorithm, feature=feature,
|
||||
focus_weights=focus_weights)
|
||||
solver_name=config["solving"]["solver"]["name"],
|
||||
algorithm=algorithm,
|
||||
feature=feature,
|
||||
focus_weights=focus_weights,
|
||||
)
|
||||
|
||||
return clustering.network, clustering.busmap
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
snakemake = mock_snakemake('simplify_network', simpl='f')
|
||||
|
||||
snakemake = mock_snakemake("simplify_network", simpl="f")
|
||||
configure_logging(snakemake)
|
||||
|
||||
n = pypsa.Network(snakemake.input.network)
|
||||
|
||||
aggregation_strategies = snakemake.config["clustering"].get("aggregation_strategies", {})
|
||||
aggregation_strategies = snakemake.config["clustering"].get(
|
||||
"aggregation_strategies", {}
|
||||
)
|
||||
# translate str entries of aggregation_strategies to pd.Series functions:
|
||||
aggregation_strategies = {
|
||||
p: {k: getattr(pd.Series, v) for k, v in aggregation_strategies[p].items()}
|
||||
@ -411,44 +507,78 @@ if __name__ == "__main__":
|
||||
|
||||
Nyears = n.snapshot_weightings.objective.sum() / 8760
|
||||
|
||||
technology_costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
|
||||
technology_costs = load_costs(
|
||||
snakemake.input.tech_costs,
|
||||
snakemake.config["costs"],
|
||||
snakemake.config["electricity"],
|
||||
Nyears,
|
||||
)
|
||||
|
||||
n, simplify_links_map = simplify_links(n, technology_costs, snakemake.config, snakemake.output,
|
||||
aggregation_strategies)
|
||||
n, simplify_links_map = simplify_links(
|
||||
n, technology_costs, snakemake.config, snakemake.output, aggregation_strategies
|
||||
)
|
||||
|
||||
n, stub_map = remove_stubs(n, technology_costs, snakemake.config, snakemake.output,
|
||||
aggregation_strategies=aggregation_strategies)
|
||||
n, stub_map = remove_stubs(
|
||||
n,
|
||||
technology_costs,
|
||||
snakemake.config,
|
||||
snakemake.output,
|
||||
aggregation_strategies=aggregation_strategies,
|
||||
)
|
||||
|
||||
busmaps = [trafo_map, simplify_links_map, stub_map]
|
||||
|
||||
cluster_config = snakemake.config.get('clustering', {}).get('simplify_network', {})
|
||||
if cluster_config.get('clustering', {}).get('simplify_network', {}).get('to_substations', False):
|
||||
cluster_config = snakemake.config.get("clustering", {}).get("simplify_network", {})
|
||||
if (
|
||||
cluster_config.get("clustering", {})
|
||||
.get("simplify_network", {})
|
||||
.get("to_substations", False)
|
||||
):
|
||||
n, substation_map = aggregate_to_substations(n, aggregation_strategies)
|
||||
busmaps.append(substation_map)
|
||||
|
||||
# treatment of outliers (nodes without a profile for considered carrier):
|
||||
# all nodes that have no profile of the given carrier are being aggregated to closest neighbor
|
||||
if (
|
||||
snakemake.config.get("clustering", {}).get("cluster_network", {}).get("algorithm", "hac") == "hac" or
|
||||
cluster_config.get("algorithm", "hac") == "hac"
|
||||
snakemake.config.get("clustering", {})
|
||||
.get("cluster_network", {})
|
||||
.get("algorithm", "hac")
|
||||
== "hac"
|
||||
or cluster_config.get("algorithm", "hac") == "hac"
|
||||
):
|
||||
carriers = cluster_config.get("feature", "solar+onwind-time").split('-')[0].split('+')
|
||||
carriers = (
|
||||
cluster_config.get("feature", "solar+onwind-time").split("-")[0].split("+")
|
||||
)
|
||||
for carrier in carriers:
|
||||
buses_i = list(set(n.buses.index)-set(n.generators.query("carrier == @carrier").bus))
|
||||
logger.info(f'clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}.')
|
||||
buses_i = list(
|
||||
set(n.buses.index) - set(n.generators.query("carrier == @carrier").bus)
|
||||
)
|
||||
logger.info(
|
||||
f"clustering preparaton (hac): aggregating {len(buses_i)} buses of type {carrier}."
|
||||
)
|
||||
n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i)
|
||||
busmaps.append(busmap_hac)
|
||||
|
||||
if snakemake.wildcards.simpl:
|
||||
n, cluster_map = cluster(n, int(snakemake.wildcards.simpl), snakemake.config,
|
||||
cluster_config.get('algorithm', 'hac'),
|
||||
cluster_config.get('feature', None),
|
||||
aggregation_strategies)
|
||||
n, cluster_map = cluster(
|
||||
n,
|
||||
int(snakemake.wildcards.simpl),
|
||||
snakemake.config,
|
||||
cluster_config.get("algorithm", "hac"),
|
||||
cluster_config.get("feature", None),
|
||||
aggregation_strategies,
|
||||
)
|
||||
busmaps.append(cluster_map)
|
||||
|
||||
# some entries in n.buses are not updated in previous functions, therefore can be wrong. as they are not needed
|
||||
# and are lost when clustering (for example with the simpl wildcard), we remove them for consistency:
|
||||
buses_c = {'symbol', 'tags', 'under_construction', 'substation_lv', 'substation_off'}.intersection(n.buses.columns)
|
||||
buses_c = {
|
||||
"symbol",
|
||||
"tags",
|
||||
"under_construction",
|
||||
"substation_lv",
|
||||
"substation_off",
|
||||
}.intersection(n.buses.columns)
|
||||
n.buses = n.buses.drop(buses_c, axis=1)
|
||||
|
||||
update_p_nom_max(n)
|
||||
|
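The busmaps collected above are successive bus-to-bus mappings (original buses, transformer-merged buses, link-simplified buses, and so on). One common way to collapse such a chain into a single mapping is to chain the Series with map; this is only a sketch of the idea, not necessarily the helper the script itself uses downstream:

from functools import reduce

import pandas as pd

def compose_busmaps(busmaps):
    # Each busmap is a pandas Series mapping the bus names of one stage
    # to the bus names of the next stage; mapping them through each other
    # yields original bus -> final aggregated bus.
    return reduce(lambda x, y: x.map(y), busmaps)

# e.g. compose_busmaps([trafo_map, simplify_links_map, stub_map])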
@ -1,9 +1,11 @@

# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

"""
Solves linear optimal power flow for a network iteratively while updating reactances.
Solves linear optimal power flow for a network iteratively while updating
reactances.

Relevant Settings
-----------------
@ -73,104 +75,123 @@ Details (and errors made through this heuristic) are discussed in the paper
The rule :mod:`solve_all_networks` runs
for all ``scenario`` s in the configuration file
the rule :mod:`solve_network`.

"""

import logging
from _helpers import configure_logging
import re
from pathlib import Path

import numpy as np
import pandas as pd
import re

import pypsa
from pypsa.linopf import (get_var, define_constraints, define_variables,
linexpr, join_exprs, network_lopf, ilopf)
from _helpers import configure_logging
from pypsa.descriptors import get_switchable_as_dense as get_as_dense

from pathlib import Path
from pypsa.linopf import (
define_constraints,
define_variables,
get_var,
ilopf,
join_exprs,
linexpr,
network_lopf,
)
from vresutils.benchmark import memory_logger

logger = logging.getLogger(__name__)


def prepare_network(n, solve_opts):

if 'clip_p_max_pu' in solve_opts:
if "clip_p_max_pu" in solve_opts:
for df in (n.generators_t.p_max_pu, n.storage_units_t.inflow):
df.where(df>solve_opts['clip_p_max_pu'], other=0., inplace=True)
df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)

load_shedding = solve_opts.get('load_shedding')
load_shedding = solve_opts.get("load_shedding")
if load_shedding:
n.add("Carrier", "load", color="#dd2e23", nice_name="Load shedding")
buses_i = n.buses.query("carrier == 'AC'").index
if not np.isscalar(load_shedding): load_shedding = 1e2 # Eur/kWh
if not np.isscalar(load_shedding):
load_shedding = 1e2 # Eur/kWh
# intersect between macroeconomic and surveybased
# willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full)
n.madd("Generator", buses_i, " load",
n.madd(
"Generator",
buses_i,
" load",
bus=buses_i,
carrier='load',
carrier="load",
sign=1e-3, # Adjust sign to measure p and p_nom in kW instead of MW
marginal_cost=load_shedding,
p_nom=1e9 # kW
p_nom=1e9, # kW
)

if solve_opts.get('noisy_costs'):
if solve_opts.get("noisy_costs"):
for t in n.iterate_components(n.one_port_components):
# if 'capital_cost' in t.df:
# t.df['capital_cost'] += 1e1 + 2.*(np.random.random(len(t.df)) - 0.5)
if 'marginal_cost' in t.df:
t.df['marginal_cost'] += (1e-2 + 2e-3 *
(np.random.random(len(t.df)) - 0.5))
if "marginal_cost" in t.df:
t.df["marginal_cost"] += 1e-2 + 2e-3 * (
np.random.random(len(t.df)) - 0.5
)

for t in n.iterate_components(['Line', 'Link']):
t.df['capital_cost'] += (1e-1 +
2e-2*(np.random.random(len(t.df)) - 0.5)) * t.df['length']
for t in n.iterate_components(["Line", "Link"]):
t.df["capital_cost"] += (
1e-1 + 2e-2 * (np.random.random(len(t.df)) - 0.5)
) * t.df["length"]

if solve_opts.get('nhours'):
nhours = solve_opts['nhours']
if solve_opts.get("nhours"):
nhours = solve_opts["nhours"]
n.set_snapshots(n.snapshots[:nhours])
n.snapshot_weightings[:] = 8760. / nhours
n.snapshot_weightings[:] = 8760.0 / nhours

return n
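A note on the load-shedding generators added above: as the inline comments state, ``sign=1e-3`` measures p and p_nom in kW instead of MW, so the fallback ``marginal_cost`` of 1e2 is read as EUR per kWh. A small sanity-check sketch using only the defaults visible in the code:

sign = 1e-3            # 1 unit of p injects 1e-3 MW, i.e. 1 kW, at the bus
marginal_cost = 1e2    # cost per unit of p, i.e. 100 EUR/kWh
p_nom = 1e9            # capacity in kW

# equivalent cost per MWh of shed energy (assuming this reading of sign):
eur_per_mwh = marginal_cost / sign  # 100 / 1e-3 = 100,000 EUR/MWh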
def add_CCL_constraints(n, config):
agg_p_nom_limits = config['electricity'].get('agg_p_nom_limits')
agg_p_nom_limits = config["electricity"].get("agg_p_nom_limits")

try:
agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits,
index_col=list(range(2)))
agg_p_nom_minmax = pd.read_csv(agg_p_nom_limits, index_col=list(range(2)))
except IOError:
logger.exception("Need to specify the path to a .csv file containing "
logger.exception(
"Need to specify the path to a .csv file containing "
"aggregate capacity limits per country in "
"config['electricity']['agg_p_nom_limit'].")
logger.info("Adding per carrier generation capacity constraints for "
"individual countries")
"config['electricity']['agg_p_nom_limit']."
)
logger.info(
"Adding per carrier generation capacity constraints for " "individual countries"
)

gen_country = n.generators.bus.map(n.buses.country)
# cc means country and carrier
p_nom_per_cc = (pd.DataFrame(
{'p_nom': linexpr((1, get_var(n, 'Generator', 'p_nom'))),
'country': gen_country, 'carrier': n.generators.carrier})
.dropna(subset=['p_nom'])
.groupby(['country', 'carrier']).p_nom
.apply(join_exprs))
minimum = agg_p_nom_minmax['min'].dropna()
p_nom_per_cc = (
pd.DataFrame(
{
"p_nom": linexpr((1, get_var(n, "Generator", "p_nom"))),
"country": gen_country,
"carrier": n.generators.carrier,
}
)
.dropna(subset=["p_nom"])
.groupby(["country", "carrier"])
.p_nom.apply(join_exprs)
)
minimum = agg_p_nom_minmax["min"].dropna()
if not minimum.empty:
minconstraint = define_constraints(n, p_nom_per_cc[minimum.index],
'>=', minimum, 'agg_p_nom', 'min')
maximum = agg_p_nom_minmax['max'].dropna()
minconstraint = define_constraints(
n, p_nom_per_cc[minimum.index], ">=", minimum, "agg_p_nom", "min"
)
maximum = agg_p_nom_minmax["max"].dropna()
if not maximum.empty:
maxconstraint = define_constraints(n, p_nom_per_cc[maximum.index],
'<=', maximum, 'agg_p_nom', 'max')
maxconstraint = define_constraints(
n, p_nom_per_cc[maximum.index], "<=", maximum, "agg_p_nom", "max"
)
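``add_CCL_constraints`` expects the file behind config['electricity']['agg_p_nom_limits'] to carry a two-column index (country, carrier) and ``min``/``max`` columns, as implied by the ``index_col=list(range(2))`` read and the ``['min']``/``['max']`` lookups above. A hypothetical file could look like this (values purely illustrative):

import io

import pandas as pd

# illustrative contents for config['electricity']['agg_p_nom_limits']
csv = io.StringIO(
    "country,carrier,min,max\n"
    "DE,onwind,10000,\n"
    "DE,solar,,50000\n"
    "FR,nuclear,40000,63000\n"
)
agg_p_nom_minmax = pd.read_csv(csv, index_col=list(range(2)))
# agg_p_nom_minmax["min"].dropna() -> lower limits indexed by (country, carrier)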
def add_EQ_constraints(n, o, scaling=1e-1):
float_regex = "[0-9]*\.?[0-9]+"
level = float(re.findall(float_regex, o)[0])
if o[-1] == 'c':
if o[-1] == "c":
ggrouper = n.generators.bus.map(n.buses.country)
lgrouper = n.loads.bus.map(n.buses.country)
sgrouper = n.storage_units.bus.map(n.buses.country)
@ -178,60 +199,83 @@ def add_EQ_constraints(n, o, scaling=1e-1):
ggrouper = n.generators.bus
lgrouper = n.loads.bus
sgrouper = n.storage_units.bus
load = n.snapshot_weightings.generators @ \
n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
inflow = n.snapshot_weightings.stores @ \
n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
inflow = inflow.reindex(load.index).fillna(0.)
load = (
n.snapshot_weightings.generators
@ n.loads_t.p_set.groupby(lgrouper, axis=1).sum()
)
inflow = (
n.snapshot_weightings.stores
@ n.storage_units_t.inflow.groupby(sgrouper, axis=1).sum()
)
inflow = inflow.reindex(load.index).fillna(0.0)
rhs = scaling * (level * load - inflow)
lhs_gen = linexpr((n.snapshot_weightings.generators * scaling,
get_var(n, "Generator", "p").T)
).T.groupby(ggrouper, axis=1).apply(join_exprs)
lhs_spill = linexpr((-n.snapshot_weightings.stores * scaling,
get_var(n, "StorageUnit", "spill").T)
).T.groupby(sgrouper, axis=1).apply(join_exprs)
lhs_gen = (
linexpr(
(n.snapshot_weightings.generators * scaling, get_var(n, "Generator", "p").T)
)
.T.groupby(ggrouper, axis=1)
.apply(join_exprs)
)
lhs_spill = (
linexpr(
(
-n.snapshot_weightings.stores * scaling,
get_var(n, "StorageUnit", "spill").T,
)
)
.T.groupby(sgrouper, axis=1)
.apply(join_exprs)
)
lhs_spill = lhs_spill.reindex(lhs_gen.index).fillna("")
lhs = lhs_gen + lhs_spill
define_constraints(n, lhs, ">=", rhs, "equity", "min")


def add_BAU_constraints(n, config):
mincaps = pd.Series(config['electricity']['BAU_mincapacities'])
lhs = (linexpr((1, get_var(n, 'Generator', 'p_nom')))
.groupby(n.generators.carrier).apply(join_exprs))
define_constraints(n, lhs, '>=', mincaps[lhs.index], 'Carrier', 'bau_mincaps')
mincaps = pd.Series(config["electricity"]["BAU_mincapacities"])
lhs = (
linexpr((1, get_var(n, "Generator", "p_nom")))
.groupby(n.generators.carrier)
.apply(join_exprs)
)
define_constraints(n, lhs, ">=", mincaps[lhs.index], "Carrier", "bau_mincaps")


def add_SAFE_constraints(n, config):
peakdemand = (1. + config['electricity']['SAFE_reservemargin']) *\
n.loads_t.p_set.sum(axis=1).max()
conv_techs = config['plotting']['conv_techs']
exist_conv_caps = n.generators.query('~p_nom_extendable & carrier in @conv_techs')\
.p_nom.sum()
ext_gens_i = n.generators.query('carrier in @conv_techs & p_nom_extendable').index
lhs = linexpr((1, get_var(n, 'Generator', 'p_nom')[ext_gens_i])).sum()
peakdemand = (
1.0 + config["electricity"]["SAFE_reservemargin"]
) * n.loads_t.p_set.sum(axis=1).max()
conv_techs = config["plotting"]["conv_techs"]
exist_conv_caps = n.generators.query(
"~p_nom_extendable & carrier in @conv_techs"
).p_nom.sum()
ext_gens_i = n.generators.query("carrier in @conv_techs & p_nom_extendable").index
lhs = linexpr((1, get_var(n, "Generator", "p_nom")[ext_gens_i])).sum()
rhs = peakdemand - exist_conv_caps
define_constraints(n, lhs, '>=', rhs, 'Safe', 'mintotalcap')
define_constraints(n, lhs, ">=", rhs, "Safe", "mintotalcap")


def add_operational_reserve_margin_constraint(n, config):

reserve_config = config["electricity"]["operational_reserve"]
EPSILON_LOAD = reserve_config["epsilon_load"]
EPSILON_VRES = reserve_config["epsilon_vres"]
CONTINGENCY = reserve_config["contingency"]

# Reserve Variables
reserve = get_var(n, 'Generator', 'r')
reserve = get_var(n, "Generator", "r")
lhs = linexpr((1, reserve)).sum(1)

# Share of extendable renewable capacities
ext_i = n.generators.query('p_nom_extendable').index
ext_i = n.generators.query("p_nom_extendable").index
vres_i = n.generators_t.p_max_pu.columns
if not ext_i.empty and not vres_i.empty:
capacity_factor = n.generators_t.p_max_pu[vres_i.intersection(ext_i)]
renewable_capacity_variables = get_var(n, 'Generator', 'p_nom')[vres_i.intersection(ext_i)]
lhs += linexpr((-EPSILON_VRES * capacity_factor, renewable_capacity_variables)).sum(1)
renewable_capacity_variables = get_var(n, "Generator", "p_nom")[
vres_i.intersection(ext_i)
]
lhs += linexpr(
(-EPSILON_VRES * capacity_factor, renewable_capacity_variables)
).sum(1)

# Total demand at t
demand = n.loads_t.p.sum(1)
@ -244,30 +288,32 @@ def add_operational_reserve_margin_constraint(n, config):
# Right-hand-side
rhs = EPSILON_LOAD * demand + EPSILON_VRES * potential + CONTINGENCY

define_constraints(n, lhs, '>=', rhs, "Reserve margin")
define_constraints(n, lhs, ">=", rhs, "Reserve margin")


def update_capacity_constraint(n):
gen_i = n.generators.index
ext_i = n.generators.query('p_nom_extendable').index
fix_i = n.generators.query('not p_nom_extendable').index
ext_i = n.generators.query("p_nom_extendable").index
fix_i = n.generators.query("not p_nom_extendable").index

dispatch = get_var(n, 'Generator', 'p')
reserve = get_var(n, 'Generator', 'r')
dispatch = get_var(n, "Generator", "p")
reserve = get_var(n, "Generator", "r")

capacity_fixed = n.generators.p_nom[fix_i]

p_max_pu = get_as_dense(n, 'Generator', 'p_max_pu')
p_max_pu = get_as_dense(n, "Generator", "p_max_pu")

lhs = linexpr((1, dispatch), (1, reserve))

if not ext_i.empty:
capacity_variable = get_var(n, 'Generator', 'p_nom')
lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(columns=gen_i, fill_value='')
capacity_variable = get_var(n, "Generator", "p_nom")
lhs += linexpr((-p_max_pu[ext_i], capacity_variable)).reindex(
columns=gen_i, fill_value=""
)

rhs = (p_max_pu[fix_i] * capacity_fixed).reindex(columns=gen_i, fill_value=0)

define_constraints(n, lhs, '<=', rhs, 'Generators', 'updated_capacity_constraint')
define_constraints(n, lhs, "<=", rhs, "Generators", "updated_capacity_constraint")
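Taken together, ``add_operational_reserve_margin_constraint`` and ``update_capacity_constraint`` encode roughly the following per snapshot t and generator i (a paraphrase of the code above; ``potential`` is computed in lines omitted from this hunk, and the formulation follows the GenX-style reserves referenced in the docstring of ``add_operational_reserve_margin`` below):

\sum_i r_{i,t} - \epsilon_{vres} \sum_{i \in VRES^{ext}} \bar{p}_{i,t}\, P_i \;\geq\; \epsilon_{load}\, d_t + \epsilon_{vres}\, \mathrm{potential}_t + \mathrm{contingency}

p_{i,t} + r_{i,t} \;\leq\; \bar{p}_{i,t}\, P_i

where P_i is the (fixed or extendable) capacity p_nom, \bar{p}_{i,t} the per-unit availability p_max_pu and d_t the total load.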
def add_operational_reserve_margin(n, sns, config):
@ -276,7 +322,7 @@ def add_operational_reserve_margin(n, sns, config):
https://genxproject.github.io/GenX/dev/core/#Reserves.
"""

define_variables(n, 0, np.inf, 'Generator', 'r', axes=[sns, n.generators.index])
define_variables(n, 0, np.inf, "Generator", "r", axes=[sns, n.generators.index])

add_operational_reserve_margin_constraint(n, config)

@ -285,28 +331,35 @@ def add_operational_reserve_margin(n, sns, config):

def add_battery_constraints(n):
nodes = n.buses.index[n.buses.carrier == "battery"]
if nodes.empty or ('Link', 'p_nom') not in n.variables.index:
if nodes.empty or ("Link", "p_nom") not in n.variables.index:
return
link_p_nom = get_var(n, "Link", "p_nom")
lhs = linexpr((1,link_p_nom[nodes + " charger"]),
(-n.links.loc[nodes + " discharger", "efficiency"].values,
link_p_nom[nodes + " discharger"].values))
define_constraints(n, lhs, "=", 0, 'Link', 'charger_ratio')
lhs = linexpr(
(1, link_p_nom[nodes + " charger"]),
(
-n.links.loc[nodes + " discharger", "efficiency"].values,
link_p_nom[nodes + " discharger"].values,
),
)
define_constraints(n, lhs, "=", 0, "Link", "charger_ratio")


def extra_functionality(n, snapshots):
"""
Collects supplementary constraints which will be passed to ``pypsa.linopf.network_lopf``.
If you want to enforce additional custom constraints, this is a good location to add them.
The arguments ``opts`` and ``snakemake.config`` are expected to be attached to the network.
Collects supplementary constraints which will be passed to
``pypsa.linopf.network_lopf``.

If you want to enforce additional custom constraints, this is a good
location to add them. The arguments ``opts`` and
``snakemake.config`` are expected to be attached to the network.
"""
opts = n.opts
config = n.config
if 'BAU' in opts and n.generators.p_nom_extendable.any():
if "BAU" in opts and n.generators.p_nom_extendable.any():
add_BAU_constraints(n, config)
if 'SAFE' in opts and n.generators.p_nom_extendable.any():
if "SAFE" in opts and n.generators.p_nom_extendable.any():
add_SAFE_constraints(n, config)
if 'CCL' in opts and n.generators.p_nom_extendable.any():
if "CCL" in opts and n.generators.p_nom_extendable.any():
add_CCL_constraints(n, config)
reserve = config["electricity"].get("operational_reserve", {})
if reserve.get("activate"):
@ -317,54 +370,71 @@ def extra_functionality(n, snapshots):
add_battery_constraints(n)
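As the docstring of ``extra_functionality`` notes, custom constraints can be hooked in alongside the existing ``add_*_constraints`` calls. A minimal, hypothetical example in the same linopf style as the functions above (the 100 GW onwind cap is made up purely for illustration):

def add_my_custom_constraint(n):
    # hypothetical example: limit total extendable onshore wind capacity to 100 GW
    onwind_ext_i = n.generators.query("carrier == 'onwind' and p_nom_extendable").index
    if onwind_ext_i.empty:
        return
    lhs = linexpr((1, get_var(n, "Generator", "p_nom")[onwind_ext_i])).sum()
    define_constraints(n, lhs, "<=", 1e5, "Generator", "onwind_cap")  # 100 GW in MW

Such a helper would then be called from ``extra_functionality``, e.g. ``add_my_custom_constraint(n)`` next to the other constraint calls.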
def solve_network(n, config, opts='', **kwargs):
solver_options = config['solving']['solver'].copy()
solver_name = solver_options.pop('name')
cf_solving = config['solving']['options']
track_iterations = cf_solving.get('track_iterations', False)
min_iterations = cf_solving.get('min_iterations', 4)
max_iterations = cf_solving.get('max_iterations', 6)
def solve_network(n, config, opts="", **kwargs):
solver_options = config["solving"]["solver"].copy()
solver_name = solver_options.pop("name")
cf_solving = config["solving"]["options"]
track_iterations = cf_solving.get("track_iterations", False)
min_iterations = cf_solving.get("min_iterations", 4)
max_iterations = cf_solving.get("max_iterations", 6)

# add to network for extra_functionality
n.config = config
n.opts = opts

skip_iterations = cf_solving.get('skip_iterations', False)
skip_iterations = cf_solving.get("skip_iterations", False)
if not n.lines.s_nom_extendable.any():
skip_iterations = True
logger.info("No expandable lines found. Skipping iterative solving.")

if skip_iterations:
network_lopf(n, solver_name=solver_name, solver_options=solver_options,
extra_functionality=extra_functionality, **kwargs)
network_lopf(
n,
solver_name=solver_name,
solver_options=solver_options,
extra_functionality=extra_functionality,
**kwargs
)
else:
ilopf(n, solver_name=solver_name, solver_options=solver_options,
ilopf(
n,
solver_name=solver_name,
solver_options=solver_options,
track_iterations=track_iterations,
min_iterations=min_iterations,
max_iterations=max_iterations,
extra_functionality=extra_functionality, **kwargs)
extra_functionality=extra_functionality,
**kwargs
)
return n


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('solve_network', simpl='',
clusters='5', ll='copt', opts='Co2L-BAU-CCL-24H')

snakemake = mock_snakemake(
"solve_network", simpl="", clusters="5", ll="copt", opts="Co2L-BAU-CCL-24H"
)
configure_logging(snakemake)

tmpdir = snakemake.config['solving'].get('tmpdir')
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
Path(tmpdir).mkdir(parents=True, exist_ok=True)
opts = snakemake.wildcards.opts.split('-')
solve_opts = snakemake.config['solving']['options']
opts = snakemake.wildcards.opts.split("-")
solve_opts = snakemake.config["solving"]["options"]

fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
n = pypsa.Network(snakemake.input[0])
n = prepare_network(n, solve_opts)
n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
solver_logfile=snakemake.log.solver)
n = solve_network(
n,
snakemake.config,
opts,
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver,
)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])
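The ``opts`` wildcard handled above is simply split on ``-`` and matched against the flags that ``extra_functionality`` understands; for the mock wildcard shown in the code:

opts = "Co2L-BAU-CCL-24H".split("-")  # ['Co2L', 'BAU', 'CCL', '24H']
"BAU" in opts   # True  -> add_BAU_constraints is applied
"SAFE" in opts  # False -> add_SAFE_constraints is skipped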
@ -1,10 +1,11 @@

# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2017-2022 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT

"""
Solves linear optimal dispatch in hourly resolution
using the capacities of previous capacity expansion in rule :mod:`solve_network`.
Solves linear optimal dispatch in hourly resolution using the capacities of
previous capacity expansion in rule :mod:`solve_network`.

Relevant Settings
-----------------
@ -42,65 +43,80 @@ Outputs

Description
-----------

"""

import logging
from _helpers import configure_logging

import pypsa
import numpy as np

from pathlib import Path

import numpy as np
import pypsa
from _helpers import configure_logging
from solve_network import prepare_network, solve_network
from vresutils.benchmark import memory_logger
from solve_network import solve_network, prepare_network

logger = logging.getLogger(__name__)

def set_parameters_from_optimized(n, n_optim):
lines_typed_i = n.lines.index[n.lines.type != '']
n.lines.loc[lines_typed_i, 'num_parallel'] = \
n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.)
n.lines.loc[lines_typed_i, 's_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel)

lines_untyped_i = n.lines.index[n.lines.type == '']
for attr in ('s_nom', 'r', 'x'):
n.lines.loc[lines_untyped_i, attr] = \
n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
n.lines['s_nom_extendable'] = False
def set_parameters_from_optimized(n, n_optim):
lines_typed_i = n.lines.index[n.lines.type != ""]
n.lines.loc[lines_typed_i, "num_parallel"] = n_optim.lines["num_parallel"].reindex(
lines_typed_i, fill_value=0.0
)
n.lines.loc[lines_typed_i, "s_nom"] = (
np.sqrt(3)
* n.lines["type"].map(n.line_types.i_nom)
* n.lines.bus0.map(n.buses.v_nom)
* n.lines.num_parallel
)

lines_untyped_i = n.lines.index[n.lines.type == ""]
for attr in ("s_nom", "r", "x"):
n.lines.loc[lines_untyped_i, attr] = n_optim.lines[attr].reindex(
lines_untyped_i, fill_value=0.0
)
n.lines["s_nom_extendable"] = False

links_dc_i = n.links.index[n.links.p_nom_extendable]
n.links.loc[links_dc_i, 'p_nom'] = \
n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
n.links.loc[links_dc_i, 'p_nom_extendable'] = False
n.links.loc[links_dc_i, "p_nom"] = n_optim.links["p_nom_opt"].reindex(
links_dc_i, fill_value=0.0
)
n.links.loc[links_dc_i, "p_nom_extendable"] = False

gen_extend_i = n.generators.index[n.generators.p_nom_extendable]
n.generators.loc[gen_extend_i, 'p_nom'] = \
n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False
n.generators.loc[gen_extend_i, "p_nom"] = n_optim.generators["p_nom_opt"].reindex(
gen_extend_i, fill_value=0.0
)
n.generators.loc[gen_extend_i, "p_nom_extendable"] = False

stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \
n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.)
n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False
n.storage_units.loc[stor_units_extend_i, "p_nom"] = n_optim.storage_units[
"p_nom_opt"
].reindex(stor_units_extend_i, fill_value=0.0)
n.storage_units.loc[stor_units_extend_i, "p_nom_extendable"] = False

stor_extend_i = n.stores.index[n.stores.e_nom_extendable]
n.stores.loc[stor_extend_i, 'e_nom'] = \
n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.)
n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False
n.stores.loc[stor_extend_i, "e_nom"] = n_optim.stores["e_nom_opt"].reindex(
stor_extend_i, fill_value=0.0
)
n.stores.loc[stor_extend_i, "e_nom_extendable"] = False

return n


if __name__ == "__main__":
if 'snakemake' not in globals():
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('solve_operations_network',
simpl='', clusters='5', ll='copt', opts='Co2L-BAU-24H')

snakemake = mock_snakemake(
"solve_operations_network",
simpl="",
clusters="5",
ll="copt",
opts="Co2L-BAU-24H",
)
configure_logging(snakemake)

tmpdir = snakemake.config['solving'].get('tmpdir')
tmpdir = snakemake.config["solving"].get("tmpdir")
if tmpdir is not None:
Path(tmpdir).mkdir(parents=True, exist_ok=True)

@ -109,14 +125,19 @@ if __name__ == "__main__":
n = set_parameters_from_optimized(n, n_optim)
del n_optim

opts = snakemake.wildcards.opts.split('-')
snakemake.config['solving']['options']['skip_iterations'] = False
opts = snakemake.wildcards.opts.split("-")
snakemake.config["solving"]["options"]["skip_iterations"] = False

fn = getattr(snakemake.log, 'memory', None)
with memory_logger(filename=fn, interval=30.) as mem:
n = prepare_network(n, snakemake.config['solving']['options'])
n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
solver_logfile=snakemake.log.solver)
fn = getattr(snakemake.log, "memory", None)
with memory_logger(filename=fn, interval=30.0) as mem:
n = prepare_network(n, snakemake.config["solving"]["options"])
n = solve_network(
n,
snakemake.config,
opts,
solver_dir=tmpdir,
solver_logfile=snakemake.log.solver,
)
n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
n.export_to_netcdf(snakemake.output[0])
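The repeated pattern in ``set_parameters_from_optimized`` is worth spelling out: optimized capacities are copied back onto the dispatch network with ``reindex(..., fill_value=0.0)``, so any extendable component missing from the optimized network is fixed at zero capacity rather than left as NaN. A toy sketch of the effect (names and values purely illustrative):

import pandas as pd

p_nom_opt = pd.Series({"gen_a": 120.0, "gen_b": 35.0})  # illustrative solve results
extend_i = pd.Index(["gen_a", "gen_b", "gen_c"])         # gen_c absent from the results

p_nom_opt.reindex(extend_i, fill_value=0.0)
# gen_a    120.0
# gen_b     35.0
# gen_c      0.0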
@ -72,8 +72,7 @@ renewable:
corine:
# Scholz, Y. (2012). Renewable energy based electricity supply at low costs:
# development of the REMix model and application for Europe. ( p.42 / p.28)
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 31, 32]
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
@ -124,8 +123,7 @@ renewable:
# sector: The economic potential of photovoltaics and concentrating solar
# power." Applied Energy 135 (2014): 704-720.
correction_factor: 0.854337
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
natura: true
excluder_resolution: 200
potential: simple # or conservative
@ -153,7 +151,7 @@ transformers:
type: ''

load:
power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
power_statistics: true # only for files from <2019; set false in order to get ENTSOE transparency data
interpolate_limit: 3 # data gaps up until this size are interpolated linearly
time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
manual_adjustments: true # false