Merge remote-tracking branch 'origin/master' into line-rating

commit 815f9824e9
Philipp Glaum, 2022-01-25 08:46:34 +01:00
26 changed files with 405 additions and 377 deletions

View File

@@ -4,6 +4,10 @@
 name: CI

+# Caching method based on and described by:
+# epassaro (2021): https://dev.to/epassaro/caching-anaconda-environments-in-github-actions-5hde
+# and code in GitHub repo: https://github.com/epassaro/cache-conda-envs
+
 on:
   push:
     branches:
@@ -14,42 +18,73 @@ on:
   schedule:
   - cron: "0 5 * * TUE"

+env:
+  CACHE_NUMBER: 1  # Change this value to manually reset the environment cache
+
 jobs:
   build:
-    runs-on: ${{ matrix.os }}
     strategy:
-      max-parallel: 5
       matrix:
-        os:
-        - ubuntu-latest
-        - macos-latest
-        - windows-latest
+        include:
+        # Matrix required to handle caching with Mambaforge
+        - os: ubuntu-latest
+          label: ubuntu-latest
+          prefix: /usr/share/miniconda3/envs/pypsa-eur
+        - os: macos-latest
+          label: macos-latest
+          prefix: /Users/runner/miniconda3/envs/pypsa-eur
+        - os: windows-latest
+          label: windows-latest
+          prefix: C:\Miniconda3\envs\pypsa-eur
+
+    name: ${{ matrix.label }}
+
+    runs-on: ${{ matrix.os }}

     defaults:
       run:
         shell: bash -l {0}

-    steps:
-    - uses: actions/checkout@v2
-    - name: Setup Miniconda
-      uses: conda-incubator/setup-miniconda@v2.1.1
-      with: # checks out environment 'test' by default
-        mamba-version: "*"
-        channels: conda-forge,defaults
-        channel-priority: true
-    - name: Install dependencies
-      run: |
-        echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
-        echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml
-        mamba env update -f envs/environment.yaml --name test
-    - name: Test snakemake workflow
-      run: |
-        conda list
-        cp test/config.test1.yaml config.yaml
-        snakemake --cores all solve_all_networks
-        rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Setup secrets
+      run: |
+        echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
+
+    - name: Add solver to environment
+      run: |
+        echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml
+
+    - name: Setup Mambaforge
+      uses: conda-incubator/setup-miniconda@v2
+      with:
+        miniforge-variant: Mambaforge
+        miniforge-version: latest
+        activate-environment: pypsa-eur
+        use-mamba: true
+
+    - name: Set cache date
+      run: echo "DATE=$(date +'%Y%m%d')" >> $GITHUB_ENV
+
+    - name: Create environment cache
+      uses: actions/cache@v2
+      id: cache
+      with:
+        path: ${{ matrix.prefix }}
+        key: ${{ matrix.label }}-conda-${{ hashFiles('envs/environment.yaml') }}-${{ env.DATE }}-${{ env.CACHE_NUMBER }}
+
+    - name: Update environment due to outdated or unavailable cache
+      run: mamba env update -n pypsa-eur -f envs/environment.yaml
+      if: steps.cache.outputs.cache-hit != 'true'
+
+    - name: Test snakemake workflow
+      run: |
+        conda activate pypsa-eur
+        conda list
+        cp test/config.test1.yaml config.yaml
+        snakemake --cores all solve_all_networks
+        rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results

View File

@@ -58,7 +58,7 @@ The dataset consists of:
 - Electrical demand time series from the
   [OPSD project](https://open-power-system-data.org/).
 - Renewable time series based on ERA5 and SARAH, assembled using the [atlite tool](https://github.com/FRESNA/atlite).
-- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [vresutils library](https://github.com/FRESNA/vresutils) and the [glaes library](https://github.com/FZJ-IEK3-VSA/glaes).
+- Geographical potentials for wind and solar generators based on land use (CORINE) and excluding nature reserves (Natura2000) are computed with the [atlite library](https://github.com/PyPSA/atlite).

 Already-built versions of the model can be found in the accompanying [Zenodo
 repository](https://doi.org/10.5281/zenodo.3601881).

View File

@@ -245,7 +245,7 @@ rule add_electricity:
     log: "logs/add_electricity.log"
     benchmark: "benchmarks/add_electricity"
     threads: 1
-    resources: mem=3000
+    resources: mem=5000
     script: "scripts/add_electricity.py"
@@ -286,7 +286,7 @@ rule cluster_network:
     log: "logs/cluster_network/elec_s{simpl}_{clusters}.log"
     benchmark: "benchmarks/cluster_network/elec_s{simpl}_{clusters}"
     threads: 1
-    resources: mem=3000
+    resources: mem=6000
     script: "scripts/cluster_network.py"

View File

@@ -148,12 +148,14 @@ renewable:
     slope: 35.
     azimuth: 180.
     capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2
-    # Determined by comparing uncorrected area-weighted full-load hours to those
+    # Correction factor determined by comparing uncorrected area-weighted full-load hours to those
     # published in Supplementary Data to
     # Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power
     # sector: The economic potential of photovoltaics and concentrating solar
     # power." Applied Energy 135 (2014): 704-720.
-    correction_factor: 0.854337
+    # This correction factor of 0.854337 may be in order if using reanalysis data.
+    # for discussion refer to https://github.com/PyPSA/pypsa-eur/pull/304
+    # correction_factor: 0.854337
     corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
             14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
     natura: true

View File

@@ -116,12 +116,13 @@ renewable:
     slope: 35.
     azimuth: 180.
     capacity_per_sqkm: 1.7 # ScholzPhd Tab 4.3.1: 170 MW/km^2
-    # Determined by comparing uncorrected area-weighted full-load hours to those
+    # Correction factor determined by comparing uncorrected area-weighted full-load hours to those
     # published in Supplementary Data to
     # Pietzcker, Robert Carl, et al. "Using the sun to decarbonize the power
     # sector: The economic potential of photovoltaics and concentrating solar
     # power." Applied Energy 135 (2014): 704-720.
-    correction_factor: 0.854337
+    # This correction factor of 0.854337 may be in order if using reanalysis data.
+    # correction_factor: 0.854337
     corine: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
             14, 15, 16, 17, 18, 19, 20, 26, 31, 32]
     natura: true

View File

@@ -18,6 +18,10 @@ Upcoming Release
 * The default deployment density of AC- and DC-connected offshore wind capacity is reduced from 3 MW/sqkm
   to a more conservative estimate of 2 MW/sqkm [`#280 <https://github.com/PyPSA/pypsa-eur/pull/280>`_].

+* Following discussion in `#285 <https://github.com/PyPSA/pypsa-eur/issues/285>`_ we have disabled the
+  correction factor for solar PV capacity factors by default while satellite data is used.
+  A correction factor of 0.854337 is recommended if reanalysis data like ERA5 is used.
+
 PyPSA-Eur 0.4.0 (22nd September 2021)
 =====================================

View File

@@ -6,15 +6,13 @@ name: pypsa-eur
 channels:
 - conda-forge
 - bioconda
-- http://conda.anaconda.org/gurobi

 dependencies:
 - python>=3.8
 - pip
-- mamba # esp for windows build
-- pypsa>=0.18
+- pypsa>=0.18.1
 - atlite>=0.2.5
-- dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved
+- dask

 # Dependencies of the workflow itself
 - xlrd
@@ -38,7 +36,7 @@ dependencies:
 - progressbar2
 - pyomo
 - matplotlib
-- proj<8
+- proj

 # Keep in conda environment when calling ipython
 - ipython
@@ -56,5 +54,5 @@ dependencies:
 - tabula-py
 - pip:
-  - vresutils==0.3.1
+  - vresutils>=0.3.1
   - tsam>=1.1.0

View File

@@ -95,7 +95,6 @@ import powerplantmatching as pm
 from powerplantmatching.export import map_country_bus

 from vresutils.costdata import annuity
-from vresutils.load import timeseries_opsd
 from vresutils import transfer as vtransfer

 idx = pd.IndexSlice
@@ -118,12 +117,7 @@ def _add_missing_carriers_from_costs(n, costs, carriers):
     n.import_components_from_dataframe(emissions, 'Carrier')

-def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
-    if tech_costs is None:
-        tech_costs = snakemake.input.tech_costs
-    if config is None:
-        config = snakemake.config['costs']
+def load_costs(tech_costs, config, elec_config, Nyears=1.):

     # set all asset costs and other parameters
     costs = pd.read_csv(tech_costs, index_col=list(range(3))).sort_index()
@@ -169,8 +163,6 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
                             marginal_cost=0.,
                             co2_emissions=0.))

-    if elec_config is None:
-        elec_config = snakemake.config['electricity']
     max_hours = elec_config['max_hours']
     costs.loc["battery"] = \
         costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
@@ -188,9 +180,7 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
     return costs

-def load_powerplants(ppl_fn=None):
-    if ppl_fn is None:
-        ppl_fn = snakemake.input.powerplants
+def load_powerplants(ppl_fn):
     carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy': 'biomass',
                     'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}
     return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
@@ -199,18 +189,18 @@ def load_powerplants(ppl_fn=None):
             .replace({'carrier': carrier_dict}))

-def attach_load(n):
-    substation_lv_i = n.buses.index[n.buses['substation_lv']]
-    regions = (gpd.read_file(snakemake.input.regions).set_index('name')
-               .reindex(substation_lv_i))
-    opsd_load = (pd.read_csv(snakemake.input.load, index_col=0, parse_dates=True)
-                 .filter(items=snakemake.config['countries']))
+def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.):
+    substation_lv_i = n.buses.index[n.buses['substation_lv']]
+    regions = (gpd.read_file(regions).set_index('name')
+               .reindex(substation_lv_i))
+    opsd_load = (pd.read_csv(load, index_col=0, parse_dates=True)
+                 .filter(items=countries))

-    scaling = snakemake.config.get('load', {}).get('scaling_factor', 1.0)
     logger.info(f"Load data scaled with scaling factor {scaling}.")
     opsd_load *= scaling

-    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('index')
+    nuts3 = gpd.read_file(nuts3_shapes).set_index('index')

     def upsample(cntry, group):
         l = opsd_load[cntry]
@@ -227,7 +217,6 @@ def attach_load(n):
         # relative factors 0.6 and 0.4 have been determined from a linear
         # regression on the country to continent load data
-        # (refer to vresutils.load._upsampling_weights)
         factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
         return pd.DataFrame(factors.values * l.values[:,np.newaxis],
                             index=l.index, columns=factors.index)
@@ -239,6 +228,9 @@ def attach_load(n):

 def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
+    # TODO: line length factor of lines is applied to lines and links.
+    # Separate the function to distinguish.

     n.lines['capital_cost'] = (n.lines['length'] * length_factor *
                                costs.at['HVAC overhead', 'capital_cost'])
@@ -263,18 +255,20 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal
     n.links.loc[dc_b, 'capital_cost'] = costs

-def attach_wind_and_solar(n, costs):
-    for tech in snakemake.config['renewable']:
+def attach_wind_and_solar(n, costs, input_profiles, technologies, line_length_factor=1):
+    # TODO: rename tech -> carrier, technologies -> carriers
+    for tech in technologies:
         if tech == 'hydro': continue

         n.add("Carrier", name=tech)
-        with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds:
+        with xr.open_dataset(getattr(input_profiles, 'profile_' + tech)) as ds:
             if ds.indexes['bus'].empty: continue

             suptech = tech.split('-', 2)[0]
             if suptech == 'offwind':
                 underwater_fraction = ds['underwater_fraction'].to_pandas()
-                connection_cost = (snakemake.config['lines']['length_factor'] *
+                connection_cost = (line_length_factor *
                                    ds['average_distance'].to_pandas() *
                                    (underwater_fraction *
                                     costs.at[tech + '-connection-submarine', 'capital_cost'] +
@@ -300,8 +294,7 @@ def attach_wind_and_solar(n, costs):
                    p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())

-def attach_conventional_generators(n, costs, ppl):
-    carriers = snakemake.config['electricity']['conventional_carriers']
+def attach_conventional_generators(n, costs, ppl, carriers):

     _add_missing_carriers_from_costs(n, costs, carriers)
@@ -322,10 +315,7 @@ def attach_conventional_generators(n, costs, ppl):
     logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')

-def attach_hydro(n, costs, ppl):
-    if 'hydro' not in snakemake.config['renewable']: return
-    c = snakemake.config['renewable']['hydro']
-    carriers = c.get('carriers', ['ror', 'PHS', 'hydro'])
+def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **config):

     _add_missing_carriers_from_costs(n, costs, carriers)
@@ -341,11 +331,11 @@ def attach_hydro(n, costs, ppl):
     if not inflow_idx.empty:
         dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)

-        with xr.open_dataarray(snakemake.input.profile_hydro) as inflow:
+        with xr.open_dataarray(profile_hydro) as inflow:
             inflow_countries = pd.Index(country[inflow_idx])
             missing_c = (inflow_countries.unique()
                          .difference(inflow.indexes['countries']))
-            assert missing_c.empty, (f"'{snakemake.input.profile_hydro}' is missing "
+            assert missing_c.empty, (f"'{profile_hydro}' is missing "
                                      f"inflow time-series for at least one country: {', '.join(missing_c)}")

             inflow_t = (inflow.sel(countries=inflow_countries)
@@ -370,7 +360,8 @@ def attach_hydro(n, costs, ppl):
     if 'PHS' in carriers and not phs.empty:
         # fill missing max hours to config value and
         # assume no natural inflow due to lack of data
-        phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}})
+        max_hours = config.get('PHS_max_hours', 6)
+        phs = phs.replace({'max_hours': {0: max_hours}})
         n.madd('StorageUnit', phs.index,
                carrier='PHS',
                bus=phs['bus'],
@@ -382,8 +373,11 @@ def attach_hydro(n, costs, ppl):
                cyclic_state_of_charge=True)

     if 'hydro' in carriers and not hydro.empty:
-        hydro_max_hours = c.get('hydro_max_hours')
-        hydro_stats = pd.read_csv(snakemake.input.hydro_capacities,
+        hydro_max_hours = config.get('hydro_max_hours')
+
+        assert hydro_max_hours is not None, "No path for hydro capacities given."
+
+        hydro_stats = pd.read_csv(hydro_capacities,
                                   comment="#", na_values='-', index_col=0)
         e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6
         e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum()
@@ -411,8 +405,7 @@ def attach_hydro(n, costs, ppl):
                bus=hydro['bus'],
                p_nom=hydro['p_nom'],
                max_hours=hydro_max_hours,
-               capital_cost=(costs.at['hydro', 'capital_cost']
-                             if c.get('hydro_capital_cost') else 0.),
+               capital_cost=costs.at['hydro', 'capital_cost'],
               marginal_cost=costs.at['hydro', 'marginal_cost'],
               p_max_pu=1.,  # dispatch
               p_min_pu=0.,  # store
@@ -422,9 +415,7 @@ def attach_hydro(n, costs, ppl):
               inflow=inflow_t.loc[:, hydro.index])

-def attach_extendable_generators(n, costs, ppl):
-    elec_opts = snakemake.config['electricity']
-    carriers = pd.Index(elec_opts['extendable_carriers']['Generator'])
+def attach_extendable_generators(n, costs, ppl, carriers):

     _add_missing_carriers_from_costs(n, costs, carriers)
@@ -472,12 +463,11 @@ def attach_extendable_generators(n, costs, ppl):

-def attach_OPSD_renewables(n):
+def attach_OPSD_renewables(n, techs):
     available = ['DE', 'FR', 'PL', 'CH', 'DK', 'CZ', 'SE', 'GB']
     tech_map = {'Onshore': 'onwind', 'Offshore': 'offwind', 'Solar': 'solar'}
     countries = set(available) & set(n.buses.country)
-    techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', [])
     tech_map = {k: v for k, v in tech_map.items() if v in techs}

     if not tech_map:
@@ -505,10 +495,7 @@ def attach_OPSD_renewables(n):

-def estimate_renewable_capacities(n, tech_map=None):
-    if tech_map is None:
-        tech_map = (snakemake.config['electricity']
-                    .get('estimate_renewable_capacities_from_capacity_stats', {}))
+def estimate_renewable_capacities(n, tech_map):

     if len(tech_map) == 0: return
@@ -543,8 +530,7 @@ def attach_line_rating(n):
     s_max=xr.open_dataarray(snakemake.input.line_rating).to_pandas().transpose()
     n.lines_t.s_max_pu=s_max/n.lines.loc[s_max.columns,:]['s_nom'] #only considers overhead lines

-def add_nice_carrier_names(n, config=None):
-    if config is None: config = snakemake.config
+def add_nice_carrier_names(n, config):
     carrier_i = n.carriers.index
     nice_names = (pd.Series(config['plotting']['nice_names'])
                   .reindex(carrier_i).fillna(carrier_i.to_series().str.title()))
@@ -552,11 +538,9 @@ def add_nice_carrier_names(n, config=None):
     colors = pd.Series(config['plotting']['tech_colors']).reindex(carrier_i)
     if colors.isna().any():
         missing_i = list(colors.index[colors.isna()])
-        logger.warning(f'tech_colors for carriers {missing_i} not defined '
-                       'in config.')
+        logger.warning(f'tech_colors for carriers {missing_i} not defined in config.')
     n.carriers['color'] = colors

 if __name__ == "__main__":
     if 'snakemake' not in globals():
         from _helpers import mock_snakemake
@@ -566,25 +550,37 @@ if __name__ == "__main__":
     n = pypsa.Network(snakemake.input.base_network)
     Nyears = n.snapshot_weightings.objective.sum() / 8760.

-    costs = load_costs(Nyears)
-    ppl = load_powerplants()
+    costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
+    ppl = load_powerplants(snakemake.input.powerplants)

-    attach_load(n)
+    attach_load(n, snakemake.input.regions, snakemake.input.load, snakemake.input.nuts3_shapes,
+                snakemake.config['countries'], snakemake.config['load']['scaling_factor'])

-    update_transmission_costs(n, costs)
+    update_transmission_costs(n, costs, snakemake.config['lines']['length_factor'])

-    attach_conventional_generators(n, costs, ppl)
-    attach_wind_and_solar(n, costs)
-    attach_hydro(n, costs, ppl)
-    attach_extendable_generators(n, costs, ppl)
+    carriers = snakemake.config['electricity']['conventional_carriers']
+    attach_conventional_generators(n, costs, ppl, carriers)
+
+    carriers = snakemake.config['renewable']
+    attach_wind_and_solar(n, costs, snakemake.input, carriers, snakemake.config['lines']['length_factor'])
+
+    if 'hydro' in snakemake.config['renewable']:
+        carriers = snakemake.config['renewable']['hydro'].pop('carriers', [])
+        attach_hydro(n, costs, ppl, snakemake.input.profile_hydro, snakemake.input.hydro_capacities,
+                     carriers, **snakemake.config['renewable']['hydro'])
+
+    carriers = snakemake.config['electricity']['extendable_carriers']['Generator']
+    attach_extendable_generators(n, costs, ppl, carriers)

-    estimate_renewable_capacities(n)
-    attach_OPSD_renewables(n)
+    tech_map = snakemake.config['electricity'].get('estimate_renewable_capacities_from_capacity_stats', {})
+    estimate_renewable_capacities(n, tech_map)
+
+    techs = snakemake.config['electricity'].get('renewable_capacities_from_OPSD', [])
+    attach_OPSD_renewables(n, techs)

     update_p_nom_max(n)

     if snakemake.config["lines"]["line_rating"]:
         attach_line_rating(n)

-    add_nice_carrier_names(n)
+    add_nice_carrier_names(n, snakemake.config)

     n.export_to_netcdf(snakemake.output[0])
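
Since the functions above no longer read the global snakemake object, they can now be driven directly, for instance from a notebook. A minimal sketch with hypothetical stand-in paths and a config file in place of snakemake.input/snakemake.config (not part of this commit):

    import pypsa
    import yaml

    with open("config.yaml") as f:           # hypothetical config location
        config = yaml.safe_load(f)

    n = pypsa.Network("networks/base.nc")    # hypothetical built base network
    Nyears = n.snapshot_weightings.objective.sum() / 8760.

    # call signatures as introduced by this diff
    costs = load_costs("data/costs.csv", config['costs'], config['electricity'], Nyears)
    ppl = load_powerplants("resources/powerplants.csv")
    attach_load(n, "resources/regions_onshore.geojson", "resources/load.csv",
                "resources/nuts3_shapes.geojson", config['countries'],
                config['load']['scaling_factor'])
    attach_conventional_generators(n, costs, ppl,
                                   config['electricity']['conventional_carriers'])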

View File

@@ -64,8 +64,7 @@ idx = pd.IndexSlice

 logger = logging.getLogger(__name__)

-def attach_storageunits(n, costs):
-    elec_opts = snakemake.config['electricity']
+def attach_storageunits(n, costs, elec_opts):
     carriers = elec_opts['extendable_carriers']['StorageUnit']
     max_hours = elec_opts['max_hours']
@@ -89,8 +88,7 @@ def attach_storageunits(n, costs):
            cyclic_state_of_charge=True)

-def attach_stores(n, costs):
-    elec_opts = snakemake.config['electricity']
+def attach_stores(n, costs, elec_opts):
     carriers = elec_opts['extendable_carriers']['Store']

     _add_missing_carriers_from_costs(n, costs, carriers)
@@ -156,8 +154,7 @@ def attach_stores(n, costs):
            marginal_cost=costs.at["battery inverter", "marginal_cost"])

-def attach_hydrogen_pipelines(n, costs):
-    elec_opts = snakemake.config['electricity']
+def attach_hydrogen_pipelines(n, costs, elec_opts):
     ext_carriers = elec_opts['extendable_carriers']
     as_stores = ext_carriers.get('Store', [])
@@ -197,15 +194,15 @@ if __name__ == "__main__":
     configure_logging(snakemake)

     n = pypsa.Network(snakemake.input.network)
+    elec_config = snakemake.config['electricity']

     Nyears = n.snapshot_weightings.objective.sum() / 8760.
-    costs = load_costs(Nyears, tech_costs=snakemake.input.tech_costs,
-                       config=snakemake.config['costs'],
-                       elec_config=snakemake.config['electricity'])
+    costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], elec_config, Nyears)

-    attach_storageunits(n, costs)
-    attach_stores(n, costs)
-    attach_hydrogen_pipelines(n, costs)
+    attach_storageunits(n, costs, elec_config)
+    attach_stores(n, costs, elec_config)
+    attach_hydrogen_pipelines(n, costs, elec_config)

-    add_nice_carrier_names(n, config=snakemake.config)
+    add_nice_carrier_names(n, snakemake.config)

     n.export_to_netcdf(snakemake.output[0])

View File

@@ -97,7 +97,7 @@ def _get_country(df):

 def _find_closest_links(links, new_links, distance_upper_bound=1.5):
-    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s))[[0, -1]].flatten()
+    treecoords = np.asarray([np.asarray(shapely.wkt.loads(s).coords)[[0, -1]].flatten()
                              for s in links.geometry])
     querycoords = np.vstack([new_links[['x1', 'y1', 'x2', 'y2']],
                              new_links[['x2', 'y2', 'x1', 'y1']]])
@@ -112,8 +112,8 @@ def _find_closest_links(links, new_links, distance_upper_bound=1.5):
            .sort_index()['i']

-def _load_buses_from_eg():
-    buses = (pd.read_csv(snakemake.input.eg_buses, quotechar="'",
+def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
+    buses = (pd.read_csv(eg_buses, quotechar="'",
                          true_values=['t'], false_values=['f'],
                          dtype=dict(bus_id="str"))
             .set_index("bus_id")
@@ -124,18 +124,18 @@ def _load_buses_from_eg():
     buses['under_construction'] = buses['under_construction'].fillna(False).astype(bool)

     # remove all buses outside of all countries including exclusive economic zones (offshore)
-    europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry']
+    europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
     europe_shape_prepped = shapely.prepared.prep(europe_shape)
     buses_in_europe_b = buses[['x', 'y']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)

-    buses_with_v_nom_to_keep_b = buses.v_nom.isin(snakemake.config['electricity']['voltages']) | buses.v_nom.isnull()
-    logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(snakemake.config['electricity']['voltages'])))
+    buses_with_v_nom_to_keep_b = buses.v_nom.isin(config_elec['voltages']) | buses.v_nom.isnull()
+    logger.info("Removing buses with voltages {}".format(pd.Index(buses.v_nom.unique()).dropna().difference(config_elec['voltages'])))

     return pd.DataFrame(buses.loc[buses_in_europe_b & buses_with_v_nom_to_keep_b])

-def _load_transformers_from_eg(buses):
-    transformers = (pd.read_csv(snakemake.input.eg_transformers, quotechar="'",
+def _load_transformers_from_eg(buses, eg_transformers):
+    transformers = (pd.read_csv(eg_transformers, quotechar="'",
                     true_values=['t'], false_values=['f'],
                     dtype=dict(transformer_id='str', bus0='str', bus1='str'))
                    .set_index('transformer_id'))
@@ -145,8 +145,8 @@ def _load_transformers_from_eg(buses):
     return transformers

-def _load_converters_from_eg(buses):
-    converters = (pd.read_csv(snakemake.input.eg_converters, quotechar="'",
+def _load_converters_from_eg(buses, eg_converters):
+    converters = (pd.read_csv(eg_converters, quotechar="'",
                   true_values=['t'], false_values=['f'],
                   dtype=dict(converter_id='str', bus0='str', bus1='str'))
                  .set_index('converter_id'))
@@ -158,8 +158,8 @@ def _load_converters_from_eg(buses):
     return converters

-def _load_links_from_eg(buses):
-    links = (pd.read_csv(snakemake.input.eg_links, quotechar="'", true_values=['t'], false_values=['f'],
+def _load_links_from_eg(buses, eg_links):
+    links = (pd.read_csv(eg_links, quotechar="'", true_values=['t'], false_values=['f'],
                          dtype=dict(link_id='str', bus0='str', bus1='str', under_construction="bool"))
             .set_index('link_id'))
@@ -176,11 +176,11 @@ def _load_links_from_eg(buses):
     return links

-def _add_links_from_tyndp(buses, links):
-    links_tyndp = pd.read_csv(snakemake.input.links_tyndp)
+def _add_links_from_tyndp(buses, links, links_tyndp, europe_shape):
+    links_tyndp = pd.read_csv(links_tyndp)

     # remove all links from list which lie outside all of the desired countries
-    europe_shape = gpd.read_file(snakemake.input.europe_shape).loc[0, 'geometry']
+    europe_shape = gpd.read_file(europe_shape).loc[0, 'geometry']
     europe_shape_prepped = shapely.prepared.prep(europe_shape)
     x1y1_in_europe_b = links_tyndp[['x1', 'y1']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
     x2y2_in_europe_b = links_tyndp[['x2', 'y2']].apply(lambda p: europe_shape_prepped.contains(Point(p)), axis=1)
@@ -248,8 +248,8 @@ def _add_links_from_tyndp(buses, links):
     return buses, links.append(links_tyndp, sort=True)

-def _load_lines_from_eg(buses):
-    lines = (pd.read_csv(snakemake.input.eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
+def _load_lines_from_eg(buses, eg_lines):
+    lines = (pd.read_csv(eg_lines, quotechar="'", true_values=['t'], false_values=['f'],
                          dtype=dict(line_id='str', bus0='str', bus1='str',
                                     underground="bool", under_construction="bool"))
             .set_index('line_id')
@@ -262,8 +262,8 @@ def _load_lines_from_eg(buses):
     return lines

-def _apply_parameter_corrections(n):
-    with open(snakemake.input.parameter_corrections) as f:
+def _apply_parameter_corrections(n, parameter_corrections):
+    with open(parameter_corrections) as f:
         corrections = yaml.safe_load(f)

     if corrections is None: return
@@ -285,14 +285,14 @@ def _apply_parameter_corrections(n):
             df.loc[inds, attr] = r[inds].astype(df[attr].dtype)

-def _set_electrical_parameters_lines(lines):
-    v_noms = snakemake.config['electricity']['voltages']
-    linetypes = snakemake.config['lines']['types']
+def _set_electrical_parameters_lines(lines, config):
+    v_noms = config['electricity']['voltages']
+    linetypes = config['lines']['types']

     for v_nom in v_noms:
         lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom]

-    lines['s_max_pu'] = snakemake.config['lines']['s_max_pu']
+    lines['s_max_pu'] = config['lines']['s_max_pu']

     return lines
@@ -304,14 +304,14 @@ def _set_lines_s_nom_from_linetypes(n):
     )

-def _set_electrical_parameters_links(links):
+def _set_electrical_parameters_links(links, config, links_p_nom):
     if links.empty: return links

-    p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
+    p_max_pu = config['links'].get('p_max_pu', 1.)
     links['p_max_pu'] = p_max_pu
     links['p_min_pu'] = -p_max_pu

-    links_p_nom = pd.read_csv(snakemake.input.links_p_nom)
+    links_p_nom = pd.read_csv(links_p_nom)

     # filter links that are not in operation anymore
     removed_b = links_p_nom.Remarks.str.contains('Shut down|Replaced', na=False)
@@ -331,8 +331,8 @@ def _set_electrical_parameters_links(links):
     return links

-def _set_electrical_parameters_converters(converters):
-    p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
+def _set_electrical_parameters_converters(converters, config):
+    p_max_pu = config['links'].get('p_max_pu', 1.)
     converters['p_max_pu'] = p_max_pu
     converters['p_min_pu'] = -p_max_pu
@@ -345,8 +345,8 @@ def _set_electrical_parameters_converters(converters):
     return converters

-def _set_electrical_parameters_transformers(transformers):
-    config = snakemake.config['transformers']
+def _set_electrical_parameters_transformers(transformers, config):
+    config = config['transformers']

     ## Add transformer parameters
     transformers["x"] = config.get('x', 0.1)
@@ -373,7 +373,7 @@ def _remove_unconnected_components(network):
     return network[component == component_sizes.index[0]]

-def _set_countries_and_substations(n):
+def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):

     buses = n.buses
@@ -386,9 +386,9 @@ def _set_countries_and_substations(n):
         index=buses.index
     )

-    countries = snakemake.config['countries']
-    country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
-    offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry']
+    countries = config['countries']
+    country_shapes = gpd.read_file(country_shapes).set_index('name')['geometry']
+    offshore_shapes = gpd.read_file(offshore_shapes).set_index('name')['geometry']
     substation_b = buses['symbol'].str.contains('substation|converter station', case=False)

     def prefer_voltage(x, which):
@@ -498,19 +498,19 @@ def _replace_b2b_converter_at_country_border_by_link(n):
                         .format(i, b0, line, linkcntry.at[i], buscntry.at[b1]))

-def _set_links_underwater_fraction(n):
+def _set_links_underwater_fraction(n, offshore_shapes):
     if n.links.empty: return

     if not hasattr(n.links, 'geometry'):
         n.links['underwater_fraction'] = 0.
     else:
-        offshore_shape = gpd.read_file(snakemake.input.offshore_shapes).unary_union
+        offshore_shape = gpd.read_file(offshore_shapes).unary_union
         links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
         n.links['underwater_fraction'] = links.intersection(offshore_shape).length / links.length

-def _adjust_capacities_of_under_construction_branches(n):
-    lines_mode = snakemake.config['lines'].get('under_construction', 'undef')
+def _adjust_capacities_of_under_construction_branches(n, config):
+    lines_mode = config['lines'].get('under_construction', 'undef')
     if lines_mode == 'zero':
         n.lines.loc[n.lines.under_construction, 'num_parallel'] = 0.
         n.lines.loc[n.lines.under_construction, 's_nom'] = 0.
@@ -519,7 +519,7 @@ def _adjust_capacities_of_under_construction_branches(n):
     elif lines_mode != 'keep':
         logger.warning("Unrecognized configuration for `lines: under_construction` = `{}`. Keeping under construction lines.")

-    links_mode = snakemake.config['links'].get('under_construction', 'undef')
+    links_mode = config['links'].get('under_construction', 'undef')
     if links_mode == 'zero':
         n.links.loc[n.links.under_construction, "p_nom"] = 0.
     elif links_mode == 'remove':
@@ -534,27 +534,30 @@ def _adjust_capacities_of_under_construction_branches(n):
     return n

-def base_network():
-    buses = _load_buses_from_eg()
+def base_network(eg_buses, eg_converters, eg_transformers, eg_lines, eg_links,
+                 links_p_nom, links_tyndp, europe_shape, country_shapes, offshore_shapes,
+                 parameter_corrections, config):

-    links = _load_links_from_eg(buses)
-    if snakemake.config['links'].get('include_tyndp'):
-        buses, links = _add_links_from_tyndp(buses, links)
+    buses = _load_buses_from_eg(eg_buses, europe_shape, config['electricity'])

-    converters = _load_converters_from_eg(buses)
+    links = _load_links_from_eg(buses, eg_links)
+    if config['links'].get('include_tyndp'):
+        buses, links = _add_links_from_tyndp(buses, links, links_tyndp, europe_shape)

-    lines = _load_lines_from_eg(buses)
-    transformers = _load_transformers_from_eg(buses)
+    converters = _load_converters_from_eg(buses, eg_converters)

-    lines = _set_electrical_parameters_lines(lines)
-    transformers = _set_electrical_parameters_transformers(transformers)
-    links = _set_electrical_parameters_links(links)
-    converters = _set_electrical_parameters_converters(converters)
+    lines = _load_lines_from_eg(buses, eg_lines)
+    transformers = _load_transformers_from_eg(buses, eg_transformers)
+
+    lines = _set_electrical_parameters_lines(lines, config)
+    transformers = _set_electrical_parameters_transformers(transformers, config)
+    links = _set_electrical_parameters_links(links, config, links_p_nom)
+    converters = _set_electrical_parameters_converters(converters, config)

     n = pypsa.Network()
     n.name = 'PyPSA-Eur'

-    n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots']))
+    n.set_snapshots(pd.date_range(freq='h', **config['snapshots']))
     n.snapshot_weightings[:] *= 8760. / n.snapshot_weightings.sum()

     n.import_components_from_dataframe(buses, "Bus")
@@ -565,17 +568,17 @@ def base_network():
     _set_lines_s_nom_from_linetypes(n)

-    _apply_parameter_corrections(n)
+    _apply_parameter_corrections(n, parameter_corrections)

     n = _remove_unconnected_components(n)

-    _set_countries_and_substations(n)
+    _set_countries_and_substations(n, config, country_shapes, offshore_shapes)

-    _set_links_underwater_fraction(n)
+    _set_links_underwater_fraction(n, offshore_shapes)

     _replace_b2b_converter_at_country_border_by_link(n)

-    n = _adjust_capacities_of_under_construction_branches(n)
+    n = _adjust_capacities_of_under_construction_branches(n, config)

     return n
@@ -585,6 +588,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('base_network')
     configure_logging(snakemake)

-    n = base_network()
+    n = base_network(snakemake.input.eg_buses, snakemake.input.eg_converters, snakemake.input.eg_transformers, snakemake.input.eg_lines, snakemake.input.eg_links,
+                     snakemake.input.links_p_nom, snakemake.input.links_tyndp, snakemake.input.europe_shape, snakemake.input.country_shapes, snakemake.input.offshore_shapes,
+                     snakemake.input.parameter_corrections, snakemake.config)

     n.export_to_netcdf(snakemake.output[0])
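
Passing config sections and input paths explicitly also makes these helpers unit-testable without a full Snakemake run. A minimal sketch relying only on the .get() defaults visible in the hunks above (a hypothetical test, not part of this commit):

    import pandas as pd

    def test_transformer_defaults():
        transformers = pd.DataFrame(index=['t1', 't2'])
        config = {'transformers': {}}  # empty section, so .get() defaults apply
        _set_electrical_parameters_transformers(transformers, config)
        assert (transformers['x'] == 0.1).all()  # default from config.get('x', 0.1)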

View File

@@ -74,7 +74,7 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('build_hydro_profile')
     configure_logging(snakemake)

-    config = snakemake.config['renewable']['hydro']
+    config_hydro = snakemake.config['renewable']['hydro']
     cutout = atlite.Cutout(snakemake.input.cutout)

     countries = snakemake.config['countries']
@@ -89,7 +89,7 @@ if __name__ == "__main__":
                                lower_threshold_quantile=True,
                                normalize_using_yearly=eia_stats)

-    if 'clip_min_inflow' in config:
-        inflow = inflow.where(inflow > config['clip_min_inflow'], 0)
+    if 'clip_min_inflow' in config_hydro:
+        inflow = inflow.where(inflow > config_hydro['clip_min_inflow'], 0)

     inflow.to_netcdf(snakemake.output[0])

View File

@@ -70,7 +70,7 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
     """
     logger.info(f"Retrieving load data from '{fn}'.")

-    pattern = 'power_statistics' if powerstatistics else '_transparency'
+    pattern = 'power_statistics' if powerstatistics else 'transparency'
     pattern = f'_load_actual_entsoe_{pattern}'
     rename = lambda s: s[:-len(pattern)]
     date_parser = lambda x: dateutil.parser.parse(x, ignoretz=True)
@@ -196,17 +196,16 @@ if __name__ == "__main__":
     configure_logging(snakemake)

-    config = snakemake.config
-    powerstatistics = config['load']['power_statistics']
-    interpolate_limit = config['load']['interpolate_limit']
-    countries = config['countries']
-    snapshots = pd.date_range(freq='h', **config['snapshots'])
+    powerstatistics = snakemake.config['load']['power_statistics']
+    interpolate_limit = snakemake.config['load']['interpolate_limit']
+    countries = snakemake.config['countries']
+    snapshots = pd.date_range(freq='h', **snakemake.config['snapshots'])

     years = slice(snapshots[0], snapshots[-1])
-    time_shift = config['load']['time_shift_for_large_gaps']
+    time_shift = snakemake.config['load']['time_shift_for_large_gaps']

     load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)

-    if config['load']['manual_adjustments']:
+    if snakemake.config['load']['manual_adjustments']:
         load = manual_adjustment(load, powerstatistics)

     logger.info(f"Linearly interpolate gaps of size {interpolate_limit} and less.")

View File

@@ -40,7 +40,7 @@ Description
 """
 import logging
-from _helpers import configure_logging
+from _helpers import configure_logging, retrieve_snakemake_keys

 import atlite
 import geopandas as gpd
@@ -73,18 +73,19 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('build_natura_raster')
     configure_logging(snakemake)

+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)

-    cutouts = snakemake.input.cutouts
+    cutouts = paths.cutouts
     xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts))
     bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys))
     transform, out_shape = get_transform_and_shape(bounds, res=100)

     # adjusted boundaries
-    shapes = gpd.read_file(snakemake.input.natura).to_crs(3035)
+    shapes = gpd.read_file(paths.natura).to_crs(3035)
     raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform)
     raster = raster.astype(rio.uint8)

-    with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8,
+    with rio.open(out[0], 'w', driver='GTiff', dtype=rio.uint8,
                   count=1, transform=transform, crs=3035, compress='lzw',
                   width=raster.shape[1], height=raster.shape[0]) as dst:
         dst.write(raster, indexes=1)
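
The implementation of retrieve_snakemake_keys in the _helpers module is not shown in this diff; judging from the five-way unpacking above, a plausible minimal version would be (an assumption, not the committed code):

    def retrieve_snakemake_keys(snakemake):
        # assumed helper: unpack the snakemake object's fields in a fixed order
        return (snakemake.input, snakemake.config, snakemake.wildcards,
                snakemake.log, snakemake.output)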

View File

@@ -84,11 +84,10 @@ from scipy.spatial import cKDTree as KDTree

 logger = logging.getLogger(__name__)

-def add_custom_powerplants(ppl):
-    custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
+def add_custom_powerplants(ppl, custom_powerplants, custom_ppl_query=False):
     if not custom_ppl_query:
         return ppl
-    add_ppls = pd.read_csv(snakemake.input.custom_powerplants, index_col=0,
+    add_ppls = pd.read_csv(custom_powerplants, index_col=0,
                            dtype={'bus': 'str'})
     if isinstance(custom_ppl_query, str):
         add_ppls.query(custom_ppl_query, inplace=True)
@@ -119,7 +118,9 @@ if __name__ == "__main__":
     if isinstance(ppl_query, str):
         ppl.query(ppl_query, inplace=True)

-    ppl = add_custom_powerplants(ppl) # add carriers from own powerplant files
+    # add carriers from own powerplant files:
+    custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
+    ppl = add_custom_powerplants(ppl, snakemake.input.custom_powerplants, custom_ppl_query)

     cntries_without_ppl = [c for c in countries if c not in ppl.Country.unique()]

View File

@@ -201,54 +201,54 @@ if __name__ == '__main__':
         snakemake = mock_snakemake('build_renewable_profiles', technology='solar')
     configure_logging(snakemake)
     pgb.streams.wrap_stderr()
-    paths = snakemake.input

     nprocesses = snakemake.config['atlite'].get('nprocesses')
     noprogress = not snakemake.config['atlite'].get('show_progress', True)
     config = snakemake.config['renewable'][snakemake.wildcards.technology]
     resource = config['resource'] # pv panel config / wind turbine config
-    correction_factor = config.get('correction_factor', 1.)
+    correction_factor = snakemake.config.get('correction_factor', 1.)
     capacity_per_sqkm = config['capacity_per_sqkm']
-    p_nom_max_meth = config.get('potential', 'conservative')
+    p_nom_max_meth = snakemake.config.get('potential', 'conservative')

     if isinstance(config.get("corine", {}), list):
-        config['corine'] = {'grid_codes': config['corine']}
+        snakemake.config['corine'] = {'grid_codes': config['corine']}

     if correction_factor != 1.:
         logger.info(f'correction_factor is set as {correction_factor}')

-    cutout = atlite.Cutout(paths['cutout'])
-    regions = gpd.read_file(paths.regions).set_index('name').rename_axis('bus')
+    cutout = atlite.Cutout(snakemake.input['cutout'])
+    regions = gpd.read_file(snakemake.input.regions).set_index('name').rename_axis('bus')
     buses = regions.index

     excluder = atlite.ExclusionContainer(crs=3035, res=100)

     if config['natura']:
-        excluder.add_raster(paths.natura, nodata=0, allow_no_overlap=True)
+        excluder.add_raster(snakemake.input.natura, nodata=0, allow_no_overlap=True)

-    corine = config.get("corine", {})
+    corine = snakemake.config.get("corine", {})
     if "grid_codes" in corine:
         codes = corine["grid_codes"]
-        excluder.add_raster(paths.corine, codes=codes, invert=True, crs=3035)
+        excluder.add_raster(snakemake.input.corine, codes=codes, invert=True, crs=3035)
     if corine.get("distance", 0.) > 0.:
         codes = corine["distance_grid_codes"]
         buffer = corine["distance"]
-        excluder.add_raster(paths.corine, codes=codes, buffer=buffer, crs=3035)
+        excluder.add_raster(snakemake.input.corine, codes=codes, buffer=buffer, crs=3035)

     if "max_depth" in config:
         # lambda not supported for atlite + multiprocessing
         # use named function np.greater with partially frozen argument instead
         # and exclude areas where: -max_depth > grid cell depth
         func = functools.partial(np.greater,-config['max_depth'])
-        excluder.add_raster(paths.gebco, codes=func, crs=4236, nodata=-1000)
+        excluder.add_raster(snakemake.input.gebco, codes=func, crs=4236, nodata=-1000)

     if 'min_shore_distance' in config:
         buffer = config['min_shore_distance']
-        excluder.add_geometry(paths.country_shapes, buffer=buffer)
+        excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer)

     if 'max_shore_distance' in config:
         buffer = config['max_shore_distance']
-        excluder.add_geometry(paths.country_shapes, buffer=buffer, invert=True)
+        excluder.add_geometry(snakemake.input.country_shapes, buffer=buffer, invert=True)

     kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress)
     if noprogress:
@@ -315,7 +315,7 @@ if __name__ == '__main__':
     if snakemake.wildcards.technology.startswith("offwind"):
         logger.info('Calculate underwater fraction of connections.')
-        offshore_shape = gpd.read_file(paths['offshore_shapes']).unary_union
+        offshore_shape = gpd.read_file(snakemake.input['offshore_shapes']).unary_union
         underwater_fraction = []
         for bus in buses:
             p = centre_of_mass.sel(bus=bus).data
@@ -326,11 +326,11 @@ if __name__ == '__main__':
         ds['underwater_fraction'] = xr.DataArray(underwater_fraction, [buses])

     # select only buses with some capacity and minimal capacity factor
-    ds = ds.sel(bus=((ds['profile'].mean('time') > config.get('min_p_max_pu', 0.)) &
-                     (ds['p_nom_max'] > config.get('min_p_nom_max', 0.))))
+    ds = ds.sel(bus=((ds['profile'].mean('time') > snakemake.config.get('min_p_max_pu', 0.)) &
+                     (ds['p_nom_max'] > snakemake.config.get('min_p_nom_max', 0.))))

-    if 'clip_p_max_pu' in config:
-        min_p_max_pu = config['clip_p_max_pu']
+    if 'clip_p_max_pu' in snakemake.config:
+        min_p_max_pu = snakemake.config['clip_p_max_pu']
         ds['profile'] = ds['profile'].where(ds['profile'] >= min_p_max_pu, 0)

     ds.to_netcdf(snakemake.output.profile)

View File

@@ -79,7 +79,7 @@ from itertools import takewhile
 import pandas as pd
 import geopandas as gpd
 from shapely.geometry import MultiPolygon, Polygon
-from shapely.ops import cascaded_union
+from shapely.ops import unary_union
 import pycountry as pyc

 logger = logging.getLogger(__name__)
@@ -95,7 +95,7 @@ def _get_country(target, **keys):
 def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
     if isinstance(polys, MultiPolygon):
-        polys = sorted(polys, key=attrgetter('area'), reverse=True)
+        polys = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
         mainpoly = polys[0]
         mainlength = np.sqrt(mainpoly.area/(2.*np.pi))
         if mainpoly.area > minarea:
@@ -107,26 +107,25 @@ def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
     return polys.simplify(tolerance=tolerance)

-def countries():
-    cntries = snakemake.config['countries']
-    if 'RS' in cntries: cntries.append('KV')
+def countries(naturalearth, country_list):
+    if 'RS' in country_list: country_list.append('KV')

-    df = gpd.read_file(snakemake.input.naturalearth)
+    df = gpd.read_file(naturalearth)

     # Names are a hassle in naturalearth, try several fields
     fieldnames = (df[x].where(lambda s: s!='-99') for x in ('ISO_A2', 'WB_A2', 'ADM0_A3'))
     df['name'] = reduce(lambda x,y: x.fillna(y), fieldnames, next(fieldnames)).str[0:2]

-    df = df.loc[df.name.isin(cntries) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))]
+    df = df.loc[df.name.isin(country_list) & ((df['scalerank'] == 0) | (df['scalerank'] == 5))]
     s = df.set_index('name')['geometry'].map(_simplify_polys)
-    if 'RS' in cntries: s['RS'] = s['RS'].union(s.pop('KV'))
+    if 'RS' in country_list: s['RS'] = s['RS'].union(s.pop('KV'))

     return s

-def eez(country_shapes):
-    df = gpd.read_file(snakemake.input.eez)
-    df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in snakemake.config['countries']])]
+def eez(country_shapes, eez, country_list):
+    df = gpd.read_file(eez)
+    df = df.loc[df['ISO_3digit'].isin([_get_country('alpha_3', alpha_2=c) for c in country_list])]
     df['name'] = df['ISO_3digit'].map(lambda c: _get_country('alpha_2', alpha_3=c))
     s = df.set_index('name').geometry.map(lambda s: _simplify_polys(s, filterremote=False))
     s = gpd.GeoSeries({k:v for k,v in s.iteritems() if v.distance(country_shapes[k]) < 1e-3})
@@ -139,35 +138,35 @@ def country_cover(country_shapes, eez_shapes=None):
     if eez_shapes is not None:
         shapes += list(eez_shapes)

-    europe_shape = cascaded_union(shapes)
+    europe_shape = unary_union(shapes)
     if isinstance(europe_shape, MultiPolygon):
         europe_shape = max(europe_shape, key=attrgetter('area'))
     return Polygon(shell=europe_shape.exterior)

-def nuts3(country_shapes):
-    df = gpd.read_file(snakemake.input.nuts3)
+def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
+    df = gpd.read_file(nuts3)
     df = df.loc[df['STAT_LEVL_'] == 3]
     df['geometry'] = df['geometry'].map(_simplify_polys)
     df = df.rename(columns={'NUTS_ID': 'id'})[['id', 'geometry']].set_index('id')

-    pop = pd.read_table(snakemake.input.nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python')
+    pop = pd.read_table(nuts3pop, na_values=[':'], delimiter=' ?\t', engine='python')
     pop = (pop
           .set_index(pd.MultiIndex.from_tuples(pop.pop('unit,geo\\time').str.split(','))).loc['THS']
           .applymap(lambda x: pd.to_numeric(x, errors='coerce'))
           .fillna(method='bfill', axis=1))['2014']

-    gdp = pd.read_table(snakemake.input.nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python')
+    gdp = pd.read_table(nuts3gdp, na_values=[':'], delimiter=' ?\t', engine='python')
     gdp = (gdp
           .set_index(pd.MultiIndex.from_tuples(gdp.pop('unit,geo\\time').str.split(','))).loc['EUR_HAB']
           .applymap(lambda x: pd.to_numeric(x, errors='coerce'))
           .fillna(method='bfill', axis=1))['2014']

-    cantons = pd.read_csv(snakemake.input.ch_cantons)
+    cantons = pd.read_csv(ch_cantons)
     cantons = cantons.set_index(cantons['HASC'].str[3:])['NUTS']
     cantons = cantons.str.pad(5, side='right', fillchar='0')

-    swiss = pd.read_excel(snakemake.input.ch_popgdp, skiprows=3, index_col=0)
+    swiss = pd.read_excel(ch_popgdp, skiprows=3, index_col=0)
     swiss.columns = swiss.columns.to_series().map(cantons)

     pop = pop.append(pd.to_numeric(swiss.loc['Residents in 1000', 'CH040':]))
@@ -218,16 +217,16 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('build_shapes')
     configure_logging(snakemake)

-    out = snakemake.output
+    country_shapes = countries(snakemake.input.naturalearth, snakemake.config['countries'])
+    save_to_geojson(country_shapes, snakemake.output.country_shapes)

-    country_shapes = countries()
-    save_to_geojson(country_shapes, out.country_shapes)
-
-    offshore_shapes = eez(country_shapes)
-    save_to_geojson(offshore_shapes, out.offshore_shapes)
+    offshore_shapes = eez(country_shapes, snakemake.input.eez, snakemake.config['countries'])
+    save_to_geojson(offshore_shapes, snakemake.output.offshore_shapes)

     europe_shape = country_cover(country_shapes, offshore_shapes)
-    save_to_geojson(gpd.GeoSeries(europe_shape), out.europe_shape)
+    save_to_geojson(gpd.GeoSeries(europe_shape), snakemake.output.europe_shape)

-    nuts3_shapes = nuts3(country_shapes)
-    save_to_geojson(nuts3_shapes, out.nuts3_shapes)
+    nuts3_shapes = nuts3(country_shapes, snakemake.input.nuts3, snakemake.input.nuts3pop,
+                         snakemake.input.nuts3gdp, snakemake.input.ch_cantons, snakemake.input.ch_popgdp)
+    save_to_geojson(nuts3_shapes, snakemake.output.nuts3_shapes)
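Two recurring substitutions in this file track newer shapely releases: the deprecated cascaded_union is replaced by unary_union, and MultiPolygon objects are no longer iterated directly but through their .geoms accessor. A minimal sketch of both patterns, with toy polygons for illustration:

    from shapely.geometry import MultiPolygon, Polygon
    from shapely.ops import unary_union

    a = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    b = Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])
    mp = MultiPolygon([a, b])

    # iterate the parts through .geoms instead of the MultiPolygon itself
    parts = sorted(mp.geoms, key=lambda p: p.area, reverse=True)
    # unary_union supersedes the deprecated cascaded_union
    merged = unary_union([a, b])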
@@ -140,6 +140,9 @@ from functools import reduce
 from pypsa.networkclustering import (busmap_by_kmeans, busmap_by_spectral_clustering,
                                      _make_consense, get_clustering_from_busmap)

+import warnings
+warnings.filterwarnings(action='ignore', category=UserWarning)
+
 from add_electricity import load_costs

 idx = pd.IndexSlice
@@ -170,12 +173,9 @@ def weighting_for_country(n, x):
     return (w * (100. / w.max())).clip(lower=1.).astype(int)

-def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
+def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
     """Determine the number of clusters per country"""

-    if solver_name is None:
-        solver_name = snakemake.config['solving']['solver']['name']
-
     L = (n.loads_t.p_set.mean()
          .groupby(n.loads.bus).sum()
          .groupby([n.buses.country, n.buses.sub_network]).sum()
@@ -218,7 +218,7 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name=None):
     results = opt.solve(m)
     assert results['Solver'][0]['Status'] == 'ok', f"Solver returned non-optimally: {results}"

-    return pd.Series(m.n.get_values(), index=L.index).astype(int)
+    return pd.Series(m.n.get_values(), index=L.index).round().astype(int)

 def busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights=None, algorithm="kmeans", **algorithm_kwds):
@@ -268,12 +268,10 @@ def clustering_for_n_clusters(n, n_clusters, custom_busmap=False, aggregate_carr
     else:
         raise AttributeError(f"potential_mode should be one of 'simple' or 'conservative' but is '{potential_mode}'")

-    if custom_busmap:
-        busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True)
-        busmap.index = busmap.index.astype(str)
-        logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
-    else:
+    if not custom_busmap:
         busmap = busmap_for_n_clusters(n, n_clusters, solver_name, focus_weights, algorithm)
+    else:
+        busmap = custom_busmap

     clustering = get_clustering_from_busmap(
         n, busmap,
@@ -306,14 +304,12 @@ def save_to_geojson(s, fn):

 def cluster_regions(busmaps, input=None, output=None):
-    if input is None: input = snakemake.input
-    if output is None: output = snakemake.output

     busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])

     for which in ('regions_onshore', 'regions_offshore'):
         regions = gpd.read_file(getattr(input, which)).set_index('name')
-        geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.cascaded_union)
+        geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.unary_union)
         regions_c = gpd.GeoDataFrame(dict(geometry=geom_c))
         regions_c.index.name = 'name'
         save_to_geojson(regions_c, getattr(output, which))
@@ -358,10 +354,8 @@ if __name__ == "__main__":
     else:
         line_length_factor = snakemake.config['lines']['length_factor']
         Nyears = n.snapshot_weightings.objective.sum()/8760
-        hvac_overhead_cost = (load_costs(Nyears,
-                                         tech_costs=snakemake.input.tech_costs,
-                                         config=snakemake.config['costs'],
-                                         elec_config=snakemake.config['electricity'])
+        hvac_overhead_cost = (load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
                               .at['HVAC overhead', 'capital_cost'])

     def consense(x):
@@ -373,12 +367,15 @@ if __name__ == "__main__":
     potential_mode = consense(pd.Series([snakemake.config['renewable'][tech]['potential']
                                          for tech in renewable_carriers]))
     custom_busmap = snakemake.config["enable"].get("custom_busmap", False)
+    if custom_busmap:
+        custom_busmap = pd.read_csv(snakemake.input.custom_busmap, index_col=0, squeeze=True)
+        custom_busmap.index = custom_busmap.index.astype(str)
+        logger.info(f"Imported custom busmap from {snakemake.input.custom_busmap}")
+
     clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap, aggregate_carriers,
-                                           line_length_factor=line_length_factor,
-                                           potential_mode=potential_mode,
-                                           solver_name=snakemake.config['solving']['solver']['name'],
-                                           extended_link_costs=hvac_overhead_cost,
-                                           focus_weights=focus_weights)
+                                           line_length_factor, potential_mode,
+                                           snakemake.config['solving']['solver']['name'],
+                                           "kmeans", hvac_overhead_cost, focus_weights)

     update_p_nom_max(n)
@@ -386,4 +383,4 @@ if __name__ == "__main__":
     for attr in ('busmap', 'linemap'): #also available: linemap_positive, linemap_negative
         getattr(clustering, attr).to_csv(snakemake.output[attr])

-    cluster_regions((clustering.busmap,))
+    cluster_regions((clustering.busmap,), snakemake.input, snakemake.output)
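With this refactoring, clustering_for_n_clusters no longer reads the custom busmap itself; the caller loads it as a pandas Series (index: bus id as string, value: cluster id) and passes it in, falling back to the kmeans busmap when it is False. A sketch of the calling convention mirroring the hunks above, with an illustrative file path in place of snakemake.input.custom_busmap:

    import pandas as pd

    custom_busmap = pd.read_csv("custom_busmap.csv",  # illustrative path
                                index_col=0, squeeze=True)
    custom_busmap.index = custom_busmap.index.astype(str)  # bus ids are strings
    clustering = clustering_for_n_clusters(
        n, n_clusters, custom_busmap, aggregate_carriers,
        line_length_factor, potential_mode, solver_name,
        "kmeans", hvac_overhead_cost, focus_weights)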
@@ -54,7 +54,7 @@ Replacing '/summaries/' with '/plots/' creates nice colored maps of the results.
 """

 import logging
-from _helpers import configure_logging
+from _helpers import configure_logging, retrieve_snakemake_keys

 import os
 import pypsa
@@ -378,7 +378,7 @@ outputs = ["costs",
            ]

-def make_summaries(networks_dict, country='all'):
+def make_summaries(networks_dict, paths, config, country='all'):

     columns = pd.MultiIndex.from_tuples(networks_dict.keys(),names=["simpl","clusters","ll","opts"])
@@ -403,8 +403,7 @@ def make_summaries(networks_dict, country='all'):
             n = n[n.buses.country == country]

         Nyears = n.snapshot_weightings.objective.sum() / 8760.
-        costs = load_costs(Nyears, snakemake.input[0],
-                           snakemake.config['costs'], snakemake.config['electricity'])
+        costs = load_costs(paths[0], config['costs'], config['electricity'], Nyears)
         update_transmission_costs(n, costs, simple_hvdc_costs=False)

         assign_carriers(n)
@@ -415,8 +414,7 @@ def make_summaries(networks_dict, country='all'):
     return dfs

-def to_csv(dfs):
-    dir = snakemake.output[0]
+def to_csv(dfs, dir):
     os.makedirs(dir, exist_ok=True)
     for key, df in dfs.items():
         df.to_csv(os.path.join(dir, f"{key}.csv"))
@@ -432,25 +430,27 @@ if __name__ == "__main__":
         network_dir = os.path.join('results', 'networks')
     configure_logging(snakemake)

-    def expand_from_wildcard(key):
-        w = getattr(snakemake.wildcards, key)
-        return snakemake.config["scenario"][key] if w == "all" else [w]
-
-    if snakemake.wildcards.ll.endswith("all"):
-        ll = snakemake.config["scenario"]["ll"]
-        if len(snakemake.wildcards.ll) == 4:
-            ll = [l for l in ll if l[0] == snakemake.wildcards.ll[0]]
+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+
+    def expand_from_wildcard(key, config):
+        w = getattr(wildcards, key)
+        return config["scenario"][key] if w == "all" else [w]
+
+    if wildcards.ll.endswith("all"):
+        ll = config["scenario"]["ll"]
+        if len(wildcards.ll) == 4:
+            ll = [l for l in ll if l[0] == wildcards.ll[0]]
     else:
-        ll = [snakemake.wildcards.ll]
+        ll = [wildcards.ll]

     networks_dict = {(simpl,clusters,l,opts) :
         os.path.join(network_dir, f'elec_s{simpl}_'
                                   f'{clusters}_ec_l{l}_{opts}.nc')
-                     for simpl in expand_from_wildcard("simpl")
-                     for clusters in expand_from_wildcard("clusters")
+                     for simpl in expand_from_wildcard("simpl", config)
+                     for clusters in expand_from_wildcard("clusters", config)
                      for l in ll
-                     for opts in expand_from_wildcard("opts")}
+                     for opts in expand_from_wildcard("opts", config)}

-    dfs = make_summaries(networks_dict, country=snakemake.wildcards.country)
-    to_csv(dfs)
+    dfs = make_summaries(networks_dict, paths, config, country=wildcards.country)
+    to_csv(dfs, out[0])
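This and the following scripts switch to a retrieve_snakemake_keys helper from _helpers. Its implementation is not part of this diff; judging from the call sites it presumably just unpacks the snakemake object, roughly like this sketch (an assumption, not the actual helper):

    def retrieve_snakemake_keys(snakemake):
        # returns (paths, config, wildcards, logs, out) as used by the callers
        return (snakemake.input, snakemake.config, snakemake.wildcards,
                snakemake.log, snakemake.output)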
@@ -20,8 +20,8 @@ Description
 """

 import logging
-from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs,
-                      configure_logging)
+from _helpers import (retrieve_snakemake_keys, load_network_for_plots,
+                      aggregate_p, aggregate_costs, configure_logging)

 import pandas as pd
 import numpy as np
@@ -259,18 +259,19 @@ if __name__ == "__main__":
     set_plot_style()

-    opts = snakemake.config['plotting']
-    map_figsize = opts['map']['figsize']
-    map_boundaries = opts['map']['boundaries']
-
-    n = load_network_for_plots(snakemake.input.network, snakemake.input.tech_costs, snakemake.config)
-
-    scenario_opts = snakemake.wildcards.opts.split('-')
+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+
+    map_figsize = config['map']['figsize']
+    map_boundaries = config['map']['boundaries']
+
+    n = load_network_for_plots(paths.network, paths.tech_costs, config)
+
+    scenario_opts = wildcards.opts.split('-')

     fig, ax = plt.subplots(figsize=map_figsize, subplot_kw={"projection": ccrs.PlateCarree()})
-    plot_map(n, ax, snakemake.wildcards.attr, opts)
+    plot_map(n, ax, wildcards.attr, config)

-    fig.savefig(snakemake.output.only_map, dpi=150, bbox_inches='tight')
+    fig.savefig(out.only_map, dpi=150, bbox_inches='tight')

     ax1 = fig.add_axes([-0.115, 0.625, 0.2, 0.2])
     plot_total_energy_pie(n, ax1)
@@ -278,12 +279,12 @@ if __name__ == "__main__":
     ax2 = fig.add_axes([-0.075, 0.1, 0.1, 0.45])
     plot_total_cost_bar(n, ax2)

-    ll = snakemake.wildcards.ll
+    ll = wildcards.ll
     ll_type = ll[0]
     ll_factor = ll[1:]
     lbl = dict(c='line cost', v='line volume')[ll_type]
     amnt = '{ll} x today\'s'.format(ll=ll_factor) if ll_factor != 'opt' else 'optimal'
     fig.suptitle('Expansion to {amount} {label} at {clusters} clusters'
-                 .format(amount=amnt, label=lbl, clusters=snakemake.wildcards.clusters))
+                 .format(amount=amnt, label=lbl, clusters=wildcards.clusters))

-    fig.savefig(snakemake.output.ext, transparent=True, bbox_inches='tight')
+    fig.savefig(out.ext, transparent=True, bbox_inches='tight')
@@ -19,7 +19,7 @@ Description
 """

 import logging
-from _helpers import configure_logging
+from _helpers import configure_logging, retrieve_snakemake_keys

 import pypsa
 import pandas as pd
@@ -53,11 +53,13 @@ if __name__ == "__main__":
                                clusts= '5,full', country= 'all')
     configure_logging(snakemake)

+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+
     plot_kwds = dict(drawstyle="steps-post")

-    clusters = snakemake.wildcards.clusts.split(',')
-    techs = snakemake.wildcards.techs.split(',')
-    country = snakemake.wildcards.country
+    clusters = wildcards.clusts.split(',')
+    techs = wildcards.techs.split(',')
+    country = wildcards.country
     if country == 'all':
         country = None
     else:
@@ -66,7 +68,7 @@ if __name__ == "__main__":
     fig, axes = plt.subplots(1, len(techs))

     for j, cluster in enumerate(clusters):
-        net = pypsa.Network(snakemake.input[j])
+        net = pypsa.Network(paths[j])

         for i, tech in enumerate(techs):
             cum_p_nom_max(net, tech, country).plot(x="p_max_pu", y="cum_p_nom_max",
@@ -79,4 +81,4 @@ if __name__ == "__main__":

     plt.legend(title="Cluster level")

-    fig.savefig(snakemake.output[0], transparent=True, bbox_inches='tight')
+    fig.savefig(out[0], transparent=True, bbox_inches='tight')
@@ -21,7 +21,7 @@ Description
 import os
 import logging
-from _helpers import configure_logging
+from _helpers import configure_logging, retrieve_snakemake_keys

 import pandas as pd
 import matplotlib.pyplot as plt
@@ -55,7 +55,7 @@ def rename_techs(label):
 preferred_order = pd.Index(["transmission lines","hydroelectricity","hydro reservoir","run of river","pumped hydro storage","onshore wind","offshore wind ac", "offshore wind dc","solar PV","solar thermal","OCGT","hydrogen storage","battery storage"])

-def plot_costs(infn, fn=None):
+def plot_costs(infn, config, fn=None):

     ## For now ignore the simpl header
     cost_df = pd.read_csv(infn,index_col=list(range(3)),header=[1,2,3])
@@ -67,7 +67,7 @@ def plot_costs(infn, fn=None):
     df = df.groupby(df.index.map(rename_techs)).sum()

-    to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']]
+    to_drop = df.index[df.max(axis=1) < config['plotting']['costs_threshold']]

     print("dropping")
@@ -84,7 +84,7 @@ def plot_costs(infn, fn=None):
     fig, ax = plt.subplots()
     fig.set_size_inches((12,8))

-    df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])
+    df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])

     handles,labels = ax.get_legend_handles_labels()
@@ -92,7 +92,7 @@ def plot_costs(infn, fn=None):
     handles.reverse()
     labels.reverse()

-    ax.set_ylim([0,snakemake.config['plotting']['costs_max']])
+    ax.set_ylim([0,config['plotting']['costs_max']])

     ax.set_ylabel("System Cost [EUR billion per year]")
@@ -109,7 +109,7 @@ def plot_costs(infn, fn=None):
     fig.savefig(fn, transparent=True)

-def plot_energy(infn, fn=None):
+def plot_energy(infn, config, fn=None):

     energy_df = pd.read_csv(infn, index_col=list(range(2)),header=[1,2,3])
@@ -120,7 +120,7 @@ def plot_energy(infn, fn=None):
     df = df.groupby(df.index.map(rename_techs)).sum()

-    to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']]
+    to_drop = df.index[df.abs().max(axis=1) < config['plotting']['energy_threshold']]

     print("dropping")
@@ -137,7 +137,7 @@ def plot_energy(infn, fn=None):
     fig, ax = plt.subplots()
     fig.set_size_inches((12,8))

-    df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])
+    df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[config['plotting']['tech_colors'][i] for i in new_index])

     handles,labels = ax.get_legend_handles_labels()
@@ -145,7 +145,7 @@ def plot_energy(infn, fn=None):
     handles.reverse()
     labels.reverse()

-    ax.set_ylim([snakemake.config['plotting']['energy_min'],snakemake.config['plotting']['energy_max']])
+    ax.set_ylim([config['plotting']['energy_min'], config['plotting']['energy_max']])

     ax.set_ylabel("Energy [TWh/a]")
@@ -170,10 +170,12 @@ if __name__ == "__main__":
                                attr='', ext='png', country='all')
     configure_logging(snakemake)

-    summary = snakemake.wildcards.summary
+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+
+    summary = wildcards.summary

     try:
         func = globals()[f"plot_{summary}"]
     except KeyError:
         raise RuntimeError(f"plotting function for {summary} has not been defined")

-    func(os.path.join(snakemake.input[0], f"{summary}.csv"), snakemake.output[0])
+    func(os.path.join(paths[0], f"{summary}.csv"), config, out[0])
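plot_costs and plot_energy now receive the plotting configuration explicitly instead of reaching for a global snakemake object, so they can also be called from a plain Python session. Illustrative usage with hypothetical paths:

    import yaml

    with open("config.yaml") as f:  # hypothetical config file
        config = yaml.safe_load(f)
    plot_costs("results/summaries/costs.csv", config, fn="costs.png")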
@@ -37,7 +37,7 @@ Description
 """

 import logging
-from _helpers import configure_logging
+from _helpers import configure_logging, retrieve_snakemake_keys

 import pandas as pd
@@ -63,6 +63,8 @@ if __name__ == "__main__":
         snakemake = mock_snakemake('prepare_links_p_nom', simpl='', network='elec')
     configure_logging(snakemake)

+    paths, config, wildcards, logs, out = retrieve_snakemake_keys(snakemake)
+
     links_p_nom = pd.read_html('https://en.wikipedia.org/wiki/List_of_HVDC_projects', header=0, match="SwePol")[0]

     mw = "Power (MW)"
@@ -74,4 +76,4 @@ if __name__ == "__main__":
     links_p_nom['x1'], links_p_nom['y1'] = extract_coordinates(links_p_nom['Converterstation 1'])
     links_p_nom['x2'], links_p_nom['y2'] = extract_coordinates(links_p_nom['Converterstation 2'])

-    links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(snakemake.output[0], index=False)
+    links_p_nom.dropna(subset=['x1', 'y1', 'x2', 'y2']).to_csv(out[0], index=False)
@@ -70,21 +70,14 @@ idx = pd.IndexSlice
 logger = logging.getLogger(__name__)

-def add_co2limit(n, Nyears=1., factor=None):
-
-    if factor is not None:
-        annual_emissions = factor*snakemake.config['electricity']['co2base']
-    else:
-        annual_emissions = snakemake.config['electricity']['co2limit']
+def add_co2limit(n, co2limit, Nyears=1.):

     n.add("GlobalConstraint", "CO2Limit",
           carrier_attribute="co2_emissions", sense="<=",
-          constant=annual_emissions * Nyears)
+          constant=co2limit * Nyears)

-def add_emission_prices(n, emission_prices=None, exclude_co2=False):
-    if emission_prices is None:
-        emission_prices = snakemake.config['costs']['emission_prices']
+def add_emission_prices(n, emission_prices={'co2': 0.}, exclude_co2=False):
     if exclude_co2: emission_prices.pop('co2')
     ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') *
           n.carriers.filter(like='_emissions')).sum(axis=1)
@@ -94,13 +87,12 @@ def add_emission_prices(n, emission_prices=None, exclude_co2=False):
     n.storage_units['marginal_cost'] += su_ep

-def set_line_s_max_pu(n):
-    s_max_pu = snakemake.config['lines']['s_max_pu']
+def set_line_s_max_pu(n, s_max_pu=0.7):
     n.lines['s_max_pu'] = s_max_pu
     logger.info(f"N-1 security margin of lines set to {s_max_pu}")

-def set_transmission_limit(n, ll_type, factor, Nyears=1):
+def set_transmission_limit(n, ll_type, factor, costs, Nyears=1):
     links_dc_b = n.links.carrier == 'DC' if not n.links.empty else pd.Series()

     _lines_s_nom = (np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) *
@@ -112,9 +104,6 @@ def set_transmission_limit(n, ll_type, factor, Nyears=1):
     ref = (lines_s_nom @ n.lines[col] +
            n.links.loc[links_dc_b, "p_nom"] @ n.links.loc[links_dc_b, col])

-    costs = load_costs(Nyears, snakemake.input.tech_costs,
-                       snakemake.config['costs'],
-                       snakemake.config['electricity'])
     update_transmission_costs(n, costs, simple_hvdc_costs=False)

     if factor == 'opt' or float(factor) > 1.0:
@@ -151,7 +140,7 @@ def average_every_nhours(n, offset):
     return m

-def apply_time_segmentation(n, segments):
+def apply_time_segmentation(n, segments, solver_name="cbc"):
     logger.info(f"Aggregating time series to {segments} segments.")
     try:
         import tsam.timeseriesaggregation as tsam
@@ -170,8 +159,6 @@ def apply_time_segmentation(n, segments):

     raw = pd.concat([p_max_pu, load, inflow], axis=1, sort=False)

-    solver_name = snakemake.config["solving"]["solver"]["name"]
-
     agg = tsam.TimeSeriesAggregation(raw, hoursPerPeriod=len(raw),
                                      noTypicalPeriods=1, noSegments=int(segments),
                                      segmentation=True, solver=solver_name)
@@ -208,9 +195,7 @@ def enforce_autarky(n, only_crossborder=False):
     n.mremove("Line", lines_rm)
     n.mremove("Link", links_rm)

-def set_line_nom_max(n):
-    s_nom_max_set = snakemake.config["lines"].get("s_nom_max,", np.inf)
-    p_nom_max_set = snakemake.config["links"].get("p_nom_max", np.inf)
+def set_line_nom_max(n, s_nom_max_set=np.inf, p_nom_max_set=np.inf):
     n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
     n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
@@ -225,8 +210,9 @@ if __name__ == "__main__":
     n = pypsa.Network(snakemake.input[0])
     Nyears = n.snapshot_weightings.objective.sum() / 8760.
+    costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)

-    set_line_s_max_pu(n)
+    set_line_s_max_pu(n, snakemake.config['lines']['s_max_pu'])

     for o in opts:
         m = re.match(r'^\d+h$', o, re.IGNORECASE)
@@ -237,16 +223,18 @@ if __name__ == "__main__":
     for o in opts:
         m = re.match(r'^\d+seg$', o, re.IGNORECASE)
         if m is not None:
-            n = apply_time_segmentation(n, m.group(0)[:-3])
+            solver_name = snakemake.config["solving"]["solver"]["name"]
+            n = apply_time_segmentation(n, m.group(0)[:-3], solver_name)
             break

     for o in opts:
         if "Co2L" in o:
             m = re.findall("[0-9]*\.?[0-9]+$", o)
             if len(m) > 0:
-                add_co2limit(n, Nyears, float(m[0]))
+                co2limit = float(m[0]) * snakemake.config['electricity']['co2base']
+                add_co2limit(n, co2limit, Nyears)
             else:
-                add_co2limit(n, Nyears)
+                add_co2limit(n, snakemake.config['electricity']['co2limit'], Nyears)
             break

     for o in opts:
@@ -267,12 +255,13 @@ if __name__ == "__main__":
                 c.df.loc[sel,attr] *= factor

     if 'Ep' in opts:
-        add_emission_prices(n)
+        add_emission_prices(n, snakemake.config['costs']['emission_prices'])

     ll_type, factor = snakemake.wildcards.ll[0], snakemake.wildcards.ll[1:]
-    set_transmission_limit(n, ll_type, factor, Nyears)
+    set_transmission_limit(n, ll_type, factor, costs, Nyears)

-    set_line_nom_max(n)
+    set_line_nom_max(n, s_nom_max_set=snakemake.config["lines"].get("s_nom_max", np.inf),
+                        p_nom_max_set=snakemake.config["links"].get("p_nom_max", np.inf))

     if "ATK" in opts:
         enforce_autarky(n)
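add_co2limit now takes the absolute cap directly; the caller derives it from the Co2L wildcard by scaling the configured base-year emissions. A small worked example of the parsing, with purely illustrative numbers:

    import re

    o = "Co2L0.05"   # cap at 5% of base-year emissions
    co2base = 250e6  # illustrative tCO2/a from the config
    m = re.findall(r"[0-9]*\.?[0-9]+$", o)
    co2limit = float(m[0]) * co2base if m else None  # -> 12.5e6 tCO2/a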
@@ -138,19 +138,15 @@ def simplify_network_to_380(n):
     return n, trafo_map

-def _prepare_connection_costs_per_link(n):
+def _prepare_connection_costs_per_link(n, costs, config):
     if n.links.empty: return {}

-    Nyears = n.snapshot_weightings.objective.sum() / 8760
-    costs = load_costs(Nyears, snakemake.input.tech_costs,
-                       snakemake.config['costs'], snakemake.config['electricity'])
-
     connection_costs_per_link = {}

-    for tech in snakemake.config['renewable']:
+    for tech in config['renewable']:
         if tech.startswith('offwind'):
             connection_costs_per_link[tech] = (
-                n.links.length * snakemake.config['lines']['length_factor'] *
+                n.links.length * config['lines']['length_factor'] *
                 (n.links.underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
                  (1. - n.links.underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost'])
             )
@@ -158,9 +154,9 @@ def _prepare_connection_costs_per_link(n):
     return connection_costs_per_link

-def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None, buses=None):
+def _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link=None, buses=None):
     if connection_costs_per_link is None:
-        connection_costs_per_link = _prepare_connection_costs_per_link(n)
+        connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)

     if buses is None:
         buses = busmap.index[busmap.index != busmap.values]
@@ -178,7 +174,7 @@ def _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link=None,
     return connection_costs_to_bus

-def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
+def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output):
     connection_costs = {}
     for tech in connection_costs_to_bus:
         tech_b = n.generators.carrier == tech
@@ -188,11 +184,11 @@ def _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus):
             logger.info("Displacing {} generator(s) and adding connection costs to capital_costs: {} "
                         .format(tech, ", ".join("{:.0f} Eur/MW/a for `{}`".format(d, b) for b, d in costs.iteritems())))
             connection_costs[tech] = costs
-    pd.DataFrame(connection_costs).to_csv(snakemake.output.connection_costs)
+    pd.DataFrame(connection_costs).to_csv(output.connection_costs)

-def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate_one_ports={"Load", "StorageUnit"}):
+def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output, aggregate_one_ports={"Load", "StorageUnit"}):
     def replace_components(n, c, df, pnl):
         n.mremove(c, n.df(c).index)
@@ -201,7 +197,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate
             if not df.empty:
                 import_series_from_dataframe(n, df, c, attr)

-    _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus)
+    _adjust_capital_costs_using_connection_costs(n, connection_costs_to_bus, output)

     generators, generators_pnl = aggregategenerators(n, busmap, custom_strategies={'p_nom_min': np.sum})
     replace_components(n, "Generator", generators, generators_pnl)
@@ -217,7 +213,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate
         n.mremove(c, df.index[df.bus0.isin(buses_to_del) | df.bus1.isin(buses_to_del)])

-def simplify_links(n):
+def simplify_links(n, costs, config, output):
     ## Complex multi-node links are folded into end-points
     logger.info("Simplifying connected link components")
@@ -264,7 +260,7 @@ def simplify_links(n):

         busmap = n.buses.index.to_series()

-        connection_costs_per_link = _prepare_connection_costs_per_link(n)
+        connection_costs_per_link = _prepare_connection_costs_per_link(n, costs, config)
         connection_costs_to_bus = pd.DataFrame(0., index=n.buses.index, columns=list(connection_costs_per_link))

         for lbl in labels.value_counts().loc[lambda s: s > 2].index:
@@ -278,11 +274,11 @@ def simplify_links(n):
                 m = sp.spatial.distance_matrix(n.buses.loc[b, ['x', 'y']],
                                                n.buses.loc[buses[1:-1], ['x', 'y']])
                 busmap.loc[buses] = b[np.r_[0, m.argmin(axis=0), 1]]
-                connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, connection_costs_per_link, buses)
+                connection_costs_to_bus.loc[buses] += _compute_connection_costs_to_bus(n, busmap, costs, config, connection_costs_per_link, buses)

                 all_links = [i for _, i in sum(links, [])]

-                p_max_pu = snakemake.config['links'].get('p_max_pu', 1.)
+                p_max_pu = config['links'].get('p_max_pu', 1.)
                 lengths = n.links.loc[all_links, 'length']
                 name = lengths.idxmax() + '+{}'.format(len(links) - 1)
                 params = dict(
@@ -309,17 +305,17 @@ def simplify_links(n):

     logger.debug("Collecting all components using the busmap")

-    _aggregate_and_move_components(n, busmap, connection_costs_to_bus)
+    _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output)
     return n, busmap

-def remove_stubs(n):
+def remove_stubs(n, costs, config, output):
     logger.info("Removing stubs")

     busmap = busmap_by_stubs(n) # ['country'])

-    connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap)
+    connection_costs_to_bus = _compute_connection_costs_to_bus(n, busmap, costs, config)

-    _aggregate_and_move_components(n, busmap, connection_costs_to_bus)
+    _aggregate_and_move_components(n, busmap, connection_costs_to_bus, output)

     return n, busmap
@@ -360,25 +356,25 @@ def aggregate_to_substations(n, buses_i=None):
     return clustering.network, busmap

-def cluster(n, n_clusters):
+def cluster(n, n_clusters, config):
     logger.info(f"Clustering to {n_clusters} buses")

-    focus_weights = snakemake.config.get('focus_weights', None)
+    focus_weights = config.get('focus_weights', None)

     renewable_carriers = pd.Index([tech
                                    for tech in n.generators.carrier.unique()
-                                   if tech.split('-', 2)[0] in snakemake.config['renewable']])
+                                   if tech.split('-', 2)[0] in config['renewable']])
     def consense(x):
         v = x.iat[0]
         assert ((x == v).all() or x.isnull().all()), (
             "The `potential` configuration option must agree for all renewable carriers, for now!"
         )
         return v
-    potential_mode = (consense(pd.Series([snakemake.config['renewable'][tech]['potential']
+    potential_mode = (consense(pd.Series([config['renewable'][tech]['potential']
                                           for tech in renewable_carriers]))
                       if len(renewable_carriers) > 0 else 'conservative')
     clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode,
-                                           solver_name=snakemake.config['solving']['solver']['name'],
+                                           solver_name=config['solving']['solver']['name'],
                                            focus_weights=focus_weights)

     return clustering.network, clustering.busmap
@@ -394,9 +390,13 @@ if __name__ == "__main__":

     n, trafo_map = simplify_network_to_380(n)

-    n, simplify_links_map = simplify_links(n)
-    n, stub_map = remove_stubs(n)
+    Nyears = n.snapshot_weightings.objective.sum() / 8760
+    technology_costs = load_costs(snakemake.input.tech_costs, snakemake.config['costs'], snakemake.config['electricity'], Nyears)
+
+    n, simplify_links_map = simplify_links(n, technology_costs, snakemake.config, snakemake.output)
+    n, stub_map = remove_stubs(n, technology_costs, snakemake.config, snakemake.output)

     busmaps = [trafo_map, simplify_links_map, stub_map]
@@ -405,7 +405,7 @@ if __name__ == "__main__":
         busmaps.append(substation_map)

     if snakemake.wildcards.simpl:
-        n, cluster_map = cluster(n, int(snakemake.wildcards.simpl))
+        n, cluster_map = cluster(n, int(snakemake.wildcards.simpl), snakemake.config)
         busmaps.append(cluster_map)

     # some entries in n.buses are not updated in previous functions, therefore can be wrong. as they are not needed
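Note the load_costs signature used throughout this commit: the cost table path now comes first and Nyears moves to the end, so each script computes the costs once and threads them through the helper functions. Sketch of the updated call as it appears above:

    Nyears = n.snapshot_weightings.objective.sum() / 8760
    technology_costs = load_costs(snakemake.input.tech_costs,
                                  snakemake.config['costs'],
                                  snakemake.config['electricity'],
                                  Nyears)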
@@ -283,8 +283,7 @@ if __name__ == "__main__":
         with memory_logger(filename=fn, interval=30.) as mem:
             n = pypsa.Network(snakemake.input[0])
             n = prepare_network(n, solve_opts)
-            n = solve_network(n, config=snakemake.config, opts=opts,
-                              solver_dir=tmpdir,
+            n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
                               solver_logfile=snakemake.log.solver)
             n.export_to_netcdf(snakemake.output[0])
@@ -109,15 +109,13 @@ if __name__ == "__main__":
     n = set_parameters_from_optimized(n, n_optim)
     del n_optim

-    config = snakemake.config
     opts = snakemake.wildcards.opts.split('-')
-    config['solving']['options']['skip_iterations'] = False
+    snakemake.config['solving']['options']['skip_iterations'] = False

     fn = getattr(snakemake.log, 'memory', None)
     with memory_logger(filename=fn, interval=30.) as mem:
-        n = prepare_network(n, solve_opts=snakemake.config['solving']['options'])
-        n = solve_network(n, config=config, opts=opts,
-                          solver_dir=tmpdir,
+        n = prepare_network(n, snakemake.config['solving']['options'])
+        n = solve_network(n, snakemake.config, opts, solver_dir=tmpdir,
                           solver_logfile=snakemake.log.solver)
         n.export_to_netcdf(snakemake.output[0])