Merge branch 'master' into bus-regions

commit 234f2f247c
Martha Frysztacki, 2024-02-14 17:05:43 +01:00 (committed by GitHub)
22 changed files with 187 additions and 199 deletions

.gitignore (vendored): 1 change
View File

@@ -24,6 +24,7 @@ gurobi.log
doc/_build
config.yaml
config/config.yaml
dconf
/data/links_p_nom.csv

View File

@@ -2,8 +2,8 @@
#
# SPDX-License-Identifier: MIT
from os.path import normpath, exists
from shutil import copyfile, move, rmtree
from os.path import normpath
from shutil import move, rmtree
from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider
@@ -13,12 +13,8 @@ from snakemake.utils import min_version
min_version("7.7")
conf_file = os.path.join(workflow.current_basedir, "config/config.yaml")
conf_default_file = os.path.join(workflow.current_basedir, "config/config.default.yaml")
if not exists(conf_file) and exists(conf_default_file):
copyfile(conf_default_file, conf_file)
configfile: "config/config.default.yaml"
configfile: "config/config.yaml"
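
With both ``configfile:`` directives in place, Snakemake reads the default
configuration first and then overlays the user's ``config/config.yaml`` on top,
so the user file only needs to state deviations. A minimal sketch of these
overlay semantics, assuming a recursive dict merge like the one Snakemake
performs internally (``deep_update`` is a hypothetical stand-in, not part of
the workflow):

.. code:: python

    import yaml

    def deep_update(base: dict, overrides: dict) -> dict:
        """Recursively merge ``overrides`` into ``base`` and return ``base``."""
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                deep_update(base[key], value)
            else:
                base[key] = value
        return base

    with open("config/config.default.yaml") as f:
        config = yaml.safe_load(f)
    with open("config/config.yaml") as f:
        config = deep_update(config, yaml.safe_load(f) or {})

    # keys set in config/config.yaml win; everything else keeps its default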

config/config.yaml (new file): 8 additions
View File

@@ -0,0 +1,8 @@
# SPDX-FileCopyrightText: : 2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0
# add your own configuration overrides here, for instance
version: 0.9.0
# enable:
# retrieve: false

View File

@@ -1,5 +1,4 @@
,Unit,Values,Description
power_statistics,bool,"{true, false}",Whether to load the electricity consumption data of the ENTSOE power statistics (only for files from 2019 and before) or from the ENTSOE transparency data (only has load data from 2015 onwards).
interpolate_limit,hours,integer,"Maximum gap size (consecutive nans) which is interpolated linearly."
time_shift_for_large_gaps,string,string,"Periods which are used for copying time-slices in order to fill large gaps of nans. Have to be valid ``pandas`` period strings."
manual_adjustments,bool,"{true, false}","Whether to adjust the load data manually according to the function in :func:`manual_adjustment`."


View File

@@ -9,7 +9,7 @@
Configuration
##########################################
PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file located in the root directory. Users should copy the provided default configuration (``config/config.default.yaml``) and amend their own modifications and assumptions in the user-specific configuration file (``config/config.yaml``); confer installation instructions at :ref:`defaultconfig`.
PyPSA-Eur has several configuration options which are documented in this section and are collected in a ``config/config.yaml`` file. This file defines deviations from the default configuration (``config/config.default.yaml``); confer installation instructions at :ref:`defaultconfig`.
.. _toplevel_cf:

View File

@@ -118,11 +118,10 @@ Nevertheless, you can still use open-source solvers for smaller problems.
Handling Configuration Files
============================
PyPSA-Eur has several configuration options that must be specified in a
``config/config.yaml`` file located in the root directory. An example configuration
``config/config.default.yaml`` is maintained in the repository, which will be used to
automatically create your customisable ``config/config.yaml`` on first use. More
details on the configuration options are in :ref:`config`.
PyPSA-Eur has several configuration options that users can specify in a
``config/config.yaml`` file. The default configuration
``config/config.default.yaml`` is maintained in the repository. More details on
the configuration options are in :ref:`config`.
You can also use ``snakemake`` to specify another file, e.g.
``config/config.mymodifications.yaml``, to update the settings of the ``config/config.yaml``.
@@ -130,8 +129,3 @@ You can also use ``snakemake`` to specify another file, e.g.
.. code:: bash
.../pypsa-eur % snakemake -call --configfile config/config.mymodifications.yaml
.. warning::
Users are advised to regularly check their own ``config/config.yaml`` against changes
in the ``config/config.default.yaml`` when pulling a new version from the remote
repository.

View File

@@ -74,7 +74,7 @@ what data to retrieve and what files to produce. Details are explained in
:ref:`wildcards` and :ref:`scenario`.
The model also has several further configuration options collected in the
``config/config.yaml`` file located in the root directory, which are not part of
``config/config.default.yaml`` file located in the root directory, which are not part of
the scenarios. Options are explained in :ref:`config`.
Folder Structure

View File

@@ -10,6 +10,14 @@ Release Notes
Upcoming Release
================
* The default configuration ``config/config.default.yaml`` is now automatically
used as a base configuration file and no longer copied to
``config/config.yaml`` on first use. The file ``config/config.yaml`` should be
used to define deviations from the default configuration.
* Merged two OPSD time series data versions into one such that the option ``load:
power_statistics:`` becomes superfluous and was hence removed.
* Add new default to overdimension heating in individual buildings. This allows
them to cover heat demand peaks e.g. 10% higher than those in the data. The
disadvantage of manipulating the costs is that the capacity is then not quite

View File

@@ -91,7 +91,7 @@ None.
**Outputs**
- ``resources/load_raw.csv``
- ``resources/electricity_demand.csv``
Rule ``retrieve_cost_data``

View File

@@ -24,9 +24,9 @@ rule build_electricity_demand:
countries=config["countries"],
load=config["load"],
input:
ancient(RESOURCES + "load_raw.csv"),
ancient("data/electricity_demand_raw.csv"),
output:
RESOURCES + "load.csv",
RESOURCES + "electricity_demand.csv",
log:
LOGS + "build_electricity_demand.log",
resources:
@@ -417,7 +417,7 @@ rule add_electricity:
if config["conventional"]["dynamic_fuel_price"]
else []
),
load=RESOURCES + "load.csv",
load=RESOURCES + "electricity_demand.csv",
nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",
output:

View File

@@ -4,11 +4,8 @@
import os, sys, glob
helper_source_path = [match for match in glob.glob("**/_helpers.py", recursive=True)]
for path in helper_source_path:
path = os.path.dirname(os.path.abspath(path))
sys.path.insert(0, os.path.abspath(path))
path = workflow.source_path("../scripts/_helpers.py")
sys.path.insert(0, os.path.dirname(path))
from _helpers import validate_checksum

View File

@@ -188,27 +188,17 @@ if config["enable"]["retrieve"]:
if config["enable"]["retrieve"]:
rule retrieve_electricity_demand:
input:
HTTP.remote(
"data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
version=(
"2019-06-05"
if config["snapshots"]["end"] < "2019"
else "2020-10-06"
)
),
keep_local=True,
static=True,
),
params:
versions=["2019-06-05", "2020-10-06"],
output:
RESOURCES + "load_raw.csv",
"data/electricity_demand_raw.csv",
log:
LOGS + "retrieve_electricity_demand.log",
resources:
mem_mb=5000,
retries: 2
run:
move(input[0], output[0])
script:
"../scripts/retrieve_electricity_demand.py"
if config["enable"]["retrieve"]:

View File

@@ -264,7 +264,6 @@ def mock_snakemake(
import os
import snakemake as sm
from packaging.version import Version, parse
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
@@ -290,13 +289,12 @@ def mock_snakemake(
if os.path.exists(p):
snakefile = p
break
kwargs = (
dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
)
if isinstance(configfiles, str):
configfiles = [configfiles]
workflow = sm.Workflow(snakefile, overwrite_configfiles=configfiles, **kwargs)
workflow = sm.Workflow(
snakefile, overwrite_configfiles=configfiles, rerun_triggers=[]
)
workflow.include(snakefile)
if configfiles:

View File

@@ -52,7 +52,7 @@ Inputs
:scale: 34 %
- ``data/geth2015_hydro_capacities.csv``: alternative to capacities above; not currently used!
- ``resources/load.csv`` Hourly per-country load profiles.
- ``resources/electricity_demand.csv`` Hourly per-country electricity demand profiles.
- ``resources/regions_onshore.geojson``: confer :ref:`busregions`
- ``resources/nuts3_shapes.geojson``: confer :ref:`shapes`
- ``resources/powerplants.csv``: confer :ref:`powerplants`

View File

@@ -78,10 +78,13 @@ import shapely.prepared
import shapely.wkt
import yaml
from _helpers import configure_logging
from packaging.version import Version, parse
from scipy import spatial
from scipy.sparse import csgraph
from shapely.geometry import LineString, Point
PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
logger = logging.getLogger(__name__)
@@ -524,12 +527,13 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
)
return pd.Series(key, index)
compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
gb = buses.loc[substation_b].groupby(
["x", "y"], as_index=False, group_keys=False, sort=False
)
bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
bus_map_low = gb.apply(prefer_voltage, "min", **compat_kws)
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
bus_map_high = gb.apply(prefer_voltage, "max", **compat_kws)
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index)
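
The ``PD_GE_2_2`` gate above exists because pandas 2.2 added the
``include_groups`` keyword to ``groupby(...).apply`` and deprecated passing the
grouping columns to the applied function, while older pandas versions reject
the keyword altogether. A minimal sketch of the pattern on a toy frame
(illustrative data, not the bus table):

.. code:: python

    import pandas as pd
    from packaging.version import Version, parse

    PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
    # only pass include_groups=False where pandas understands it
    compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}

    df = pd.DataFrame({"x": [0, 0, 1], "v": [10.0, 20.0, 30.0]})
    out = df.groupby("x", group_keys=False).apply(lambda g: g["v"].max(), **compat_kws)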

View File

@@ -1,15 +1,13 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, The PyPSA-Eur Authors
# SPDX-FileCopyrightText: : 2020 @JanFrederickUnnewehr, 2020-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
This rule downloads the load data from `Open Power System Data Time series.
This rule downloads the load data from `Open Power System Data Time series
<https://data.open-power-system-data.org/time_series/>`_. For all countries in
the network, the per country load timeseries with suffix
``_load_actual_entsoe_transparency`` are extracted from the dataset. After
filling small gaps linearly and large gaps by copying time-slice of a given
period, the load data is exported to a ``.csv`` file.
the network, the per country load timeseries are extracted from the dataset.
After filling small gaps linearly and large gaps by copying time-slices of a
given period, the load data is exported to a ``.csv`` file.
Relevant Settings
-----------------
@@ -19,9 +17,7 @@ Relevant Settings
snapshots:
load:
interpolate_limit:
time_shift_for_large_gaps:
manual_adjustments:
.. seealso::
@@ -31,12 +27,12 @@ Relevant Settings
Inputs
------
- ``resources/load_raw.csv``:
- ``data/electricity_demand_raw.csv``:
Outputs
-------
- ``resources/load.csv``:
- ``resources/electricity_demand.csv``:
"""
import logging
@@ -49,7 +45,7 @@ from pandas import Timedelta as Delta
logger = logging.getLogger(__name__)
def load_timeseries(fn, years, countries, powerstatistics=True):
def load_timeseries(fn, years, countries):
"""
Read load data from OPSD time-series package version 2020-10-06.
@@ -62,29 +58,15 @@ def load_timeseries(fn, years, countries, powerstatistics=True):
File name or url location (file format .csv)
countries : listlike
Countries for which to read load data.
powerstatistics: bool
Whether the electricity consumption data of the ENTSOE power
statistics (if true) or of the ENTSOE transparency map (if false)
should be parsed.
Returns
-------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
"""
logger.info(f"Retrieving load data from '{fn}'.")
pattern = "power_statistics" if powerstatistics else "transparency"
pattern = f"_load_actual_entsoe_{pattern}"
def rename(s):
return s[: -len(pattern)]
return (
pd.read_csv(fn, index_col=0, parse_dates=[0], date_format="%Y-%m-%dT%H:%M:%SZ")
.tz_localize(None)
.filter(like=pattern)
.rename(columns=rename)
.dropna(how="all", axis=0)
.rename(columns={"GB_UKM": "GB"})
.filter(items=countries)
@@ -149,17 +131,18 @@ def copy_timeslice(load, cntry, start, stop, delta, fn_load=None):
].values
elif fn_load is not None:
duration = pd.date_range(freq="h", start=start - delta, end=stop - delta)
load_raw = load_timeseries(fn_load, duration, [cntry], powerstatistics)
load_raw = load_timeseries(fn_load, duration, [cntry])
load.loc[start:stop, cntry] = load_raw.loc[
start - delta : stop - delta, cntry
].values
def manual_adjustment(load, fn_load, powerstatistics, countries):
def manual_adjustment(load, fn_load, countries):
"""
Adjust gaps manually for load data from the OPSD time-series package.
1. For the ENTSOE power statistics load data (if powerstatistics is True)
1. For years earlier than 2015 for which the load data is mainly taken from the
ENTSOE power statistics
Kosovo (KV) and Albania (AL) do not exist in the data set. Kosovo gets the
same load curve as Serbia and Albania the same as Macedonia, both scaled
@@ -167,7 +150,8 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):
IEA Data browser [0] for the year 2013.
2. For the ENTSOE transparency load data (if powerstatistics is False)
2. For years later than 2015 for which the load data is mainly taken from the
ENTSOE transparency platform
Albania (AL) and Macedonia (MK) do not exist in the data set. Both get the
same load curve as Montenegro, scaled by the corresponding ratio of total energy
@@ -183,9 +167,6 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):
----------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
powerstatistics: bool
Whether argument load comprises the electricity consumption data of
the ENTSOE power statistics or of the ENTSOE transparency map
fn_load : str
File name or url location (file format .csv)
@@ -195,88 +176,72 @@ def manual_adjustment(load, fn_load, powerstatistics, countries):
Manually adjusted and interpolated load time-series with UTC
timestamps x ISO-2 countries
"""
if powerstatistics:
if "MK" in load.columns:
if "AL" not in load.columns or load.AL.isnull().values.all():
load["AL"] = load["MK"] * (4.1 / 7.4)
if "RS" in load.columns:
if "KV" not in load.columns or load.KV.isnull().values.all():
load["KV"] = load["RS"] * (4.8 / 27.0)
copy_timeslice(
load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1)
)
copy_timeslice(
load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2)
)
copy_timeslice(
load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1)
)
copy_timeslice(
load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1)
)
# is a weekend, so take the weekend before
copy_timeslice(
load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1)
)
copy_timeslice(
load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1)
)
copy_timeslice(
load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1)
)
# whole january missing
copy_timeslice(
load,
"GB",
"2010-01-01 00:00",
"2010-01-31 23:00",
Delta(days=-365),
fn_load,
)
# 1.1. at midnight gets special treatment
copy_timeslice(
load,
"IE",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"PT",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"GB",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
else:
if "AL" not in load and "AL" in countries:
if "ME" in load:
if "AL" not in load and "AL" in countries:
load["AL"] = load.ME * (5.7 / 2.9)
if "MK" not in load and "MK" in countries:
load["AL"] = load.ME * (5.7 / 2.9)
elif "MK" in load:
load["AL"] = load["MK"] * (4.1 / 7.4)
if "MK" in countries:
if "MK" not in load or load.MK.isnull().sum() > len(load) / 2:
if "ME" in load:
load["MK"] = load.ME * (6.7 / 2.9)
if "BA" not in load and "BA" in countries:
load["BA"] = load.HR * (11.0 / 16.2)
copy_timeslice(
load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1)
)
copy_timeslice(
load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1)
)
copy_timeslice(
load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1)
)
if "BA" not in load and "BA" in countries:
if "ME" in load:
load["BA"] = load.HR * (11.0 / 16.2)
if "KV" not in load or load.KV.isnull().values.all():
if "RS" in load:
load["KV"] = load["RS"] * (4.8 / 27.0)
copy_timeslice(load, "GR", "2015-08-11 21:00", "2015-08-15 20:00", Delta(weeks=1))
copy_timeslice(load, "AT", "2018-12-31 22:00", "2019-01-01 22:00", Delta(days=2))
copy_timeslice(load, "CH", "2010-01-19 07:00", "2010-01-19 22:00", Delta(days=1))
copy_timeslice(load, "CH", "2010-03-28 00:00", "2010-03-28 21:00", Delta(days=1))
# is a weekend, so take the weekend before
copy_timeslice(load, "CH", "2010-10-08 13:00", "2010-10-10 21:00", Delta(weeks=1))
copy_timeslice(load, "CH", "2010-11-04 04:00", "2010-11-04 22:00", Delta(days=1))
copy_timeslice(load, "NO", "2010-12-09 11:00", "2010-12-09 18:00", Delta(days=1))
# whole january missing
copy_timeslice(
load,
"GB",
"2010-01-01 00:00",
"2010-01-31 23:00",
Delta(days=-365),
fn_load,
)
# 1.1. at midnight gets special treatment
copy_timeslice(
load,
"IE",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"PT",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(
load,
"GB",
"2016-01-01 00:00",
"2016-01-01 01:00",
Delta(days=-366),
fn_load,
)
copy_timeslice(load, "BG", "2018-10-27 21:00", "2018-10-28 22:00", Delta(weeks=1))
copy_timeslice(load, "LU", "2019-01-02 11:00", "2019-01-05 05:00", Delta(weeks=-1))
copy_timeslice(load, "LU", "2019-02-05 20:00", "2019-02-06 19:00", Delta(weeks=-1))
if "UA" in countries:
copy_timeslice(
@@ -297,14 +262,13 @@ if __name__ == "__main__":
configure_logging(snakemake)
powerstatistics = snakemake.params.load["power_statistics"]
interpolate_limit = snakemake.params.load["interpolate_limit"]
countries = snakemake.params.countries
snapshots = pd.date_range(freq="h", **snakemake.params.snapshots)
years = slice(snapshots[0], snapshots[-1])
time_shift = snakemake.params.load["time_shift_for_large_gaps"]
load = load_timeseries(snakemake.input[0], years, countries, powerstatistics)
load = load_timeseries(snakemake.input[0], years, countries)
if "UA" in countries:
# attach load of UA (best data only for entsoe transparency)
@@ -321,7 +285,7 @@ if __name__ == "__main__":
load["MD"] = 6.2e6 * (load_ua / load_ua.sum())
if snakemake.params.load["manual_adjustments"]:
load = manual_adjustment(load, snakemake.input[0], powerstatistics, countries)
load = manual_adjustment(load, snakemake.input[0], countries)
if load.empty:
logger.warning("Build electricity demand time series is empty.")
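
For context, the gap filling in this script is two-staged: gaps of up to
``interpolate_limit`` consecutive hours are interpolated linearly, and larger
gaps are filled by copying the same hours from a slice shifted by
``time_shift_for_large_gaps``. A toy sketch of the idea with assumed values
(the actual logic lives in ``copy_timeslice`` and the interpolation call in
this script):

.. code:: python

    import numpy as np
    import pandas as pd

    idx = pd.date_range("2019-01-01", periods=24 * 14, freq="h")
    load = pd.Series(50.0, index=idx)
    load.iloc[3:5] = np.nan      # small gap -> linear interpolation
    load.iloc[200:230] = np.nan  # large gap -> copy the week before

    load = load.interpolate(method="linear", limit=3)  # interpolate_limit = 3

    gap = load[load.isna()].index  # what interpolation did not fill
    load.loc[gap] = load.shift(freq=pd.Timedelta(weeks=1)).loc[gap].values
    assert load.notna().all()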

View File

@@ -13,7 +13,6 @@ from itertools import product
import country_converter as coco
import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse
logger = logging.getLogger(__name__)
cc = coco.CountryConverter()
@@ -84,12 +83,7 @@ def prepare_hotmaps_database(regions):
gdf = gpd.GeoDataFrame(df, geometry="coordinates", crs="EPSG:4326")
kws = (
dict(op="within")
if parse(gpd.__version__) < Version("0.10")
else dict(predicate="within")
)
gdf = gpd.sjoin(gdf, regions, how="inner", **kws)
gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within")
gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]

View File

@@ -158,7 +158,7 @@ def country_cover(country_shapes, eez_shapes=None):
shapes = pd.concat([shapes, eez_shapes])
europe_shape = shapes.unary_union
if isinstance(europe_shape, MultiPolygon):
europe_shape = max(europe_shape, key=attrgetter("area"))
europe_shape = max(europe_shape.geoms, key=attrgetter("area"))
return Polygon(shell=europe_shape.exterior)
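
The ``.geoms`` accessor is needed because shapely 2.0 removed direct iteration
over multi-part geometries; ``.geoms`` also works on late 1.x releases. A small
illustration:

.. code:: python

    from operator import attrgetter

    from shapely.geometry import MultiPolygon, Polygon

    mp = MultiPolygon(
        [
            Polygon([(0, 0), (1, 0), (1, 1)]),
            Polygon([(2, 0), (4, 0), (4, 2)]),
        ]
    )
    # max(mp, ...) raises a TypeError on shapely >= 2.0
    largest = max(mp.geoms, key=attrgetter("area"))
    assert largest.area == 2.0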

View File

@@ -10,7 +10,6 @@ import logging
import geopandas as gpd
import pandas as pd
from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from shapely import wkt
@@ -41,12 +40,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
for i in [0, 1]:
gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")
kws = (
dict(op="within")
if parse(gpd.__version__) < Version("0.10")
else dict(predicate="within")
)
bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", **kws).index_right
bus_mapping = gpd.sjoin(
gdf, bus_regions, how="left", predicate="within"
).index_right
bus_mapping = bus_mapping.groupby(bus_mapping.index).first()
df[f"bus{i}"] = bus_mapping

View File

@@ -135,6 +135,7 @@ import pypsa
import seaborn as sns
from _helpers import configure_logging, update_p_nom_max
from add_electricity import load_costs
from packaging.version import Version, parse
from pypsa.clustering.spatial import (
busmap_by_greedy_modularity,
busmap_by_hac,
@@ -142,6 +143,8 @@ from pypsa.clustering.spatial import (
get_clustering_from_busmap,
)
PD_GE_2_2 = parse(pd.__version__) >= Version("2.2")
warnings.filterwarnings(action="ignore", category=UserWarning)
idx = pd.IndexSlice
logger = logging.getLogger(__name__)
@@ -362,9 +365,11 @@ def busmap_for_n_clusters(
f"`algorithm` must be one of 'kmeans' or 'hac'. Is {algorithm}."
)
compat_kws = dict(include_groups=False) if PD_GE_2_2 else {}
return (
n.buses.groupby(["country", "sub_network"], group_keys=False)
.apply(busmap_for_country, include_groups=False)
.apply(busmap_for_country, **compat_kws)
.squeeze()
.rename("busmap")
)

View File

@@ -23,15 +23,12 @@ from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locat
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from packaging.version import Version, parse
from pypsa.geo import haversine_pts
from pypsa.io import import_components_from_dataframe
from scipy.stats import beta
spatial = SimpleNamespace()
logger = logging.getLogger(__name__)
pd_version = parse(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
def define_spatial(nodes, options):
@@ -1853,16 +1850,7 @@ def add_heat(n, costs):
p_nom_extendable=True,
)
if isinstance(options["tes_tau"], dict):
tes_time_constant_days = options["tes_tau"][name_type]
else:
logger.warning(
"Deprecated: a future version will require you to specify 'tes_tau' ",
"for 'decentral' and 'central' separately.",
)
tes_time_constant_days = (
options["tes_tau"] if name_type == "decentral" else 180.0
)
tes_time_constant_days = options["tes_tau"][name_type]
n.madd(
"Store",
@@ -3404,7 +3392,7 @@ def cluster_heat_buses(n):
# cluster heat nodes
# static dataframe
agg = define_clustering(df.columns, aggregate_dict)
df = df.groupby(level=0).agg(agg, **agg_group_kwargs)
df = df.groupby(level=0).agg(agg, numeric_only=False)
# time-varying data
pnl = c.pnl
agg = define_clustering(pd.Index(pnl.keys()), aggregate_dict)
@@ -3413,7 +3401,7 @@
def renamer(s):
return s.replace("residential ", "").replace("services ", "")
pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], **agg_group_kwargs).T
pnl[k] = pnl[k].T.groupby(renamer).agg(agg[k], numeric_only=False).T
# remove unclustered assets of service/residential
to_drop = c.df.index.difference(df.index)

View File

@@ -0,0 +1,46 @@
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2023-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Retrieve electricity demand from OPSD.
"""
import logging

import pandas as pd

from _helpers import configure_logging

logger = logging.getLogger(__name__)
if __name__ == "__main__":
if "snakemake" not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake("retrieve_electricity_demand")
rootpath = ".."
else:
rootpath = "."
configure_logging(snakemake)
url = "https://data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv"
df1, df2 = [
pd.read_csv(url.format(version=version), index_col=0)
for version in snakemake.params.versions
]
combined = pd.concat([df1, df2[df2.index > df1.index[-1]]])
pattern = "_load_actual_entsoe_transparency"
transparency = combined.filter(like=pattern).rename(
columns=lambda x: x.replace(pattern, "")
)
pattern = "_load_actual_entsoe_power_statistics"
powerstatistics = combined.filter(like=pattern).rename(
columns=lambda x: x.replace(pattern, "")
)
res = transparency.fillna(powerstatistics)
res.to_csv(snakemake.output[0])
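
The merge order matters here: ``transparency.fillna(powerstatistics)`` keeps
every transparency value and falls back to the power statistics series only
where transparency has no data, e.g. for years before 2015. A toy illustration
of this preference with made-up numbers:

.. code:: python

    import numpy as np
    import pandas as pd

    idx = pd.date_range("2014-12-31 22:00", periods=6, freq="h")
    transparency = pd.Series([np.nan, np.nan, 50.0, 51.0, np.nan, 53.0], idx)
    powerstatistics = pd.Series([47.0, 48.0, 49.0, 50.5, 52.0, 54.0], idx)

    combined = transparency.fillna(powerstatistics)
    # -> [47.0, 48.0, 50.0, 51.0, 52.0, 53.0]
    # power statistics only fill the holes; transparency always wins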