diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 78e70b57..02d360d3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -74,7 +74,7 @@ repos:
   # Format Snakemake rule / workflow files
   - repo: https://github.com/snakemake/snakefmt
-    rev: v0.8.5
+    rev: v0.9.0
     hooks:
       - id: snakefmt
diff --git a/config/config.default.yaml b/config/config.default.yaml
index 1d28223f..661f855e 100644
--- a/config/config.default.yaml
+++ b/config/config.default.yaml
@@ -376,7 +376,7 @@ sector:
       2045: 0.8
       2050: 1.0
     district_heating_loss: 0.15
-  cluster_heat_buses: false
+  cluster_heat_buses: true
   bev_dsm_restriction_value: 0.75
   bev_dsm_restriction_time: 7
   transport_heating_deadband_upper: 20.
diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 914791a1..dc1a9dd1 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -10,6 +10,8 @@ Release Notes
 Upcoming Release
 ================

+* Bugfix: Correct technology keys for the electricity production plotting to work out of the box.
+
 * New configuration option ``everywhere_powerplants`` to build conventional powerplants everywhere, irrespective of existing powerplants locations, in the network (https://github.com/PyPSA/pypsa-eur/pull/850).

 * Remove option for wave energy as technology data is not maintained.

@@ -18,6 +20,15 @@ Upcoming Release
   CO2 atmosphere store. This gives a more sparse constraint that should improve
   the performance of the solving process.

+* Bugfix: Ensure that the code block which corrects Norwegian heat demand is entered.
+
+* Add a warning when the BEV availability weekly profile has negative values in ``build_transport_demand``.
+
+* Stacktraces of uncaught exceptions are now correctly included in log files (via ``configure_logging(..)``).
+
+* Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``.
+
+
 PyPSA-Eur 0.9.0 (5th January 2024)
 ==================================
diff --git a/rules/retrieve.smk b/rules/retrieve.smk
index 7a180e22..5e1e3e59 100644
--- a/rules/retrieve.smk
+++ b/rules/retrieve.smk
@@ -191,7 +191,7 @@ if config["enable"]["retrieve"]:
         input:
             HTTP.remote(
                 "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
-                    version="2019-06-05"
+                    version="2019-06-05" if config["snapshots"]["end"] < "2019" else "2020-10-06"
                 ),
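Note on the rules/retrieve.smk change above: the OPSD release is selected by comparing the snapshot end date as a string, which works because ISO-formatted date strings sort lexicographically in chronological order. A minimal sketch of that comparison, with an illustrative helper name and example dates that are not part of the patch:

    # Minimal sketch: ISO date strings compare lexicographically in chronological
    # order, so a plain string comparison is enough to pick the OPSD release.
    # The helper name and example dates are made up for illustration.
    def opsd_version(snapshots_end: str) -> str:
        return "2019-06-05" if snapshots_end < "2019" else "2020-10-06"

    assert opsd_version("2018-12-31") == "2019-06-05"
    assert opsd_version("2019-07-01") == "2020-10-06"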
diff --git a/scripts/_helpers.py b/scripts/_helpers.py
index 03bde840..c5c96db9 100644
--- a/scripts/_helpers.py
+++ b/scripts/_helpers.py
@@ -80,6 +80,7 @@ def configure_logging(snakemake, skip_handlers=False):
         Do (not) skip the default handlers created for redirecting output to
         STDERR and file.
     """
     import logging
+    import sys

     kwargs = snakemake.config.get("logging", dict()).copy()
     kwargs.setdefault("level", "INFO")
@@ -103,6 +104,16 @@ def configure_logging(snakemake, skip_handlers=False):
         )
     logging.basicConfig(**kwargs)

+    # Set up a function to handle uncaught exceptions and include them with their stacktrace in the log files
+    def handle_exception(exc_type, exc_value, exc_traceback):
+        # Log the exception
+        logger = logging.getLogger()
+        logger.error(
+            "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
+        )
+
+    sys.excepthook = handle_exception
+

 def update_p_nom_max(n):
     # if extendable carriers (solar/onwind/...) have capacity >= 0,
@@ -223,7 +234,13 @@ def progress_retrieve(url, file, disable=False):
     urllib.request.urlretrieve(url, file, reporthook=update_to)


-def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
+def mock_snakemake(
+    rulename,
+    root_dir=None,
+    configfiles=[],
+    submodule_dir="workflow/submodules/pypsa-eur",
+    **wildcards,
+):
     """
     This function is expected to be executed from the 'scripts'-directory of
     the snakemake project. It returns a snakemake.script.Snakemake object,
@@ -239,6 +256,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
         path to the root directory of the snakemake project
     configfiles: list, str
         list of configfiles to be used to update the config
+    submodule_dir: str, Path
+        In case PyPSA-Eur is used as a submodule, submodule_dir is
+        the path of pypsa-eur relative to the project directory.
     **wildcards:
         keyword arguments fixing the wildcards. Only necessary if wildcards are
         needed.
@@ -257,7 +277,10 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
         root_dir = Path(root_dir).resolve()

     user_in_script_dir = Path.cwd().resolve() == script_dir
-    if user_in_script_dir:
+    if str(submodule_dir) in __file__:
+        # the submodule_dir path is only needed to locate the project dir
+        os.chdir(Path(__file__[: __file__.find(str(submodule_dir))]))
+    elif user_in_script_dir:
         os.chdir(root_dir)
     elif Path.cwd().resolve() != root_dir:
         raise RuntimeError(
diff --git a/scripts/build_electricity_production.py b/scripts/build_electricity_production.py
index beb859bd..e89ad78b 100644
--- a/scripts/build_electricity_production.py
+++ b/scripts/build_electricity_production.py
@@ -58,7 +58,7 @@ if __name__ == "__main__":
             gen = client.query_generation(country, start=start, end=end, nett=True)
             gen = gen.tz_localize(None).resample("1h").mean()
             gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
-            gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
+            gen = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T
             generation.append(gen)
         except NoMatchingDataError:
             unavailable_countries.append(country)
diff --git a/scripts/build_energy_totals.py b/scripts/build_energy_totals.py
index 67b86466..80c5d442 100644
--- a/scripts/build_energy_totals.py
+++ b/scripts/build_energy_totals.py
@@ -479,7 +479,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
     # The main heating source for about 73 per cent of the households is based on electricity
     # => 26% is non-electric

-    if "NO" in df:
+    if "NO" in df.index:
         elec_fraction = 0.73

         no_norway = df.drop("NO")
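The `.T.groupby(...).sum().T` chain introduced above (and used again in scripts/make_summary.py further down) replaces column-wise `groupby(..., axis=1)`, which is deprecated in recent pandas. A minimal sketch of the equivalence, using a made-up generation frame and grouper:

    import pandas as pd

    # Made-up per-technology generation; the grouper maps raw column names onto
    # aggregated carriers, mirroring what carrier_grouper does in the script.
    gen = pd.DataFrame(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
        columns=["Fossil Hard coal", "Fossil Brown coal/Lignite", "Wind Onshore"],
    )
    grouper = {
        "Fossil Hard coal": "Coal",
        "Fossil Brown coal/Lignite": "Coal",
        "Wind Onshore": "Onshore Wind",
    }

    # Deprecated: gen.rename(columns=grouper).groupby(level=0, axis=1).sum()
    # Equivalent: transpose, group on the (row) index, transpose back.
    summed = gen.rename(columns=grouper).T.groupby(level=0).sum().T
    print(summed)  # columns "Coal" (sum of both coal columns) and "Onshore Wind"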
diff --git a/scripts/build_transport_demand.py b/scripts/build_transport_demand.py
index 0bcfb7ed..33c8faae 100644
--- a/scripts/build_transport_demand.py
+++ b/scripts/build_transport_demand.py
@@ -8,10 +8,14 @@ improvements due to drivetrain changes, time series for electric vehicle
 availability and demand-side management constraints.
 """

+import logging
+
 import numpy as np
 import pandas as pd
 import xarray as xr
-from _helpers import generate_periodic_profiles
+from _helpers import configure_logging, generate_periodic_profiles
+
+logger = logging.getLogger(__name__)


 def build_nodal_transport_data(fn, pop_layout):
@@ -130,6 +134,12 @@ def bev_availability_profile(fn, snapshots, nodes, options):
         traffic.mean() - traffic.min()
     )

+    if not avail[avail < 0].empty:
+        logger.warning(
+            "The BEV availability weekly profile has negative values which can "
+            "lead to infeasibility."
+        )
+
     return generate_periodic_profiles(
         dt_index=snapshots,
         nodes=nodes,
@@ -160,6 +170,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    configure_logging(snakemake)

     pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
diff --git a/scripts/make_summary.py b/scripts/make_summary.py
index fb13e91e..7223f4d5 100644
--- a/scripts/make_summary.py
+++ b/scripts/make_summary.py
@@ -521,9 +521,7 @@ def calculate_weighted_prices(n, label, weighted_prices):

         if not names.empty:
             load += (
-                n.links_t.p0[names]
-                .groupby(n.links.loc[names, "bus0"], axis=1)
-                .sum()
+                n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T
             )

         # Add H2 Store when charging
@@ -563,11 +561,10 @@ def calculate_market_values(n, label, market_values):

         dispatch = (
             n.generators_t.p[gens]
-            .groupby(n.generators.loc[gens, "bus"], axis=1)
+            .T.groupby(n.generators.loc[gens, "bus"])
             .sum()
-            .reindex(columns=buses, fill_value=0.0)
+            .T.reindex(columns=buses, fill_value=0.0)
         )
-
         revenue = dispatch * n.buses_t.marginal_price[buses]

         market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
@@ -586,9 +583,9 @@ def calculate_market_values(n, label, market_values):

         dispatch = (
             n.links_t["p" + i][links]
-            .groupby(n.links.loc[links, "bus" + i], axis=1)
+            .T.groupby(n.links.loc[links, "bus" + i])
             .sum()
-            .reindex(columns=buses, fill_value=0.0)
+            .T.reindex(columns=buses, fill_value=0.0)
         )

         revenue = dispatch * n.buses_t.marginal_price[buses]
diff --git a/scripts/plot_network.py b/scripts/plot_network.py
index 67481120..63b5d848 100644
--- a/scripts/plot_network.py
+++ b/scripts/plot_network.py
@@ -271,10 +271,11 @@ def plot_h2_map(network, regions):
     assign_location(n)

     h2_storage = n.stores.query("carrier == 'H2'")
-    regions["H2"] = h2_storage.rename(
-        index=h2_storage.bus.map(n.buses.location)
-    ).e_nom_opt.div(
-        1e6
+    regions["H2"] = (
+        h2_storage.rename(index=h2_storage.bus.map(n.buses.location))
+        .e_nom_opt.groupby(level=0)
+        .sum()
+        .div(1e6)
     )  # TWh
     regions["H2"] = regions["H2"].where(regions["H2"] > 0.1)
diff --git a/scripts/plot_summary.py b/scripts/plot_summary.py
index 67ac9b55..2a6c9f15 100644
--- a/scripts/plot_summary.py
+++ b/scripts/plot_summary.py
@@ -154,7 +154,7 @@ def plot_costs():

     df = df.drop(to_drop)

-    logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")
+    logger.info(f"Total system cost of {round(df.sum().iloc[0])} EUR billion per year")

     new_index = preferred_order.intersection(df.index).append(
         df.index.difference(preferred_order)
@@ -214,7 +214,7 @@ def plot_energy():

     df = df.drop(to_drop)

-    logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
+    logger.info(f"Total energy of {round(df.sum().iloc[0])} TWh/a")

     if df.empty:
         fig, ax = plt.subplots(figsize=(12, 8))
@@ -304,7 +304,9 @@ def plot_balances():

         df = df.drop(to_drop)

-        logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}")
+        logger.debug(
+            f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}"
+        )

         if df.empty:
             continue
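The scripts/plot_validation_electricity_production.py change below lowercases a subset of the level-1 labels (technologies) of the historic generation's MultiIndex columns via `Index.set_levels`. A minimal sketch of that pattern on a made-up frame, with illustrative column names:

    import pandas as pd

    # Made-up (country, technology) columns standing in for the historic data.
    columns = pd.MultiIndex.from_product([["DE", "FR"], ["Nuclear", "Wind Onshore"]])
    historic = pd.DataFrame([[1.0, 2.0, 3.0, 4.0]], columns=columns)

    subset = ["Nuclear"]  # technologies whose labels should be lowercased
    new_level = [
        tech.lower() if tech in subset else tech
        for tech in historic.columns.levels[1]
    ]
    historic.columns = historic.columns.set_levels(new_level, level=1)
    print(historic.columns.levels[1].tolist())  # ['nuclear', 'Wind Onshore']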
diff --git a/scripts/plot_validation_electricity_production.py b/scripts/plot_validation_electricity_production.py
index 5c5569d0..e95a8126 100644
--- a/scripts/plot_validation_electricity_production.py
+++ b/scripts/plot_validation_electricity_production.py
@@ -45,6 +45,12 @@ if __name__ == "__main__":
         header=[0, 1],
         parse_dates=True,
     )
+    subset_technologies = ["Geothermal", "Nuclear", "Biomass", "Lignite", "Oil", "Coal"]
+    lowercase_technologies = [
+        technology.lower() if technology in subset_technologies else technology
+        for technology in historic.columns.levels[1]
+    ]
+    historic.columns = historic.columns.set_levels(lowercase_technologies, level=1)

     colors = n.carriers.set_index("nice_name").color.where(
         lambda s: s != "", "lightgrey"
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index 0b503644..55704d4d 100644
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -341,6 +341,8 @@ def prepare_network(
         for df in (
             n.generators_t.p_max_pu,
             n.generators_t.p_min_pu,
+            n.links_t.p_max_pu,
+            n.links_t.p_min_pu,
             n.storage_units_t.inflow,
         ):
             df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
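For reference, a minimal standalone sketch of the `sys.excepthook` pattern that the `configure_logging` change above installs, so uncaught exceptions end up with their stacktrace in the log file; the log file name and the raised error are illustrative only:

    import logging
    import sys

    # Illustrative log target; configure_logging derives it from the Snakemake rule.
    logging.basicConfig(filename="example.log", level=logging.INFO)

    def handle_exception(exc_type, exc_value, exc_traceback):
        # Route the uncaught exception, including its traceback, through the root logger.
        logging.getLogger().error(
            "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
        )

    sys.excepthook = handle_exception

    # Any uncaught error is now written to example.log with a full stacktrace.
    raise ValueError("demonstration error")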