Merge branch 'master' into reformulate-co2-constraint
commit b0e23652cd
@@ -74,7 +74,7 @@ repos:

   # Format Snakemake rule / workflow files
   - repo: https://github.com/snakemake/snakefmt
-    rev: v0.8.5
+    rev: v0.9.0
     hooks:
       - id: snakefmt

@@ -376,7 +376,7 @@ sector:
       2045: 0.8
       2050: 1.0
   district_heating_loss: 0.15
-  cluster_heat_buses: false
+  cluster_heat_buses: true
   bev_dsm_restriction_value: 0.75
   bev_dsm_restriction_time: 7
   transport_heating_deadband_upper: 20.
@@ -10,6 +10,8 @@ Release Notes
 Upcoming Release
 ================

+* Bugfix: Correct the technology keys for the electricity production plotting so that it works out of the box.
+
 * New configuration option ``everywhere_powerplants`` to build conventional powerplants everywhere, irrespective of existing powerplant locations, in the network (https://github.com/PyPSA/pypsa-eur/pull/850).

 * Remove option for wave energy as technology data is not maintained.
@@ -18,6 +20,15 @@ Upcoming Release
   CO2 atmosphere store. This gives a more sparse constraint that should improve
   the performance of the solving process.

+* Bugfix: Ensure the code block which corrects the Norwegian heat demand is entered.
+
+* Add a warning when the BEV availability weekly profile has negative values in ``build_transport_demand``.
+
+* Stacktraces of uncaught exceptions are now correctly included inside log files (via ``configure_logging(..)``).
+
+* Cluster residential and services heat buses by default. Can be disabled with ``cluster_heat_buses: false``.
+

 PyPSA-Eur 0.9.0 (5th January 2024)
 ==================================

@@ -191,7 +191,7 @@ if config["enable"]["retrieve"]:
         input:
             HTTP.remote(
                 "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
                     version="2019-06-05"
                     if config["snapshots"]["end"] < "2019"
                     else "2020-10-06"
                 ),
@@ -80,6 +80,7 @@ def configure_logging(snakemake, skip_handlers=False):
         Do (not) skip the default handlers created for redirecting output to STDERR and file.
     """
     import logging
+    import sys

     kwargs = snakemake.config.get("logging", dict()).copy()
     kwargs.setdefault("level", "INFO")
@@ -103,6 +104,16 @@ def configure_logging(snakemake, skip_handlers=False):
         )
         logging.basicConfig(**kwargs)

+    # Set up a function to handle uncaught exceptions and include them with their stacktrace in the logfiles
+    def handle_exception(exc_type, exc_value, exc_traceback):
+        # Log the exception
+        logger = logging.getLogger()
+        logger.error(
+            "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
+        )
+
+    sys.excepthook = handle_exception
+

 def update_p_nom_max(n):
     # if extendable carriers (solar/onwind/...) have capacity >= 0,
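The `sys.excepthook` assignment above is the standard way to route uncaught exceptions into the logging system. A minimal standalone sketch of the same pattern (the log file name is illustrative):

```python
import logging
import sys

logging.basicConfig(filename="example.log", level=logging.INFO)


def handle_exception(exc_type, exc_value, exc_traceback):
    # Forward the full traceback to all configured logging handlers
    logging.getLogger().error(
        "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback)
    )


sys.excepthook = handle_exception

raise RuntimeError("boom")  # lands in example.log with its full stacktrace
```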
@@ -223,7 +234,13 @@ def progress_retrieve(url, file, disable=False):
     urllib.request.urlretrieve(url, file, reporthook=update_to)


-def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
+def mock_snakemake(
+    rulename,
+    root_dir=None,
+    configfiles=[],
+    submodule_dir="workflow/submodules/pypsa-eur",
+    **wildcards,
+):
     """
     This function is expected to be executed from the 'scripts'-directory of
     the snakemake project. It returns a snakemake.script.Snakemake object,
@@ -239,6 +256,9 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
         path to the root directory of the snakemake project
     configfiles: list, str
         list of configfiles to be used to update the config
+    submodule_dir: str, Path
+        in case PyPSA-Eur is used as a submodule, submodule_dir is
+        the path of pypsa-eur relative to the project directory.
     **wildcards:
         keyword arguments fixing the wildcards. Only necessary if wildcards are
         needed.
@@ -257,7 +277,10 @@ def mock_snakemake(rulename, root_dir=None, configfiles=[], **wildcards):
     root_dir = Path(root_dir).resolve()

     user_in_script_dir = Path.cwd().resolve() == script_dir
-    if user_in_script_dir:
+    if str(submodule_dir) in __file__:
+        # the submodule_dir path is only needed to locate the project dir
+        os.chdir(Path(__file__[: __file__.find(str(submodule_dir))]))
+    elif user_in_script_dir:
         os.chdir(root_dir)
     elif Path.cwd().resolve() != root_dir:
         raise RuntimeError(
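With the new parameter, running a script interactively from a project that vendors PyPSA-Eur as a submodule might look as follows; this is a hedged sketch, with rule name and wildcards borrowed from the `build_transport_demand` hunk further down rather than prescribed by this change:

```python
# Hypothetical interactive session in the 'scripts' directory
from _helpers import mock_snakemake

snakemake = mock_snakemake(
    "build_transport_demand",
    submodule_dir="workflow/submodules/pypsa-eur",  # default value; adjust if vendored elsewhere
    simpl="",
    clusters=48,
)
print(snakemake.input.clustered_pop_layout)
```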
@@ -58,7 +58,7 @@ if __name__ == "__main__":
             gen = client.query_generation(country, start=start, end=end, nett=True)
             gen = gen.tz_localize(None).resample("1h").mean()
             gen = gen.loc[start.tz_localize(None) : end.tz_localize(None)]
-            gen = gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
+            gen = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T
             generation.append(gen)
         except NoMatchingDataError:
             unavailable_countries.append(country)
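This and the later summary/plotting hunks all replace column-wise `groupby(..., axis=1)`, which is deprecated in pandas 2.x, with the equivalent transpose-group-transpose idiom. A self-contained sketch with made-up column names:

```python
import pandas as pd

gen = pd.DataFrame(
    [[1.0, 2.0, 3.0]],
    columns=["Wind Onshore", "Wind Offshore", "Solar"],
)
carrier_grouper = {"Wind Onshore": "Wind", "Wind Offshore": "Wind", "Solar": "Solar"}

# Deprecated: gen.rename(columns=carrier_grouper).groupby(level=0, axis=1).sum()
out = gen.rename(columns=carrier_grouper).T.groupby(level=0).sum().T
print(out)  # one "Solar" column (3.0) and one aggregated "Wind" column (3.0)
```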
@@ -479,7 +479,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
     # The main heating source for about 73 per cent of the households is based on electricity
     # => 27% is non-electric

-    if "NO" in df:
+    if "NO" in df.index:
         elec_fraction = 0.73

         no_norway = df.drop("NO")
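The bugfix is subtle: `in` on a DataFrame tests column labels, so a country present only in the index was never matched and the Norwegian heat correction was silently skipped. A tiny sketch:

```python
import pandas as pd

df = pd.DataFrame({"total heating": [1.0]}, index=["NO"])

print("NO" in df)        # False: membership checks the columns
print("NO" in df.index)  # True: the row for Norway exists
```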
@@ -8,10 +8,14 @@ improvements due to drivetrain changes, time series for electric vehicle
 availability and demand-side management constraints.
 """

+import logging
+
 import numpy as np
 import pandas as pd
 import xarray as xr
-from _helpers import generate_periodic_profiles
+from _helpers import configure_logging, generate_periodic_profiles
+
+logger = logging.getLogger(__name__)


 def build_nodal_transport_data(fn, pop_layout):
@@ -130,6 +134,12 @@ def bev_availability_profile(fn, snapshots, nodes, options):
         traffic.mean() - traffic.min()
     )

+    if not avail[avail < 0].empty:
+        logger.warning(
+            "The BEV availability weekly profile has negative values which can "
+            "lead to infeasibility."
+        )
+
     return generate_periodic_profiles(
         dt_index=snapshots,
         nodes=nodes,
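For intuition on when the warning fires: the availability profile is a linear rescaling of traffic counts, and a traffic peak far above the weekly mean can push it below zero. A toy sketch; the scaling formula is assumed from the hunk's context lines, and the `avail_max`/`avail_mean` values are illustrative:

```python
import pandas as pd

avail_max, avail_mean = 0.95, 0.8
traffic = pd.Series([10.0] * 10 + [100.0])  # one extreme peak far above the mean

avail = avail_max - (avail_max - avail_mean) * (traffic - traffic.min()) / (
    traffic.mean() - traffic.min()
)
print(avail[avail < 0])  # the peak entry is about -0.7, hence the warning
```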
@@ -160,6 +170,7 @@ if __name__ == "__main__":
             simpl="",
             clusters=48,
         )
+    configure_logging(snakemake)

     pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

@@ -521,9 +521,7 @@ def calculate_weighted_prices(n, label, weighted_prices):

         if not names.empty:
             load += (
-                n.links_t.p0[names]
-                .groupby(n.links.loc[names, "bus0"], axis=1)
-                .sum()
+                n.links_t.p0[names].T.groupby(n.links.loc[names, "bus0"]).sum().T
             )

         # Add H2 Store when charging
@@ -563,11 +561,10 @@ def calculate_market_values(n, label, market_values):

         dispatch = (
             n.generators_t.p[gens]
-            .groupby(n.generators.loc[gens, "bus"], axis=1)
+            .T.groupby(n.generators.loc[gens, "bus"])
             .sum()
-            .reindex(columns=buses, fill_value=0.0)
+            .T.reindex(columns=buses, fill_value=0.0)
         )

         revenue = dispatch * n.buses_t.marginal_price[buses]

         market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
@@ -586,9 +583,9 @@ def calculate_market_values(n, label, market_values):

         dispatch = (
             n.links_t["p" + i][links]
-            .groupby(n.links.loc[links, "bus" + i], axis=1)
+            .T.groupby(n.links.loc[links, "bus" + i])
             .sum()
-            .reindex(columns=buses, fill_value=0.0)
+            .T.reindex(columns=buses, fill_value=0.0)
         )

         revenue = dispatch * n.buses_t.marginal_price[buses]
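The quantity computed in these hunks is the dispatch-weighted market value: total revenue divided by total dispatch. A toy numeric check (bus names and values are made up):

```python
import pandas as pd

dispatch = pd.DataFrame({"bus0": [1.0, 2.0], "bus1": [0.0, 1.0]})  # MW per snapshot
price = pd.DataFrame({"bus0": [50.0, 30.0], "bus1": [40.0, 20.0]})  # EUR/MWh

revenue = dispatch * price
market_value = revenue.sum().sum() / dispatch.sum().sum()
print(market_value)  # (50 + 60 + 0 + 20) / (1 + 2 + 0 + 1) = 32.5
```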
@@ -271,10 +271,11 @@ def plot_h2_map(network, regions):
     assign_location(n)

     h2_storage = n.stores.query("carrier == 'H2'")
-    regions["H2"] = h2_storage.rename(
-        index=h2_storage.bus.map(n.buses.location)
-    ).e_nom_opt.div(
-        1e6
-    )  # TWh
+    regions["H2"] = (
+        h2_storage.rename(index=h2_storage.bus.map(n.buses.location))
+        .e_nom_opt.groupby(level=0)
+        .sum()
+        .div(1e6)
+    )  # TWh
     regions["H2"] = regions["H2"].where(regions["H2"] > 0.1)

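The added `groupby(level=0).sum()` matters because several H2 stores can map to the same region once their indices are renamed to bus locations; aggregating the duplicates avoids assigning a Series with repeated index labels. Sketch with invented store capacities:

```python
import pandas as pd

# Optimised store capacities in MWh, already renamed to their region labels
e_nom_opt = pd.Series([2e6, 3e6, 1e6], index=["DE0 0", "DE0 0", "FR0 0"])

regions_h2 = e_nom_opt.groupby(level=0).sum().div(1e6)  # TWh per region
print(regions_h2)  # DE0 0 -> 5.0, FR0 0 -> 1.0
```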
@@ -154,7 +154,7 @@ def plot_costs():

     df = df.drop(to_drop)

-    logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")
+    logger.info(f"Total system cost of {round(df.sum().iloc[0])} EUR billion per year")

     new_index = preferred_order.intersection(df.index).append(
         df.index.difference(preferred_order)
@@ -214,7 +214,7 @@ def plot_energy():

     df = df.drop(to_drop)

-    logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")
+    logger.info(f"Total energy of {round(df.sum().iloc[0])} TWh/a")

     if df.empty:
         fig, ax = plt.subplots(figsize=(12, 8))
@@ -304,7 +304,9 @@ def plot_balances():

         df = df.drop(to_drop)

-        logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}")
+        logger.debug(
+            f"Total energy balance for {v} of {round(df.sum().iloc[0],2)} {units}"
+        )

         if df.empty:
             continue
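These three hunks fix the same pandas deprecation: integer `[]` access on a label-indexed Series (`df.sum()[0]`) is deprecated in pandas 2.x in favour of explicit positional `.iloc[0]`. Minimal sketch:

```python
import pandas as pd

totals = pd.DataFrame({"cost": [1.2, 3.4]}).sum()  # Series indexed by "cost"

# Deprecated: totals[0] -- integer keys on a non-integer index fall back to positions
first = totals.iloc[0]  # explicit positional access
print(round(first, 2))  # 4.6
```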
@@ -45,6 +45,12 @@ if __name__ == "__main__":
         header=[0, 1],
         parse_dates=True,
     )
+    subset_technologies = ["Geothermal", "Nuclear", "Biomass", "Lignite", "Oil", "Coal"]
+    lowercase_technologies = [
+        technology.lower() if technology in subset_technologies else technology
+        for technology in historic.columns.levels[1]
+    ]
+    historic.columns = historic.columns.set_levels(lowercase_technologies, level=1)

     colors = n.carriers.set_index("nice_name").color.where(
         lambda s: s != "", "lightgrey"
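This is the release-notes bugfix that aligns the historic technology labels with the network's carrier keys by lower-casing a chosen subset on the second column level. A small sketch on an invented two-level header:

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples([("DE", "Nuclear"), ("DE", "Wind Onshore")])
historic = pd.DataFrame([[1.0, 2.0]], columns=cols)

subset_technologies = ["Nuclear"]
lowercase = [
    tech.lower() if tech in subset_technologies else tech
    for tech in historic.columns.levels[1]
]
historic.columns = historic.columns.set_levels(lowercase, level=1)
print(historic.columns.tolist())  # [('DE', 'nuclear'), ('DE', 'Wind Onshore')]
```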
@@ -341,6 +341,8 @@ def prepare_network(
         for df in (
             n.generators_t.p_max_pu,
             n.generators_t.p_min_pu,
+            n.links_t.p_max_pu,
+            n.links_t.p_min_pu,
             n.storage_units_t.inflow,
         ):
             df.where(df > solve_opts["clip_p_max_pu"], other=0.0, inplace=True)
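Extending the loop to link availabilities applies the same small-value clipping that sparsifies the problem before solving. Sketch of the `df.where` idiom, with an invented threshold standing in for `solve_opts["clip_p_max_pu"]`:

```python
import pandas as pd

p_max_pu = pd.DataFrame({"gen0": [0.002, 0.4], "link0": [1e-5, 0.9]})
clip_p_max_pu = 0.01  # illustrative threshold

# Keep values above the threshold, zero out the rest (in place)
p_max_pu.where(p_max_pu > clip_p_max_pu, other=0.0, inplace=True)
print(p_max_pu)  # tiny availabilities are now exactly 0.0
```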