# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: : 2020-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Build land transport demand per clustered model region including efficiency
improvements due to drivetrain changes, time series for electric vehicle
availability and demand-side management constraints.
"""
|
|
|
|
|
2024-01-11 13:11:25 +00:00
|
|
|
import logging
|
|
|
|
|
2022-04-03 16:49:35 +00:00
|
|
|
import numpy as np
|
|
|
|
import pandas as pd
|
|
|
|
import xarray as xr
|
2024-03-14 14:15:56 +00:00
|
|
|
from _helpers import (
|
|
|
|
configure_logging,
|
|
|
|
generate_periodic_profiles,
|
|
|
|
get_snapshots,
|
|
|
|
set_scenario_config,
|
|
|
|
)
|
2022-04-03 16:49:35 +00:00
|
|
|
|
2024-01-11 13:11:25 +00:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2022-04-03 16:49:35 +00:00
|
|
|
|
2024-03-14 15:48:32 +00:00
|
|
|
def build_nodal_transport_data(fn, pop_layout, year):
    """
    Disaggregate country-level transport data to clustered model regions.

    Parameters
    ----------
    fn : str
        Path to a CSV with a (country, year) MultiIndex containing the
        columns "number cars" and "average fuel efficiency".
    pop_layout : pd.DataFrame
        Clustered population layout with columns "ct" (country of each
        node) and "fraction" (node's share of its country's population).
    year : int
        Year for which to select the country-level transport data.

    Returns
    -------
    pd.DataFrame
        Transport data indexed by model region (node).
    """
    # country-level car counts and fuel efficiency for the requested year
    country_data = pd.read_csv(fn, index_col=[0, 1]).xs(year, level="year")

    # expand country rows to nodes, then weight the car count by each
    # node's population share of its country
    nodal = country_data.loc[pop_layout.ct].fillna(0.0)
    nodal.index = pop_layout.index
    nodal["number cars"] *= pop_layout["fraction"]

    # countries reporting no fuel efficiency get the cross-country mean
    missing_eff = nodal["average fuel efficiency"] == 0.0
    nodal.loc[missing_eff, "average fuel efficiency"] = country_data[
        "average fuel efficiency"
    ].mean()

    return nodal
|
|
|
|
|
|
|
|
|
|
|
|
def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
    """
    Returns transport demand per bus in unit km driven [100 km].

    NOTE(review): besides its parameters, this function reads the
    module-level globals ``snapshots``, ``options``,
    ``pop_weighted_energy_totals`` and ``nyears`` set in ``__main__``.
    """
    # averaged weekly counts from the year 2010-2015
    traffic = pd.read_csv(traffic_fn, skiprows=2, usecols=["count"]).squeeze("columns")

    # spread the weekly profile over the full year, accounting for
    # time zone and summer time, and normalise it to sum to one
    shape = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=traffic.values,
    )
    shape = shape / shape.sum()

    # air temperature per node, used to correct the demand time series
    temperature = xr.open_dataarray(airtemp_fn).to_pandas()

    # per-unit demand increase of ICE vehicles from cabin heating/cooling
    dd_ICE = transport_degree_factor(
        temperature,
        options["transport_heating_deadband_lower"],
        options["transport_heating_deadband_upper"],
        options["ICE_lower_degree_factor"],
        options["ICE_upper_degree_factor"],
    )

    # divide out the heating/cooling demand from ICE totals
    ice_correction = (shape * (1 + dd_ICE)).sum() / shape.sum()

    # energy totals for land transport in TWh: road plus rail, minus
    # the already-electrified rail share
    totals = pop_weighted_energy_totals
    energy_totals_transport = (
        totals["total road"] + totals["total rail"] - totals["electricity rail"]
    )

    # average fuel efficiency in MWh/100 km
    eff = nodal_transport_data["average fuel efficiency"]

    # TWh -> MWh (1e6), scaled by number of modelled years, converted to
    # driven distance via efficiency and the ICE heating correction
    return (shape.multiply(energy_totals_transport) * 1e6 * nyears).divide(
        eff * ice_correction
    )
|
|
|
|
|
|
|
|
|
|
|
|
def transport_degree_factor(
    temperature,
    deadband_lower=15,
    deadband_upper=20,
    lower_degree_factor=0.5,
    upper_degree_factor=1.6,
):
    """
    Work out how much energy demand in vehicles increases due to heating and
    cooling.

    There is a deadband where there is no increase. Degree factors are %
    increase in demand compared to no heating/cooling fuel consumption.

    Returns per unit increase in demand for each place and time.

    Parameters
    ----------
    temperature : pd.Series or pd.DataFrame
        Ambient air temperature in degrees Celsius.
    deadband_lower : float
        Temperature below which vehicle heating demand kicks in.
    deadband_upper : float
        Temperature above which vehicle cooling demand kicks in.
    lower_degree_factor : float
        Percent demand increase per degree below the lower deadband.
    upper_degree_factor : float
        Percent demand increase per degree above the upper deadband.
    """
    dd = temperature.copy()

    # no correction inside the deadband; the boundaries are included so
    # that temperatures exactly equal to a deadband edge are zeroed.
    # Previously strict inequalities were used, so dd silently retained
    # the raw temperature value at the edges (e.g. 15 degC -> +1500%).
    dd[(temperature >= deadband_lower) & (temperature <= deadband_upper)] = 0.0

    # heating: demand grows linearly with degrees below the lower deadband
    dT_lower = deadband_lower - temperature[temperature < deadband_lower]
    dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower

    # cooling: demand grows linearly with degrees above the upper deadband
    dT_upper = temperature[temperature > deadband_upper] - deadband_upper
    dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper

    return dd
|
|
|
|
|
|
|
|
|
|
|
|
def bev_availability_profile(fn, snapshots, nodes, options):
    """
    Derive plugged-in availability for passenger electric vehicles.
    """
    # car count in a typical week
    traffic = pd.read_csv(fn, skiprows=2, usecols=["count"]).squeeze("columns")

    # maximum share plugged-in availability for passenger electric vehicles
    avail_max = options["bev_avail_max"]
    # average share plugged-in availability for passenger electric vehicles
    avail_mean = options["bev_avail_mean"]

    # linear scaling: availability is highest when traffic is lowest and
    # decreases as traffic increases
    normalised_traffic = (traffic - traffic.min()) / (traffic.mean() - traffic.min())
    avail = avail_max - (avail_max - avail_mean) * normalised_traffic

    # availability below zero would make the BEV charging constraint
    # unsatisfiable, so flag it loudly (deliberately not clipped here)
    if not avail[avail < 0].empty:
        logger.warning(
            "The BEV availability weekly profile has negative values which can "
            "lead to infeasibility."
        )

    return generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=avail.values,
    )
|
|
|
|
|
|
|
|
|
|
|
|
def bev_dsm_profile(snapshots, nodes, options):
    """
    Build the weekly demand-side-management restriction profile for EVs.

    Assumes that at a certain time each day ("bev_dsm_restriction_time")
    EVs have to be charged to a minimum state of charge
    ("bev_dsm_restriction_value"); all other hours are unconstrained.
    """
    # one entry per hour of the week, zero everywhere except at the
    # restriction hour of each of the seven days
    dsm_week = np.zeros((24 * 7,))
    restriction_hours = np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"]
    dsm_week[restriction_hours] = options["bev_dsm_restriction_value"]

    return generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=dsm_week,
    )
|
|
|
|
|
2024-03-04 08:44:51 +00:00
|
|
|
|
2024-03-01 10:32:12 +00:00
|
|
|
# %%
|
2022-04-03 16:49:35 +00:00
|
|
|
if __name__ == "__main__":
    # allow standalone execution outside a snakemake workflow
    if "snakemake" not in globals():
        from _helpers import mock_snakemake

        snakemake = mock_snakemake("build_transport_demand", clusters=128)
    configure_logging(snakemake)
    set_scenario_config(snakemake)

    # clustered population layout: one row per model region (node)
    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

    nodes = pop_layout.index

    # NOTE: ``pop_weighted_energy_totals``, ``options``, ``snapshots`` and
    # ``nyears`` below are read as module-level globals by
    # ``build_transport_demand`` — do not rename them.
    pop_weighted_energy_totals = pd.read_csv(
        snakemake.input.pop_weighted_energy_totals, index_col=0
    )

    options = snakemake.params.sector

    snapshots = get_snapshots(
        snakemake.params.snapshots, snakemake.params.drop_leap_day, tz="UTC"
    )

    # fraction of a full year covered by the snapshots (8760 h = 1 year)
    nyears = len(snapshots) / 8760

    energy_totals_year = snakemake.params.energy_totals_year
    nodal_transport_data = build_nodal_transport_data(
        snakemake.input.transport_data, pop_layout, energy_totals_year
    )

    transport_demand = build_transport_demand(
        snakemake.input.traffic_data_KFZ,
        snakemake.input.temp_air_total,
        nodes,
        nodal_transport_data,
    )

    avail_profile = bev_availability_profile(
        snakemake.input.traffic_data_Pkw, snapshots, nodes, options
    )

    dsm_profile = bev_dsm_profile(snapshots, nodes, options)

    # write all outputs as CSV for downstream rules
    nodal_transport_data.to_csv(snakemake.output.transport_data)
    transport_demand.to_csv(snakemake.output.transport_demand)
    avail_profile.to_csv(snakemake.output.avail_profile)
    dsm_profile.to_csv(snakemake.output.dsm_profile)
|