prepare: separate code for transport demand and nodal energy totals

Fabian Neumann 2022-04-03 18:49:35 +02:00
parent b6cfcf6364
commit b112da0565
5 changed files with 312 additions and 173 deletions

Snakefile

@@ -427,15 +427,45 @@ else:
    build_retro_cost_output = {}


rule build_population_weighted_energy_totals:
    input:
        energy_totals='resources/energy_totals.csv',
        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv"
    output: "resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=2000
    script: "scripts/build_population_weighted_energy_totals.py"


rule build_transport_demand:
    input:
        clustered_pop_layout="resources/pop_layout_elec_s{simpl}_{clusters}.csv",
        pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        transport_data='resources/transport_data.csv',
        traffic_data_KFZ="data/emobility/KFZ__count",
        traffic_data_Pkw="data/emobility/Pkw__count",
        temp_air_total="resources/temp_air_total_elec_s{simpl}_{clusters}.nc",
    output:
        transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
        transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
        avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
        dsm_profile="resources/dsm_profile_s{simpl}_{clusters}.csv"
    threads: 1
    resources: mem_mb=2000
    script: "scripts/build_transport_demand.py"


rule prepare_sector_network:
    input:
        overrides="data/override_component_attrs",
        network=pypsaeur('networks/elec_s{simpl}_{clusters}_ec_lv{lv}_{opts}.nc'),
        energy_totals_name='resources/energy_totals.csv',
        pop_weighted_energy_totals="resources/pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        transport_demand="resources/transport_demand_s{simpl}_{clusters}.csv",
        transport_data="resources/transport_data_s{simpl}_{clusters}.csv",
        avail_profile="resources/avail_profile_s{simpl}_{clusters}.csv",
        dsm_profile="resources/dsm_profile_s{simpl}_{clusters}.csv",
        co2_totals_name='resources/co2_totals.csv',
        transport_name='resources/transport_data.csv',
        traffic_data_KFZ="data/emobility/KFZ__count",
        traffic_data_Pkw="data/emobility/Pkw__count",
        biomass_potentials='resources/biomass_potentials_s{simpl}_{clusters}.csv',
        heat_profile="data/heat_load_profile_BDEW.csv",
        costs=CDIR + "costs_{planning_horizons}.csv",
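With these rules in place, the intermediate files can be requested directly. A hypothetical invocation for the default wildcards (simpl='', clusters=48), which runs build_population_weighted_energy_totals and build_transport_demand before prepare_sector_network picks up their outputs:

    snakemake -j1 resources/transport_demand_s_48.csv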

scripts/build_population_weighted_energy_totals.py

@@ -0,0 +1,22 @@
"""Build population-weighted energy totals."""

import pandas as pd

if __name__ == '__main__':
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'build_population_weighted_energy_totals',
            simpl='',
            clusters=48,
        )

    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

    energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0)

    nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.)
    nodal_energy_totals.index = pop_layout.index
    nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)

    nodal_energy_totals.to_csv(snakemake.output[0])
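A minimal sketch of this disaggregation with made-up numbers (node and country labels are hypothetical): country totals are replicated per node via pop_layout.ct, then scaled by each node's share of the national population.

import pandas as pd

pop_layout = pd.DataFrame({"ct": ["DE", "DE", "FR"], "fraction": [0.6, 0.4, 1.0]},
                          index=["DE0 0", "DE0 1", "FR0 0"])
energy_totals = pd.DataFrame({"total road": [100.0, 50.0]}, index=["DE", "FR"])

nodal = energy_totals.loc[pop_layout.ct].fillna(0.)   # replicate country rows per node
nodal.index = pop_layout.index
nodal = nodal.multiply(pop_layout.fraction, axis=0)   # scale by population share
# "total road" becomes DE0 0: 60.0, DE0 1: 40.0, FR0 0: 50.0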

scripts/build_transport_demand.py

@@ -0,0 +1,201 @@
"""Build transport demand."""

import pandas as pd
import numpy as np
import xarray as xr

from helper import generate_periodic_profiles


def build_nodal_transport_data(fn, pop_layout):

    transport_data = pd.read_csv(fn, index_col=0)

    nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.0)
    nodal_transport_data.index = pop_layout.index
    nodal_transport_data["number cars"] = (
        pop_layout["fraction"] * nodal_transport_data["number cars"]
    )
    nodal_transport_data.loc[
        nodal_transport_data["average fuel efficiency"] == 0.0,
        "average fuel efficiency",
    ] = transport_data["average fuel efficiency"].mean()

    return nodal_transport_data
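A toy check of the fuel-efficiency fallback (hypothetical values); note the fleet mean is taken over all countries, including those that report zero.

import pandas as pd

td = pd.DataFrame({"average fuel efficiency": [0.30, 0.0, 0.42]},
                  index=["DE", "GR", "FR"])
td.loc[td["average fuel efficiency"] == 0.0, "average fuel efficiency"] = \
    td["average fuel efficiency"].mean()
# GR receives (0.30 + 0.0 + 0.42) / 3 = 0.24, since the zeros enter the mean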
def build_transport_demand(traffic_fn, airtemp_fn, nodes, nodal_transport_data):
    # note: also relies on the module-level globals `snapshots`, `options`,
    # `pop_weighted_energy_totals` and `Nyears` set in __main__ below

    ## Get overall demand curve for all vehicles

    traffic = pd.read_csv(
        traffic_fn, skiprows=2, usecols=["count"], squeeze=True
    )

    transport_shape = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=traffic.values,
    )
    transport_shape = transport_shape / transport_shape.sum()

    # electric motors are more efficient, so alter transport demand
    plug_to_wheels_eta = options["bev_plug_to_wheel_efficiency"]
    battery_to_wheels_eta = plug_to_wheels_eta * options["bev_charge_efficiency"]

    efficiency_gain = (
        nodal_transport_data["average fuel efficiency"] / battery_to_wheels_eta
    )

    # get heating demand for correction of the demand time series
    temperature = xr.open_dataarray(airtemp_fn).to_pandas()

    # correction factors for vehicle heating
    dd_ICE = transport_degree_factor(
        temperature,
        options["transport_heating_deadband_lower"],
        options["transport_heating_deadband_upper"],
        options["ICE_lower_degree_factor"],
        options["ICE_upper_degree_factor"],
    )

    dd_EV = transport_degree_factor(
        temperature,
        options["transport_heating_deadband_lower"],
        options["transport_heating_deadband_upper"],
        options["EV_lower_degree_factor"],
        options["EV_upper_degree_factor"],
    )

    # divide out the heating/cooling demand from ICE totals
    # and multiply back in the heating/cooling demand for EVs
    ice_correction = (transport_shape * (1 + dd_ICE)).sum() / transport_shape.sum()

    energy_totals_transport = (
        pop_weighted_energy_totals["total road"]
        + pop_weighted_energy_totals["total rail"]
        - pop_weighted_energy_totals["electricity rail"]
    )

    transport = (
        (transport_shape.multiply(energy_totals_transport) * 1e6 * Nyears)
        .divide(efficiency_gain * ice_correction)
        .multiply(1 + dd_EV)
    )

    return transport
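Back-of-envelope numbers for the efficiency correction, using the defaults that appear in the old prepare_sector_network code below (plug-to-wheel 0.2, charge efficiency 0.9) and a hypothetical ICE fleet efficiency value of 0.7:

plug_to_wheels_eta = 0.2
battery_to_wheels_eta = plug_to_wheels_eta * 0.9   # 0.18
efficiency_gain = 0.7 / battery_to_wheels_eta      # ~3.9 for the assumed ICE fleet value
# so 100 TWh/a of ICE road fuel shrinks to roughly 100 / 3.9 ≈ 26 TWh/a of
# electricity, before the (1 + dd_EV) / ice_correction heating terms reweight it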
def transport_degree_factor(
    temperature,
    deadband_lower=15,
    deadband_upper=20,
    lower_degree_factor=0.5,
    upper_degree_factor=1.6,
):
    """
    Work out how much energy demand in vehicles increases due to heating
    and cooling. There is a deadband where there is no increase. Degree
    factors are the percentage increase in demand compared to no
    heating/cooling fuel consumption. Returns the per-unit increase in
    demand for each place and time.
    """

    dd = temperature.copy()

    # closed deadband, so temperatures exactly on the boundary add no demand
    dd[(temperature >= deadband_lower) & (temperature <= deadband_upper)] = 0.0

    dT_lower = deadband_lower - temperature[temperature < deadband_lower]
    dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower

    dT_upper = temperature[temperature > deadband_upper] - deadband_upper
    dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper

    return dd
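A small worked example with the function's default factors (0.5 %/K below 15 °C, 1.6 %/K above 20 °C):

import pandas as pd

temp = pd.Series([-5.0, 18.0, 25.0])
dd = transport_degree_factor(temp)
# -5 °C: 20 K below the deadband -> 0.5 / 100 * 20 = 0.10 (10% extra demand)
# 18 °C: inside the deadband     -> 0.0
# 25 °C: 5 K above the deadband  -> 1.6 / 100 * 5  = 0.08 (8% extra demand)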
def bev_availability_profile(fn, snapshots, nodes, options):
    """
    Derive plugged-in availability for passenger electric vehicles.
    """

    traffic = pd.read_csv(fn, skiprows=2, usecols=["count"], squeeze=True)

    avail_max = options["bev_avail_max"]
    avail_mean = options["bev_avail_mean"]

    avail = avail_max - (avail_max - avail_mean) * (traffic - traffic.min()) / (
        traffic.mean() - traffic.min()
    )

    avail_profile = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=avail.values,
    )

    return avail_profile
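The linear map pins the quietest traffic count to avail_max and the mean count to avail_mean; counts above the mean extrapolate below avail_mean. With the defaults from the old prepare_sector_network code (0.95 and 0.8) and hypothetical counts:

avail_max, avail_mean = 0.95, 0.8
tmin, tmean, tpeak = 100.0, 500.0, 900.0   # hypothetical hourly vehicle counts

def f(t):
    return avail_max - (avail_max - avail_mean) * (t - tmin) / (tmean - tmin)

# f(tmin) = 0.95, f(tmean) = 0.80, f(tpeak) = 0.65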
def bev_dsm_profile(snapshots, nodes, options):

    dsm_week = np.zeros((24 * 7,))

    dsm_week[(np.arange(0, 7, 1) * 24 + options["bev_dsm_restriction_time"])] = options[
        "bev_dsm_restriction_value"
    ]

    dsm_profile = generate_periodic_profiles(
        dt_index=snapshots,
        nodes=nodes,
        weekly_profile=dsm_week,
    )

    return dsm_profile
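The weekly DSM pattern is zero except at one hour of each day, where the EV battery state of charge is forced up to a floor; with hypothetical settings bev_dsm_restriction_time=7 and bev_dsm_restriction_value=0.95:

import numpy as np

dsm_week = np.zeros(24 * 7)
dsm_week[np.arange(7) * 24 + 7] = 0.95
# hours 7, 31, 55, ... of the week (7am local time each day) require >= 95% SoC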
if __name__ == "__main__":
    if "snakemake" not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            "build_transport_demand",
            simpl="",
            clusters=48,
        )

    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)

    nodes = pop_layout.index

    pop_weighted_energy_totals = pd.read_csv(
        snakemake.input.pop_weighted_energy_totals, index_col=0
    )

    options = snakemake.config["sector"]

    snapshots = pd.date_range(freq='h', **snakemake.config["snapshots"], tz="UTC")

    Nyears = 1

    nodal_transport_data = build_nodal_transport_data(
        snakemake.input.transport_data,
        pop_layout
    )

    transport_demand = build_transport_demand(
        snakemake.input.traffic_data_KFZ,
        snakemake.input.temp_air_total,
        nodes, nodal_transport_data
    )

    avail_profile = bev_availability_profile(
        snakemake.input.traffic_data_Pkw,
        snapshots, nodes, options
    )

    dsm_profile = bev_dsm_profile(snapshots, nodes, options)

    nodal_transport_data.to_csv(snakemake.output.transport_data)
    transport_demand.to_csv(snakemake.output.transport_demand)
    avail_profile.to_csv(snakemake.output.avail_profile)
    dsm_profile.to_csv(snakemake.output.dsm_profile)

scripts/helper.py

@@ -1,4 +1,5 @@
import os
import pytz
import pandas as pd
from pathlib import Path
from pypsa.descriptors import Dict
@@ -100,4 +101,25 @@ def progress_retrieve(url, file):
    def dlProgress(count, blockSize, totalSize):
        pbar.update(int(count * blockSize * 100 / totalSize))

    urllib.request.urlretrieve(url, file, reporthook=dlProgress)
def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Given a 24*7-long list of weekly hourly profiles, generate this for each
    country for the period dt_index, taking account of time zones and
    summer time.
    """

    weekly_profile = pd.Series(weekly_profile, range(24 * 7))

    week_df = pd.DataFrame(index=dt_index, columns=nodes)

    for node in nodes:
        timezone = pytz.timezone(pytz.country_timezones[node[:2]][0])
        tz_dt_index = dt_index.tz_convert(timezone)
        week_df[node] = [24 * dt.weekday() + dt.hour for dt in tz_dt_index]
        week_df[node] = week_df[node].map(weekly_profile)

    week_df = week_df.tz_localize(localize)

    return week_df
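A minimal usage sketch (node names are hypothetical; the first two characters must be a country code for the pytz lookup to work):

import numpy as np
import pandas as pd

idx = pd.date_range("2013-01-01", "2013-01-07 23:00", freq="h", tz="UTC")
weekly = np.arange(24 * 7)   # dummy profile: the hour-of-week index itself

profiles = generate_periodic_profiles(idx, ["DE0 0", "GB0 0"], weekly)
# at 12:00 UTC on Tuesday 2013-01-01, "DE0 0" reads hour-of-week 37 (13:00 CET)
# while "GB0 0" reads 36 (12:00 GMT): the weekly pattern follows local time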

scripts/prepare_sector_network.py

@@ -3,7 +3,6 @@
import pypsa
import re
import os
import pytz
import pandas as pd
import numpy as np
@@ -15,7 +14,7 @@ from scipy.stats import beta
from vresutils.costdata import annuity
from build_energy_totals import build_eea_co2, build_eurostat_co2, build_co2_totals
from helper import override_component_attrs
from helper import override_component_attrs, generate_periodic_profiles
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
from networkx.algorithms import complement
@@ -565,27 +564,6 @@ def average_every_nhours(n, offset):
    return m


def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Give a 24*7 long list of weekly hourly profiles, generate this for each
    country for the period dt_index, taking account of time zones and summer time.
    """

    weekly_profile = pd.Series(weekly_profile, range(24*7))

    week_df = pd.DataFrame(index=dt_index, columns=nodes)

    for node in nodes:
        timezone = pytz.timezone(pytz.country_timezones[node[:2]][0])
        tz_dt_index = dt_index.tz_convert(timezone)
        week_df[node] = [24 * dt.weekday() + dt.hour for dt in tz_dt_index]
        week_df[node] = week_df[node].map(weekly_profile)

    week_df = week_df.tz_localize(localize)

    return week_df
def cycling_shift(df, steps=1):
    """Cyclic shift on index of pd.Series|pd.DataFrame by number of steps"""
    df = df.copy()
@@ -594,55 +572,10 @@ def cycling_shift(df, steps=1):
    return df
def transport_degree_factor(
    temperature,
    deadband_lower=15,
    deadband_upper=20,
    lower_degree_factor=0.5,
    upper_degree_factor=1.6):
    """
    Work out how much energy demand in vehicles increases due to heating and cooling.
    There is a deadband where there is no increase.
    Degree factors are % increase in demand compared to no heating/cooling fuel consumption.
    Returns per unit increase in demand for each place and time
    """

    dd = temperature.copy()

    dd[(temperature > deadband_lower) & (temperature < deadband_upper)] = 0.

    dT_lower = deadband_lower - temperature[temperature < deadband_lower]
    dd[temperature < deadband_lower] = lower_degree_factor / 100 * dT_lower

    dT_upper = temperature[temperature > deadband_upper] - deadband_upper
    dd[temperature > deadband_upper] = upper_degree_factor / 100 * dT_upper

    return dd
def build_heat_demand(n):
# TODO separate sectors and move into own rules
def prepare_data(n):
    ##############
    # Heating
    ##############

    ashp_cop = xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots)
    gshp_cop = xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots)

    solar_thermal = xr.open_dataarray(snakemake.input.solar_thermal_total).to_pandas().reindex(index=n.snapshots)
    # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2
    solar_thermal = options['solar_cf_correction'] * solar_thermal / 1e3

    energy_totals = pd.read_csv(snakemake.input.energy_totals_name, index_col=0)

    nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.)
    nodal_energy_totals.index = pop_layout.index
    # district heat share not weighted by population
    district_heat_share = nodal_energy_totals["district heat share"].round(2)
    nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)

    # copy forward the daily average heat demand into each hour, so it can be multiplied by the intraday profile
    daily_space_heat_demand = xr.open_dataarray(snakemake.input.heat_demand_total).to_pandas().reindex(index=n.snapshots, method="ffill")
@@ -669,8 +602,8 @@ def prepare_data(n):
            else:
                heat_demand_shape = intraday_year_profile

            heat_demand[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals[f"total {sector} {use}"]) * 1e6
            electric_heat_supply[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(nodal_energy_totals[f"electricity {sector} {use}"]) * 1e6
            heat_demand[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(pop_weighted_energy_totals[f"total {sector} {use}"]) * 1e6
            electric_heat_supply[f"{sector} {use}"] = (heat_demand_shape/heat_demand_shape.sum()).multiply(pop_weighted_energy_totals[f"electricity {sector} {use}"]) * 1e6

    heat_demand = pd.concat(heat_demand, axis=1)
    electric_heat_supply = pd.concat(electric_heat_supply, axis=1)
@@ -679,92 +612,7 @@ def prepare_data(n):
    electric_nodes = n.loads.index[n.loads.carrier == "electricity"]
    n.loads_t.p_set[electric_nodes] = n.loads_t.p_set[electric_nodes] - electric_heat_supply.groupby(level=1, axis=1).sum()[electric_nodes]

    ##############
    # Transport
    ##############

    ## Get overall demand curve for all vehicles

    traffic = pd.read_csv(snakemake.input.traffic_data_KFZ, skiprows=2, usecols=["count"], squeeze=True)

    # Generate profiles
    transport_shape = generate_periodic_profiles(
        dt_index=n.snapshots.tz_localize("UTC"),
        nodes=pop_layout.index,
        weekly_profile=traffic.values
    )
    transport_shape = transport_shape / transport_shape.sum()

    transport_data = pd.read_csv(snakemake.input.transport_name, index_col=0)

    nodal_transport_data = transport_data.loc[pop_layout.ct].fillna(0.)
    nodal_transport_data.index = pop_layout.index
    nodal_transport_data["number cars"] = pop_layout["fraction"] * nodal_transport_data["number cars"]
    nodal_transport_data.loc[nodal_transport_data["average fuel efficiency"] == 0., "average fuel efficiency"] = transport_data["average fuel efficiency"].mean()

    # electric motors are more efficient, so alter transport demand
    plug_to_wheels_eta = options.get("bev_plug_to_wheel_efficiency", 0.2)
    battery_to_wheels_eta = plug_to_wheels_eta * options.get("bev_charge_efficiency", 0.9)

    efficiency_gain = nodal_transport_data["average fuel efficiency"] / battery_to_wheels_eta

    # get heating demand for correction to demand time series
    temperature = xr.open_dataarray(snakemake.input.temp_air_total).to_pandas()

    # correction factors for vehicle heating
    dd_ICE = transport_degree_factor(
        temperature,
        options['transport_heating_deadband_lower'],
        options['transport_heating_deadband_upper'],
        options['ICE_lower_degree_factor'],
        options['ICE_upper_degree_factor']
    )

    dd_EV = transport_degree_factor(
        temperature,
        options['transport_heating_deadband_lower'],
        options['transport_heating_deadband_upper'],
        options['EV_lower_degree_factor'],
        options['EV_upper_degree_factor']
    )

    # divide out the heating/cooling demand from ICE totals
    # and multiply back in the heating/cooling demand for EVs
    ice_correction = (transport_shape * (1 + dd_ICE)).sum() / transport_shape.sum()

    energy_totals_transport = nodal_energy_totals["total road"] + nodal_energy_totals["total rail"] - nodal_energy_totals["electricity rail"]

    transport = (transport_shape.multiply(energy_totals_transport) * 1e6 * Nyears).divide(efficiency_gain * ice_correction).multiply(1 + dd_EV)

    ## derive plugged-in availability for PKWs (cars)

    traffic = pd.read_csv(snakemake.input.traffic_data_Pkw, skiprows=2, usecols=["count"], squeeze=True)

    avail_max = options.get("bev_avail_max", 0.95)
    avail_mean = options.get("bev_avail_mean", 0.8)

    avail = avail_max - (avail_max - avail_mean) * (traffic - traffic.min()) / (traffic.mean() - traffic.min())

    avail_profile = generate_periodic_profiles(
        dt_index=n.snapshots.tz_localize("UTC"),
        nodes=pop_layout.index,
        weekly_profile=avail.values
    )

    dsm_week = np.zeros((24*7,))

    dsm_week[(np.arange(0, 7, 1) * 24 + options['bev_dsm_restriction_time'])] = options['bev_dsm_restriction_value']

    dsm_profile = generate_periodic_profiles(
        dt_index=n.snapshots.tz_localize("UTC"),
        nodes=pop_layout.index,
        weekly_profile=dsm_week
    )

    return nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, nodal_transport_data, district_heat_share
    return heat_demand
# TODO checkout PyPSA-Eur script
@@ -1324,6 +1172,11 @@ def add_land_transport(n, costs):

    logger.info("Add land transport")

    transport = pd.read_csv(snakemake.input.transport_demand, index_col=0, parse_dates=True)
    number_cars = pd.read_csv(snakemake.input.transport_data, index_col=0)["number cars"]
    avail_profile = pd.read_csv(snakemake.input.avail_profile, index_col=0, parse_dates=True)
    dsm_profile = pd.read_csv(snakemake.input.dsm_profile, index_col=0, parse_dates=True)

    fuel_cell_share = get(options["land_transport_fuel_cell_share"], investment_year)
    electric_share = get(options["land_transport_electric_share"], investment_year)
    ice_share = 1 - fuel_cell_share - electric_share
@@ -1357,8 +1210,7 @@ def add_land_transport(n, costs):
            p_set=p_set
        )

        p_nom = nodal_transport_data["number cars"] * options.get("bev_charge_rate", 0.011) * electric_share
        p_nom = number_cars * options.get("bev_charge_rate", 0.011) * electric_share

        n.madd("Link",
            nodes,
@@ -1390,7 +1242,7 @@ def add_land_transport(n, costs):
    if electric_share > 0 and options["bev_dsm"]:

        e_nom = nodal_transport_data["number cars"] * options.get("bev_energy", 0.05) * options["bev_availability"] * electric_share
        e_nom = number_cars * options.get("bev_energy", 0.05) * options["bev_availability"] * electric_share

        n.madd("Store",
            nodes,
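Back-of-envelope for the two sizings, with the defaults shown above (bev_charge_rate 0.011 MW and bev_energy 0.05 MWh per car) and hypothetical values for the remaining factors:

number_cars = 1e6                            # hypothetical fleet at one node
electric_share, bev_availability = 0.25, 0.5

p_nom = number_cars * 0.011 * electric_share                    # 2750 MW of chargers
e_nom = number_cars * 0.05 * bev_availability * electric_share  # 6250 MWh of storage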
@@ -1468,6 +1320,15 @@ def add_heat(n, costs):
        "urban central"
    ]

    cop = {
        "air": xr.open_dataarray(snakemake.input.cop_air_total).to_pandas().reindex(index=n.snapshots),
        "ground": xr.open_dataarray(snakemake.input.cop_soil_total).to_pandas().reindex(index=n.snapshots)
    }

    solar_thermal = xr.open_dataarray(snakemake.input.solar_thermal_total).to_pandas().reindex(index=n.snapshots)
    # 1e3 converts from W/m^2 to MW/(1000m^2) = kW/m^2
    solar_thermal = options['solar_cf_correction'] * solar_thermal / 1e3

    for name in heat_systems:

        name_type = "central" if name == "urban central" else "decentral"
@@ -1513,7 +1374,6 @@ def add_heat(n, costs):
            heat_pump_type = "air" if "urban" in name else "ground"

            costs_name = f"{name_type} {heat_pump_type}-sourced heat pump"
            cop = {"air": ashp_cop, "ground": gshp_cop}
            efficiency = cop[heat_pump_type][nodes[name]] if options["time_dep_hp_cop"] else costs.at[costs_name, 'efficiency']

            n.madd("Link",
@@ -1792,6 +1652,8 @@ def create_nodes_for_heat_sector():
        nodes[sector + " rural"] = pop_layout.index
        nodes[sector + " urban decentral"] = pop_layout.index

    district_heat_share = pop_weighted_energy_totals["district heat share"]

    # maximum potential of urban demand covered by district heating
    central_fraction = options['district_heating']["potential"]

    # district heating share at each node
@@ -2075,7 +1937,7 @@ def add_industry(n, costs):
    all_navigation = ["total international navigation", "total domestic navigation"]
    efficiency = options['shipping_average_efficiency'] / costs.at["fuel cell", "efficiency"]
    shipping_hydrogen_share = get(options['shipping_hydrogen_share'], investment_year)

    p_set = shipping_hydrogen_share * nodal_energy_totals.loc[nodes, all_navigation].sum(axis=1) * 1e6 * efficiency / 8760
    p_set = shipping_hydrogen_share * pop_weighted_energy_totals.loc[nodes, all_navigation].sum(axis=1) * 1e6 * efficiency / 8760

    n.madd("Load",
        nodes,
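The recurring * 1e6 / 8760 factor converts annual totals in TWh/a into a constant average power in MW, e.g. for a hypothetical 10 TWh/a of navigation fuel:

demand_twh_per_a = 10.0
p_set_mw = demand_twh_per_a * 1e6 / 8760   # ≈ 1142 MW held flat across the year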
@@ -2089,7 +1951,7 @@ def add_industry(n, costs):
    shipping_oil_share = 1 - shipping_hydrogen_share

    p_set = shipping_oil_share * nodal_energy_totals.loc[nodes, all_navigation].sum(axis=1) * 1e6 / 8760.
    p_set = shipping_oil_share * pop_weighted_energy_totals.loc[nodes, all_navigation].sum(axis=1) * 1e6 / 8760.

    n.madd("Load",
        nodes,
@@ -2099,7 +1961,7 @@ def add_industry(n, costs):
        p_set=p_set
    )

    co2 = shipping_oil_share * nodal_energy_totals.loc[nodes, all_navigation].sum().sum() * 1e6 / 8760 * costs.at["oil", "CO2 intensity"]
    co2 = shipping_oil_share * pop_weighted_energy_totals.loc[nodes, all_navigation].sum().sum() * 1e6 / 8760 * costs.at["oil", "CO2 intensity"]

    n.add("Load",
        "shipping oil emissions",
@@ -2177,7 +2039,7 @@ def add_industry(n, costs):
    )

    all_aviation = ["total international aviation", "total domestic aviation"]

    p_set = nodal_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() * 1e6 / 8760
    p_set = pop_weighted_energy_totals.loc[nodes, all_aviation].sum(axis=1).sum() * 1e6 / 8760

    n.add("Load",
        "kerosene for aviation",
@@ -2297,7 +2159,7 @@ def add_agriculture(n, costs):
        suffix=" agriculture electricity",
        bus=nodes,
        carrier='agriculture electricity',
        p_set=nodal_energy_totals.loc[nodes, "total agriculture electricity"] * 1e6 / 8760
        p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture electricity"] * 1e6 / 8760
    )

    # heat
@@ -2307,7 +2169,7 @@ def add_agriculture(n, costs):
        suffix=" agriculture heat",
        bus=nodes + " services rural heat",
        carrier="agriculture heat",
        p_set=nodal_energy_totals.loc[nodes, "total agriculture heat"] * 1e6 / 8760
        p_set=pop_weighted_energy_totals.loc[nodes, "total agriculture heat"] * 1e6 / 8760
    )

    # machinery
@@ -2316,7 +2178,7 @@ def add_agriculture(n, costs):
    assert electric_share <= 1.
    ice_share = 1 - electric_share

    machinery_nodal_energy = nodal_energy_totals.loc[nodes, "total agriculture machinery"]
    machinery_nodal_energy = pop_weighted_energy_totals.loc[nodes, "total agriculture machinery"]

    if electric_share > 0:
@@ -2436,6 +2298,8 @@ if __name__ == "__main__":
                          Nyears,
                          snakemake.config['costs']['lifetime'])

    pop_weighted_energy_totals = pd.read_csv(snakemake.input.pop_weighted_energy_totals, index_col=0)

    patch_electricity_network(n)

    define_spatial(pop_layout.index)
@@ -2466,7 +2330,7 @@ if __name__ == "__main__":
        if o == "biomasstransport":
            options["biomass_transport"] = True

    nodal_energy_totals, heat_demand, ashp_cop, gshp_cop, solar_thermal, transport, avail_profile, dsm_profile, nodal_transport_data, district_heat_share = prepare_data(n)
    heat_demand = build_heat_demand(n)

    if "nodistrict" in opts:
        options["district_heating"]["progress"] = 0.0