commit 67cf85ce04
@@ -13,15 +13,15 @@ import os
 import sys
 import time
 
+from memory_profiler import _get_memory, choose_backend
 
 logger = logging.getLogger(__name__)
 
 # TODO: provide alternative when multiprocessing is not available
 try:
     from multiprocessing import Pipe, Process
 except ImportError:
-    from multiprocessing.dummy import Process, Pipe
+    from multiprocessing.dummy import Pipe, Process
 
-from memory_profiler import _get_memory, choose_backend
 
 # The memory logging facilities have been adapted from memory_profiler
@@ -15,8 +15,6 @@ import pandas as pd
 import pytz
 import requests
 import yaml
-from pypsa.components import component_attrs, components
-from pypsa.descriptors import Dict
 from tqdm import tqdm
 
 logger = logging.getLogger(__name__)
@@ -362,8 +360,25 @@ def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
     return week_df
 
 
-def parse(l):
-    return yaml.safe_load(l[0]) if len(l) == 1 else {l.pop(0): parse(l)}
+def parse(infix):
+    """
+    Recursively parse a chained wildcard expression into a dictionary or a YAML
+    object.
+
+    Parameters
+    ----------
+    list_to_parse : list
+        The list to parse.
+
+    Returns
+    -------
+    dict or YAML object
+        The parsed list.
+    """
+    if len(infix) == 1:
+        return yaml.safe_load(infix[0])
+    else:
+        return {infix.pop(0): parse(infix)}
 
 
 def update_config_with_sector_opts(config, sector_opts):
@@ -371,8 +386,8 @@ def update_config_with_sector_opts(config, sector_opts):
 
     for o in sector_opts.split("-"):
         if o.startswith("CF+"):
-            l = o.split("+")[1:]
-            update_config(config, parse(l))
+            infix = o.split("+")[1:]
+            update_config(config, parse(infix))
 
 
 def get_checksum_from_zenodo(file_url):
@@ -8,17 +8,15 @@ Prepares brownfield data from previous planning horizon.
 
 import logging
 
-logger = logging.getLogger(__name__)
-
-import pandas as pd
-
-idx = pd.IndexSlice
-
 import numpy as np
+import pandas as pd
 import pypsa
 from _helpers import update_config_with_sector_opts
 from add_existing_baseyear import add_build_year_to_new_assets
 
+logger = logging.getLogger(__name__)
+idx = pd.IndexSlice
 
 
 def add_brownfield(n, n_p, year):
     logger.info(f"Preparing brownfield for the year {year}")
@@ -121,7 +119,7 @@ def add_brownfield(n, n_p, year):
 
 
 def disable_grid_expansion_if_LV_limit_hit(n):
-    if not "lv_limit" in n.global_constraints.index:
+    if "lv_limit" not in n.global_constraints.index:
         return
 
     total_expansion = (
@@ -133,7 +131,7 @@ def disable_grid_expansion_if_LV_limit_hit(n):
 
     # allow small numerical differences
     if lv_limit - total_expansion < 1:
-        logger.info(f"LV is already reached, disabling expansion and LV limit")
+        logger.info("LV is already reached, disabling expansion and LV limit")
         extendable_acs = n.lines.query("s_nom_extendable").index
         n.lines.loc[extendable_acs, "s_nom_extendable"] = False
         n.lines.loc[extendable_acs, "s_nom"] = n.lines.loc[extendable_acs, "s_nom_min"]
@@ -294,10 +294,10 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
     nuts3 = gpd.read_file(nuts3_shapes).set_index("index")
 
     def upsample(cntry, group):
-        l = opsd_load[cntry]
+        load = opsd_load[cntry]
 
         if len(group) == 1:
-            return pd.DataFrame({group.index[0]: l})
+            return pd.DataFrame({group.index[0]: load})
         nuts3_cntry = nuts3.loc[nuts3.country == cntry]
         transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
         gdp_n = pd.Series(
@@ -314,8 +314,8 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
             # overwrite factor because nuts3 provides no data for UA+MD
             factors = normed(ua_md_gdp.loc[group.index, "GDP_PPP"].squeeze())
             return pd.DataFrame(
-                factors.values * l.values[:, np.newaxis],
-                index=l.index,
+                factors.values * load.values[:, np.newaxis],
+                index=load.index,
                 columns=factors.index,
             )
 
@@ -622,7 +622,7 @@ def attach_hydro(n, costs, ppl, profile_hydro, hydro_capacities, carriers, **par
         hydro.max_hours > 0, hydro.country.map(max_hours_country)
     ).fillna(6)
 
-    if flatten_dispatch := params.get("flatten_dispatch", False):
+    if params.get("flatten_dispatch", False):
         buffer = params.get("flatten_dispatch_buffer", 0.2)
         average_capacity_factor = inflow_t[hydro.index].mean() / hydro["p_nom"]
         p_max_pu = (average_capacity_factor + buffer).clip(upper=1)
@@ -8,25 +8,20 @@ horizon.
 """
 
 import logging
 
-logger = logging.getLogger(__name__)
-
-import pandas as pd
-
-idx = pd.IndexSlice
-
 from types import SimpleNamespace
 
 import country_converter as coco
 import numpy as np
+import pandas as pd
 import pypsa
 import xarray as xr
 from _helpers import update_config_with_sector_opts
 from add_electricity import sanitize_carriers
 from prepare_sector_network import cluster_heat_buses, define_spatial, prepare_costs
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
+idx = pd.IndexSlice
 spatial = SimpleNamespace()
 
 
@@ -9,11 +9,11 @@ using data from JRC ENSPRESO.
 
 import logging
 
-logger = logging.getLogger(__name__)
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 
+logger = logging.getLogger(__name__)
 AVAILABLE_BIOMASS_YEARS = [2010, 2020, 2030, 2040, 2050]
 
 
@@ -80,4 +80,9 @@ def build_biomass_transport_costs():
 
 
 if __name__ == "__main__":
+    if "snakemake" not in globals():
+        from _helpers import mock_snakemake
+
+        snakemake = mock_snakemake("build_biomass_transport_costs")
+
     build_biomass_transport_costs()
@@ -28,7 +28,7 @@ if __name__ == "__main__":
         gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
     )
 
-    I = cutout.indicatormatrix(clustered_regions)
+    I = cutout.indicatormatrix(clustered_regions)  # noqa: E741
 
     pop = {}
     for item in ["total", "urban", "rural"]:
@@ -41,13 +41,13 @@ Outputs
 
 import logging
 
-logger = logging.getLogger(__name__)
-import dateutil
 import numpy as np
 import pandas as pd
 from _helpers import configure_logging
 from pandas import Timedelta as Delta
 
+logger = logging.getLogger(__name__)
 
 
 def load_timeseries(fn, years, countries, powerstatistics=True):
     """
@@ -7,9 +7,6 @@ Build total energy demands per country using JRC IDEES, eurostat, and EEA data.
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
 import multiprocessing as mp
 from functools import partial
 
@@ -21,7 +18,7 @@ from _helpers import mute_print
 from tqdm import tqdm
 
 cc = coco.CountryConverter()
+logger = logging.getLogger(__name__)
 idx = pd.IndexSlice
 
 
@@ -9,12 +9,12 @@ production sites with data from SciGRID_gas and Global Energy Monitor.
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from cluster_gas_network import load_bus_regions
 
+logger = logging.getLogger(__name__)
 
 
 def read_scigrid_gas(fn):
     df = gpd.read_file(fn)
@@ -27,8 +27,11 @@ def build_gem_lng_data(fn):
     df = pd.read_excel(fn[0], sheet_name="LNG terminals - data")
     df = df.set_index("ComboID")
 
-    remove_country = ["Cyprus", "Turkey"]
-    remove_terminal = ["Puerto de la Luz LNG Terminal", "Gran Canaria LNG Terminal"]
+    remove_country = ["Cyprus", "Turkey"]  # noqa: F841
+    remove_terminal = [  # noqa: F841
+        "Puerto de la Luz LNG Terminal",
+        "Gran Canaria LNG Terminal",
+    ]
 
     df = df.query(
         "Status != 'Cancelled' \
@@ -45,8 +48,8 @@ def build_gem_prod_data(fn):
     df = pd.read_excel(fn[0], sheet_name="Gas extraction - main")
     df = df.set_index("GEM Unit ID")
 
-    remove_country = ["Cyprus", "Türkiye"]
-    remove_fuel_type = ["oil"]
+    remove_country = ["Cyprus", "Türkiye"]  # noqa: F841
+    remove_fuel_type = ["oil"]  # noqa: F841
 
     df = df.query(
         "Status != 'shut in' \
@@ -96,8 +99,8 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):
     ]
 
     sto = read_scigrid_gas(sto_fn)
-    remove_country = ["RU", "UA", "TR", "BY"]
-    sto = sto.query("country_code != @remove_country")
+    remove_country = ["RU", "UA", "TR", "BY"]  # noqa: F841
+    sto = sto.query("country_code not in @remove_country")
 
     # production sites inside the model scope
     prod = build_gem_prod_data(gem_fn)
@@ -9,13 +9,13 @@ Preprocess gas network based on data from bthe SciGRID_gas project
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from pypsa.geo import haversine_pts
 from shapely.geometry import Point
 
+logger = logging.getLogger(__name__)
+
 
 def diameter_to_capacity(pipe_diameter_mm):
     """
@@ -34,7 +34,7 @@ if __name__ == "__main__":
         gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
     )
 
-    I = cutout.indicatormatrix(clustered_regions)
+    I = cutout.indicatormatrix(clustered_regions)  # noqa: E741
 
     pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
 
@@ -7,9 +7,6 @@ Build spatial distribution of industries from Hotmaps database.
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
 import uuid
 from itertools import product
 
@@ -18,6 +15,7 @@ import geopandas as gpd
 import pandas as pd
 from packaging.version import Version, parse
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
 
 
@@ -32,7 +30,7 @@ def locate_missing_industrial_sites(df):
     try:
         from geopy.extra.rate_limiter import RateLimiter
         from geopy.geocoders import Nominatim
-    except:
+    except ImportError:
         raise ModuleNotFoundError(
             "Optional dependency 'geopy' not found."
             "Install via 'conda install -c conda-forge geopy'"
@@ -101,7 +99,7 @@ def prepare_hotmaps_database(regions):
     # get all duplicated entries
    duplicated_i = gdf.index[gdf.index.duplicated()]
     # convert from raw data country name to iso-2-code
-    code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
+    code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")  # noqa: F841
     # screen out malformed country allocation
     gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
     # concat not duplicated and filtered gdf
@@ -7,11 +7,8 @@ Build industrial production per country.
 """
 
 import logging
-from functools import partial
-
-logger = logging.getLogger(__name__)
-
 import multiprocessing as mp
+from functools import partial
 
 import country_converter as coco
 import numpy as np
@@ -19,6 +16,7 @@ import pandas as pd
 from _helpers import mute_print
 from tqdm import tqdm
 
+logger = logging.getLogger(__name__)
 cc = coco.CountryConverter()
 
 tj_to_ktoe = 0.0238845
@@ -50,7 +50,6 @@ With a heat balance considering the maximum temperature threshold of the transmi
 the maximal possible capacity factor "s_max_pu" for each transmission line at each time step is calculated.
 """
 
-import logging
 import re
 
 import atlite
@@ -99,7 +98,7 @@ def calculate_line_rating(n, cutout):
     -------
     xarray DataArray object with maximal power.
     """
-    relevant_lines = n.lines[(n.lines["underground"] == False)]
+    relevant_lines = n.lines[~n.lines["underground"]]
     buses = relevant_lines[["bus0", "bus1"]].values
     x = n.buses.x
     y = n.buses.y
@@ -8,15 +8,14 @@ Build mapping between cutout grid cells and population (total, urban, rural).
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import atlite
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 import xarray as xr
 
+logger = logging.getLogger(__name__)
 
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -34,7 +33,7 @@ if __name__ == "__main__":
     nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
 
     # Indicator matrix NUTS3 -> grid cells
-    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)
+    I = atlite.cutout.compute_indicatormatrix(nuts3.geometry, grid_cells)  # noqa: E741
 
     # Indicator matrix grid_cells -> NUTS3; inprinciple Iinv*I is identity
     # but imprecisions mean not perfect
@@ -340,7 +340,7 @@ if __name__ == "__main__":
         f"Completed weighted capacity factor time series calculation ({duration:2.2f}s)"
     )
 
-    logger.info(f"Calculating maximal capacity per bus")
+    logger.info("Calculating maximal capacity per bus")
     p_nom_max = capacity_per_sqkm * availability @ area
 
     logger.info("Calculate average distances.")
@@ -554,7 +554,7 @@ def prepare_temperature_data():
 
 
 # windows ---------------------------------------------------------------
-def window_limit(l, window_assumptions):
+def window_limit(l, window_assumptions):  # noqa: E741
     """
     Define limit u value from which on window is retrofitted.
     """
@@ -567,7 +567,7 @@ def window_limit(l, window_assumptions):
     return m * l + a
 
 
-def u_retro_window(l, window_assumptions):
+def u_retro_window(l, window_assumptions):  # noqa: E741
     """
     Define retrofitting value depending on renovation strength.
     """
@@ -580,7 +580,7 @@ def u_retro_window(l, window_assumptions):
     return max(m * l + a, 0.8)
 
 
-def window_cost(u, cost_retro, window_assumptions):
+def window_cost(u, cost_retro, window_assumptions):  # noqa: E741
     """
     Get costs for new windows depending on u value.
     """
@@ -600,7 +600,7 @@ def window_cost(u, cost_retro, window_assumptions):
     return window_cost
 
 
-def calculate_costs(u_values, l, cost_retro, window_assumptions):
+def calculate_costs(u_values, l, cost_retro, window_assumptions):  # noqa: E741
     """
     Returns costs for a given retrofitting strength weighted by the average
     surface/volume ratio of the component for each building type.
@@ -626,7 +626,7 @@ def calculate_costs(u_values, l, cost_retro, window_assumptions):
     )
 
 
-def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
+def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):  # noqa: E741
     """
     Calculate U-values after building retrofitting, depending on the old
     U-values (u_values). This is for simple insulation measuers, adding an
@@ -746,7 +746,7 @@ def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
     """
     # (1) by transmission
     # calculate new U values of building elements due to additional insulation
-    for l in l_strength:
+    for l in l_strength:  # noqa: E741
         u_values[f"new_U_{l}"] = calculate_new_u(
             u_values, l, l_weight, window_assumptions
         )
@@ -34,7 +34,7 @@ if __name__ == "__main__":
         gpd.read_file(snakemake.input.regions_onshore).set_index("name").buffer(0)
     )
 
-    I = cutout.indicatormatrix(clustered_regions)
+    I = cutout.indicatormatrix(clustered_regions)  # noqa: E741
 
     pop_layout = xr.open_dataarray(snakemake.input.pop_layout)
 
@@ -8,14 +8,14 @@ Cluster gas transmission network to clustered model regions.
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import geopandas as gpd
 import pandas as pd
 from packaging.version import Version, parse
 from pypsa.geo import haversine_pts
 from shapely import wkt
 
+logger = logging.getLogger(__name__)
+
 
 def concat_gdf(gdf_list, crs="EPSG:4326"):
     """
@@ -133,6 +133,7 @@ import pyomo.environ as po
 import pypsa
 import seaborn as sns
 from _helpers import configure_logging, update_p_nom_max
+from add_electricity import load_costs
 from pypsa.clustering.spatial import (
     busmap_by_greedy_modularity,
     busmap_by_hac,
@@ -141,11 +142,7 @@ from pypsa.clustering.spatial import (
 )
 
 warnings.filterwarnings(action="ignore", category=UserWarning)
 
-from add_electricity import load_costs
-
-
 idx = pd.IndexSlice
 
 logger = logging.getLogger(__name__)
 
 
@@ -6,8 +6,6 @@
 Copy used configuration files and important scripts for archiving.
 """
 
-from pathlib import Path
-from shutil import copy
 
 import yaml
 
@@ -8,9 +8,6 @@ capacity factors, curtailment, energy balances, prices and other metrics.
 """
 
 import logging
-
-logger = logging.getLogger(__name__)
-
 import sys
 
 import numpy as np
@@ -19,7 +16,7 @@ import pypsa
 from prepare_sector_network import prepare_costs
 
 idx = pd.IndexSlice
-
+logger = logging.getLogger(__name__)
 opt_name = {"Store": "e", "Line": "s", "Transformer": "s"}
 
 
@@ -509,10 +506,6 @@ def calculate_weighted_prices(n, label, weighted_prices):
 
         if carrier in ["H2", "gas"]:
             load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
-        elif carrier[:5] == "space":
-            load = heat_demand_df[buses.str[:2]].rename(
-                columns=lambda i: str(i) + suffix
-            )
         else:
             load = n.loads_t.p_set[buses]
 
@@ -12,15 +12,12 @@ other metrics.
 import numpy as np
 import pandas as pd
 import pypsa
-from make_summary import (
-    assign_carriers,
-    assign_locations,
-    calculate_cfs,
-    calculate_nodal_cfs,
-    calculate_nodal_costs,
-)
+from make_summary import calculate_cfs  # noqa: F401
+from make_summary import calculate_nodal_cfs  # noqa: F401
+from make_summary import calculate_nodal_costs  # noqa: F401
+from make_summary import assign_carriers, assign_locations
 from prepare_sector_network import prepare_costs
-from pypsa.descriptors import get_active_assets, nominal_attrs
+from pypsa.descriptors import get_active_assets
 from six import iteritems
 
 idx = pd.IndexSlice
@@ -13,8 +13,6 @@ nodes.
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import cartopy.crs as ccrs
 import geopandas as gpd
 import matplotlib.pyplot as plt
@@ -24,6 +22,7 @@ from make_summary import assign_carriers
 from plot_summary import preferred_order, rename_techs
 from pypsa.plot import add_legend_circles, add_legend_lines, add_legend_patches
 
+logger = logging.getLogger(__name__)
 plt.style.use(["ggplot"])
 
 
@@ -896,14 +895,12 @@ def plot_series(network, carrier="AC", name="test"):
     fig.tight_layout()
 
     fig.savefig(
-        "{}/{RDIR}maps/series-{}-{}-{}-{}-{}.pdf".format(
-            "results",
+        "results/{}maps/series-{}-{}-{}-{}.pdf".format(
             snakemake.params.RDIR,
             snakemake.wildcards["ll"],
             carrier,
             start,
             stop,
-            name,
         ),
         transparent=True,
     )
@@ -8,17 +8,14 @@ Creates plots from summary CSV files.
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
-import numpy as np
 import pandas as pd
 
-plt.style.use("ggplot")
-
 from prepare_sector_network import co2_emissions_year
 
+logger = logging.getLogger(__name__)
+plt.style.use("ggplot")
 
 
 # consolidate and rename
 def rename_techs(label):
@@ -9,7 +9,6 @@ import pandas as pd
 import pypsa
 import seaborn as sns
 from _helpers import configure_logging
-from pypsa.statistics import get_bus_and_carrier
 
 sns.set_theme("paper", style="whitegrid")
 
@@ -58,7 +58,6 @@ Description
 """
 
 import logging
-import re
 
 import numpy as np
 import pandas as pd
@@ -195,7 +194,7 @@ def apply_time_segmentation(n, segments, solver_name="cbc"):
     logger.info(f"Aggregating time series to {segments} segments.")
     try:
         import tsam.timeseriesaggregation as tsam
-    except:
+    except ImportError:
         raise ModuleNotFoundError(
             "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
         )
@@ -305,7 +305,7 @@ def set_carbon_constraints(n, opts):
         m = re.match(r"^\d+p\d$", o, re.IGNORECASE)
         if m is not None:
             budget = snakemake.config["co2_budget"][m.group(0)] * 1e9
-            if budget != None:
+            if budget is not None:
                 logger.info(f"add carbon budget of {budget}")
                 n.add(
                     "GlobalConstraint",
@@ -428,7 +428,7 @@ def apply_time_segmentation_perfect(
     """
     try:
         import tsam.timeseriesaggregation as tsam
-    except:
+    except ImportError:
         raise ModuleNotFoundError(
             "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
         )
@@ -11,6 +11,7 @@ import logging
 import os
 import re
 from itertools import product
+from types import SimpleNamespace
 
 import networkx as nx
 import numpy as np
@@ -22,18 +23,13 @@ from add_electricity import calculate_annuity, sanitize_carriers
 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
 from networkx.algorithms import complement
 from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
+from packaging.version import Version, parse
 from pypsa.geo import haversine_pts
 from pypsa.io import import_components_from_dataframe
 from scipy.stats import beta
 
-logger = logging.getLogger(__name__)
-
-from types import SimpleNamespace
-
 spatial = SimpleNamespace()
+logger = logging.getLogger(__name__)
 
-from packaging.version import Version, parse
-
 pd_version = parse(pd.__version__)
 agg_group_kwargs = dict(numeric_only=False) if pd_version >= Version("1.3") else {}
@@ -187,8 +183,6 @@ def define_spatial(nodes, options):
     return spatial
 
 
-from types import SimpleNamespace
-
 spatial = SimpleNamespace()
 
 
@@ -1476,7 +1470,6 @@ def add_land_transport(n, costs):
     # TODO options?
 
     logger.info("Add land transport")
-    nhours = n.snapshot_weightings.generators.sum()
 
     transport = pd.read_csv(
         snakemake.input.transport_demand, index_col=0, parse_dates=True
@@ -3124,6 +3117,7 @@ def add_waste_heat(n):
     # TODO options?
 
     logger.info("Add possibility to use industrial waste heat in district heating")
+    cf_industry = snakemake.params.industry
 
     # AC buses with district heating
     urban_central = n.buses.index[n.buses.carrier == "urban central heat"]
@@ -3484,7 +3478,7 @@ def apply_time_segmentation(
     """
     try:
         import tsam.timeseriesaggregation as tsam
-    except:
+    except ImportError:
         raise ModuleNotFoundError(
             "Optional dependency 'tsam' not found." "Install via 'pip install tsam'"
         )
@@ -7,13 +7,12 @@ Retrieve monthly fuel prices from Destatis.
 """
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 from pathlib import Path
 
 from _helpers import configure_logging, progress_retrieve
 
+logger = logging.getLogger(__name__)
 
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -7,14 +7,13 @@ Retrieve and extract data bundle for sector-coupled studies.
 """
 
 import logging
 
-logger = logging.getLogger(__name__)
-
 import tarfile
 from pathlib import Path
 
 from _helpers import configure_logging, progress_retrieve, validate_checksum
 
+logger = logging.getLogger(__name__)
 
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
@@ -86,7 +86,7 @@ The rule :mod:`simplify_network` does up to four things:
 """
 
 import logging
-from functools import partial, reduce
+from functools import reduce
 
 import numpy as np
 import pandas as pd
@@ -39,10 +39,10 @@ import xarray as xr
 from _benchmark import memory_logger
 from _helpers import configure_logging, get_opt, update_config_with_sector_opts
 from pypsa.descriptors import get_activity_mask
+from pypsa.descriptors import get_switchable_as_dense as get_as_dense
 
 logger = logging.getLogger(__name__)
 pypsa.pf.logger.setLevel(logging.WARNING)
-from pypsa.descriptors import get_switchable_as_dense as get_as_dense
 
 
 def add_land_use_constraint(n, planning_horizons, config):
@@ -572,7 +572,7 @@ def add_SAFE_constraints(n, config):
     peakdemand = n.loads_t.p_set.sum(axis=1).max()
     margin = 1.0 + config["electricity"]["SAFE_reservemargin"]
     reserve_margin = peakdemand * margin
-    conventional_carriers = config["electricity"]["conventional_carriers"]
+    conventional_carriers = config["electricity"]["conventional_carriers"]  # noqa: F841
     ext_gens_i = n.generators.query(
         "carrier in @conventional_carriers & p_nom_extendable"
     ).index
@@ -690,11 +690,11 @@ def add_battery_constraints(n):
 
 
 def add_lossy_bidirectional_link_constraints(n):
-    if not n.links.p_nom_extendable.any() or not "reversed" in n.links.columns:
+    if not n.links.p_nom_extendable.any() or "reversed" not in n.links.columns:
         return
 
     n.links["reversed"] = n.links.reversed.fillna(0).astype(bool)
-    carriers = n.links.loc[n.links.reversed, "carrier"].unique()
+    carriers = n.links.loc[n.links.reversed, "carrier"].unique()  # noqa: F841
 
     forward_i = n.links.query(
         "carrier in @carriers and ~reversed and p_nom_extendable"