merge master

lisazeyen 2023-08-30 12:03:49 +02:00
commit 9ec7442c69
14 changed files with 152 additions and 73 deletions

.gitignore vendored
View File

@@ -29,18 +29,18 @@ dconf
 /data/links_p_nom.csv
 /data/*totals.csv
 /data/biomass*
-/data/emobility/
-/data/eea*
-/data/jrc*
+/data/bundle-sector/emobility/
+/data/bundle-sector/eea*
+/data/bundle-sector/jrc*
 /data/heating/
-/data/eurostat*
+/data/bundle-sector/eurostat*
 /data/odyssee/
 /data/transport_data.csv
-/data/switzerland*
+/data/bundle-sector/switzerland*
 /data/.nfs*
-/data/Industrial_Database.csv
+/data/bundle-sector/Industrial_Database.csv
 /data/retro/tabula-calculator-calcsetbuilding.csv
-/data/nuts*
+/data/bundle-sector/nuts*
 data/gas_network/scigrid-gas/
 data/costs_*.csv

View File

@@ -14,6 +14,12 @@ Upcoming Release
 * For industry distribution, use EPRTR as fallback if ETS data is not available.
+* The minimum capacity for renewable generators when using the myopic option has been fixed.
+* Files downloaded from zenodo are now write-protected to prevent accidental re-download.
+* Files extracted from sector-coupled data bundle have been moved from ``data/`` to ``data/sector-bundle``.
 * New feature multi-decade optimisation with perfect foresight.
 PyPSA-Eur 0.8.1 (27th July 2023)

View File

@@ -350,7 +350,9 @@ rule add_electricity:
         hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
         geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
         unit_commitment="data/unit_commitment.csv",
-        fuel_price=RESOURCES + "monthly_fuel_price.csv",
+        fuel_price=RESOURCES + "monthly_fuel_price.csv"
+        if config["conventional"]["dynamic_fuel_price"]
+        else [],
         load=RESOURCES + "load.csv",
         nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
     output:
@@ -478,7 +480,7 @@ rule prepare_network:
     input:
         RESOURCES + "networks/elec_s{simpl}_{clusters}_ec.nc",
         tech_costs=COSTS,
-        co2_price=RESOURCES + "co2_price.csv",
+        co2_price=lambda w: RESOURCES + "co2_price.csv" if "Ept" in w.opts else [],
     output:
         RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
     log:
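Both changes above make an input optional: fuel_price is only requested when config["conventional"]["dynamic_fuel_price"] is set (a plain expression, evaluated once when the Snakefile is parsed), while co2_price is a lambda over the wildcards object, evaluated per job so it can depend on the {opts} value of that output. A minimal, self-contained sketch of the two patterns with hypothetical rule and file names:

rule example:
    input:
        # config-dependent input: decided once at parse time
        fuel_price="data/fuel_price.csv"
        if config["conventional"]["dynamic_fuel_price"]
        else [],
        # wildcard-dependent input: the lambda is called for each job with
        # its wildcards, so the dependency can differ between {opts} values
        co2_price=lambda w: "data/co2_price.csv" if "Ept" in w.opts else [],
    output:
        "results/example_{opts}.nc",
    shell:
        "touch {output}"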

View File

@@ -242,9 +242,9 @@ rule build_energy_totals:
         energy=config["energy"],
     input:
         nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
-        co2="data/eea/UNFCCC_v23.csv",
-        swiss="data/switzerland-sfoe/switzerland-new_format.csv",
-        idees="data/jrc-idees-2015",
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
+        swiss="data/bundle-sector/switzerland-sfoe/switzerland-new_format.csv",
+        idees="data/bundle-sector/jrc-idees-2015",
         district_heat_share="data/district_heat_share.csv",
         eurostat=input_eurostat,
     output:
@@ -272,7 +272,7 @@ rule build_biomass_potentials:
             "https://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/ENSPRESO/ENSPRESO_BIOMASS.xlsx",
             keep_local=True,
         ),
-        nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",  # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
+        nuts2="data/bundle-sector/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",  # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"),
         swiss_cantons=ancient("data/bundle/ch_cantons.csv"),
@@ -366,7 +366,7 @@ if not config["sector"]["regional_co2_sequestration_potential"]["enable"]:
 rule build_salt_cavern_potentials:
     input:
-        salt_caverns="data/h2_salt_caverns_GWh_per_sqkm.geojson",
+        salt_caverns="data/bundle-sector/h2_salt_caverns_GWh_per_sqkm.geojson",
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
     output:
@@ -388,7 +388,7 @@ rule build_ammonia_production:
     params:
         countries=config["countries"],
     input:
-        usgs="data/myb1-2017-nitro.xls",
+        usgs="data/bundle-sector/myb1-2017-nitro.xls",
     output:
        ammonia_production=RESOURCES + "ammonia_production.csv",
    threads: 1
@@ -410,7 +410,7 @@ rule build_industry_sector_ratios:
         ammonia=config["sector"].get("ammonia", False),
     input:
         ammonia_production=RESOURCES + "ammonia_production.csv",
-        idees="data/jrc-idees-2015",
+        idees="data/bundle-sector/jrc-idees-2015",
     output:
         industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv",
     threads: 1
@@ -432,8 +432,8 @@ rule build_industrial_production_per_country:
         countries=config["countries"],
     input:
         ammonia_production=RESOURCES + "ammonia_production.csv",
-        jrc="data/jrc-idees-2015",
-        eurostat="data/eurostat-energy_balances-may_2018_edition",
+        jrc="data/bundle-sector/jrc-idees-2015",
+        eurostat="data/bundle-sector/eurostat-energy_balances-may_2018_edition",
     output:
         industrial_production_per_country=RESOURCES
         + "industrial_production_per_country.csv",
@@ -483,7 +483,7 @@ rule build_industrial_distribution_key:
     input:
         regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
         clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
-        hotmaps_industrial_database="data/Industrial_Database.csv",
+        hotmaps_industrial_database="data/bundle-sector/Industrial_Database.csv",
     output:
         industrial_distribution_key=RESOURCES
         + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
@@ -558,7 +558,7 @@ rule build_industrial_energy_demand_per_country_today:
         countries=config["countries"],
         industry=config["industry"],
     input:
-        jrc="data/jrc-idees-2015",
+        jrc="data/bundle-sector/jrc-idees-2015",
         ammonia_production=RESOURCES + "ammonia_production.csv",
         industrial_production_per_country=RESOURCES
         + "industrial_production_per_country.csv",
@@ -684,8 +684,8 @@ rule build_transport_demand:
         pop_weighted_energy_totals=RESOURCES
         + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
         transport_data=RESOURCES + "transport_data.csv",
-        traffic_data_KFZ="data/emobility/KFZ__count",
-        traffic_data_Pkw="data/emobility/Pkw__count",
+        traffic_data_KFZ="data/bundle-sector/emobility/KFZ__count",
+        traffic_data_Pkw="data/bundle-sector/emobility/Pkw__count",
         temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
     output:
         transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv",
@@ -734,7 +734,7 @@ rule prepare_sector_network:
         avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv",
         dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
         co2_totals_name=RESOURCES + "co2_totals.csv",
-        co2="data/eea/UNFCCC_v23.csv",
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
         biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
         heat_profile="data/heat_load_profile_BDEW.csv",
         costs="data/costs_{}.csv".format(config["costs"]["year"])

View File

@@ -42,7 +42,7 @@ def has_internet_access(url="www.zenodo.org") -> bool:
 def input_eurostat(w):
     # 2016 includes BA, 2017 does not
     report_year = config["energy"]["eurostat_report_year"]
-    return f"data/eurostat-energy_balances-june_{report_year}_edition"
+    return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"
 def solved_previous_horizon(wildcards):
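input_eurostat is used as an input function by other rules in this workflow (e.g. eurostat=input_eurostat in build_energy_totals above): Snakemake calls it with the job's wildcards and treats the returned path as the dependency. A minimal sketch of that mechanism, with a hypothetical consuming rule:

def input_eurostat(w):
    # the wildcards argument is unused here but required by Snakemake's
    # input-function signature; the path is picked from the config instead
    report_year = config["energy"]["eurostat_report_year"]
    return f"data/bundle-sector/eurostat-energy_balances-june_{report_year}_edition"


rule uses_eurostat:  # hypothetical rule, for illustration only
    input:
        eurostat=input_eurostat,
    output:
        "results/energy_totals.csv",
    script:
        "scripts/build_energy_totals.py"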

View File

@@ -135,6 +135,8 @@ rule plot_summary:
         countries=config["countries"],
         planning_horizons=config["scenario"]["planning_horizons"],
         sector_opts=config["scenario"]["sector_opts"],
+        emissions_scope=config["energy"]["emissions"],
+        eurostat_report_year=config["energy"]["eurostat_report_year"],
         plotting=config["plotting"],
         RDIR=RDIR,
     input:
@@ -142,7 +144,7 @@
         energy=RESULTS + "csvs/energy.csv",
         balances=RESULTS + "csvs/supply_energy.csv",
         eurostat=input_eurostat,
-        co2="data/eea/UNFCCC_v23.csv",
+        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
     output:
         costs=RESULTS + "graphs/costs.pdf",
         energy=RESULTS + "graphs/energy.pdf",

View File

@@ -27,7 +27,7 @@ if config["enable"]["retrieve"] and config["enable"].get("retrieve_databundle",
     rule retrieve_databundle:
         output:
-            expand("data/bundle/{file}", file=datafiles),
+            protected(expand("data/bundle/{file}", file=datafiles)),
         log:
             LOGS + "retrieve_databundle.log",
         resources:
@@ -92,7 +92,7 @@ if config["enable"]["retrieve"] and config["enable"].get(
                 static=True,
             ),
         output:
-            RESOURCES + "natura.tiff",
+            protected(RESOURCES + "natura.tiff"),
         log:
             LOGS + "retrieve_natura_raster.log",
         resources:
@@ -106,22 +106,30 @@ if config["enable"]["retrieve"] and config["enable"].get(
     "retrieve_sector_databundle", True
 ):
     datafiles = [
-        "data/eea/UNFCCC_v23.csv",
-        "data/switzerland-sfoe/switzerland-new_format.csv",
-        "data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
-        "data/myb1-2017-nitro.xls",
-        "data/Industrial_Database.csv",
-        "data/emobility/KFZ__count",
-        "data/emobility/Pkw__count",
-        "data/h2_salt_caverns_GWh_per_sqkm.geojson",
-        directory("data/eurostat-energy_balances-june_2016_edition"),
-        directory("data/eurostat-energy_balances-may_2018_edition"),
-        directory("data/jrc-idees-2015"),
+        "eea/UNFCCC_v23.csv",
+        "switzerland-sfoe/switzerland-new_format.csv",
+        "nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",
+        "myb1-2017-nitro.xls",
+        "Industrial_Database.csv",
+        "emobility/KFZ__count",
+        "emobility/Pkw__count",
+        "h2_salt_caverns_GWh_per_sqkm.geojson",
+    ]
+    datafolders = [
+        protected(
+            directory("data/bundle-sector/eurostat-energy_balances-june_2016_edition")
+        ),
+        protected(
+            directory("data/bundle-sector/eurostat-energy_balances-may_2018_edition")
+        ),
+        protected(directory("data/bundle-sector/jrc-idees-2015")),
     ]
     rule retrieve_sector_databundle:
         output:
-            *datafiles,
+            protected(expand("data/bundle-sector/{files}", files=datafiles)),
+            *datafolders,
         log:
             LOGS + "retrieve_sector_databundle.log",
         retries: 2
@@ -143,7 +151,9 @@ if config["enable"]["retrieve"] and (
     rule retrieve_gas_infrastructure_data:
         output:
-            expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles),
+            protected(
+                expand("data/gas_network/scigrid-gas/data/{files}", files=datafiles)
+            ),
         log:
             LOGS + "retrieve_gas_infrastructure_data.log",
         retries: 2
@@ -187,7 +197,7 @@ if config["enable"]["retrieve"]:
                 static=True,
             ),
         output:
-            "data/shipdensity_global.zip",
+            protected("data/shipdensity_global.zip"),
         log:
             LOGS + "retrieve_ship_raster.log",
         resources:
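Every retrieval rule now wraps its outputs in protected(), so Snakemake marks the downloaded files read-only once the job finishes and a later run cannot accidentally overwrite or silently re-trigger the download (this is the write-protection mentioned in the release notes); expand() builds the file list and directory() declares folder outputs. A condensed sketch of the combination, with a placeholder file list and download command:

datafiles = ["eea/UNFCCC_v23.csv", "myb1-2017-nitro.xls"]  # shortened list

rule retrieve_example:  # placeholder rule name and command
    output:
        # individual files, write-protected after the job completes
        protected(expand("data/bundle-sector/{file}", file=datafiles)),
        # a whole extracted folder, also write-protected
        protected(directory("data/bundle-sector/jrc-idees-2015")),
    shell:
        "echo 'download and unpack the sector bundle here'"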

View File

@@ -165,7 +165,7 @@ def sanitize_carriers(n, config):
     nice_names = (
         pd.Series(config["plotting"]["nice_names"])
         .reindex(carrier_i)
-        .fillna(carrier_i.to_series().str.title())
+        .fillna(carrier_i.to_series())
     )
     n.carriers["nice_name"] = n.carriers.nice_name.where(
         n.carriers.nice_name != "", nice_names
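The only change here is dropping .str.title(): carriers without an entry in config["plotting"]["nice_names"] now fall back to their raw carrier name instead of a title-cased version, which previously mangled acronyms such as "OCGT" into "Ocgt". A small self-contained illustration with made-up carrier names:

import pandas as pd

carrier_i = pd.Index(["OCGT", "onwind", "solar rooftop"])
nice_names_cfg = {"onwind": "Onshore Wind"}

# old behaviour: title-case the fallback, e.g. "OCGT" -> "Ocgt"
old = pd.Series(nice_names_cfg).reindex(carrier_i).fillna(carrier_i.to_series().str.title())
# new behaviour: keep the raw carrier name as the fallback
new = pd.Series(nice_names_cfg).reindex(carrier_i).fillna(carrier_i.to_series())

print(old.tolist())  # ['Ocgt', 'Onshore Wind', 'Solar Rooftop']
print(new.tolist())  # ['OCGT', 'Onshore Wind', 'solar rooftop']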

View File

@@ -446,15 +446,23 @@ def add_heating_capacities_installed_before_baseyear(
     # split existing capacities between residential and services
     # proportional to energy demand
+    p_set_sum = n.loads_t.p_set.sum()
     ratio_residential = pd.Series(
         [
             (
-                n.loads_t.p_set.sum()[f"{node} residential rural heat"]
+                p_set_sum[f"{node} residential rural heat"]
                 / (
-                    n.loads_t.p_set.sum()[f"{node} residential rural heat"]
-                    + n.loads_t.p_set.sum()[f"{node} services rural heat"]
+                    p_set_sum[f"{node} residential rural heat"]
+                    + p_set_sum[f"{node} services rural heat"]
                 )
             )
+            # if rural heating demand for one of the nodes doesn't exist,
+            # then columns were dropped before and heating demand share should be 0.0
+            if all(
+                f"{node} {service} rural heat" in p_set_sum.index
+                for service in ["residential", "services"]
+            )
+            else 0.0
             for node in nodal_df.index
         ],
         index=nodal_df.index,
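The refactor caches n.loads_t.p_set.sum() once and, more importantly, guards against nodes whose rural-heat load columns were dropped earlier: their residential share now defaults to 0.0 instead of raising a KeyError. The same guard in isolation, on a toy Series with illustrative column names:

import pandas as pd

# summed heat demand per load, as n.loads_t.p_set.sum() would return it
p_set_sum = pd.Series(
    {
        "DE0 residential rural heat": 60.0,
        "DE0 services rural heat": 40.0,
        # note: no rural-heat columns at all for node 'FR0'
    }
)

def residential_share(node):
    # only compute the ratio if both columns survived earlier filtering
    if all(
        f"{node} {service} rural heat" in p_set_sum.index
        for service in ["residential", "services"]
    ):
        res = p_set_sum[f"{node} residential rural heat"]
        srv = p_set_sum[f"{node} services rural heat"]
        return res / (res + srv)
    return 0.0

print(residential_share("DE0"))  # 0.6
print(residential_share("FR0"))  # 0.0, instead of a KeyError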

View File

@@ -13,10 +13,13 @@ logger = logging.getLogger(__name__)
 import uuid
 from itertools import product
+import country_converter as coco
 import geopandas as gpd
 import pandas as pd
 from packaging.version import Version, parse
+cc = coco.CountryConverter()
 def locate_missing_industrial_sites(df):
     """
@@ -107,6 +110,17 @@ def prepare_hotmaps_database(regions):
     # concat not duplicated and filtered gdf
     gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered]).sort_index()
+    # the .sjoin can lead to duplicates if a geom is in two overlapping regions
+    if gdf.index.duplicated().any():
+        # get all duplicated entries
+        duplicated_i = gdf.index[gdf.index.duplicated()]
+        # convert from raw data country name to iso-2-code
+        code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
+        # screen out malformed country allocation
+        gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
+        # concat not duplicated and filtered gdf
+        gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered])
     return gdf
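The new block resolves sites that land in two overlapping regions after the spatial join: for the duplicated rows it converts the raw Country name to an ISO-2 code with country_converter and keeps only the row whose assigned region country matches. A stripped-down sketch of the same filtering on a plain DataFrame (no geometries, illustrative values):

import country_converter as coco
import pandas as pd

cc = coco.CountryConverter()

# two rows with the same index: one site matched to two regions by the sjoin
gdf = pd.DataFrame(
    {"Country": ["Germany", "Germany"], "country": ["DE", "FR"]},
    index=[7, 7],
)

duplicated_i = gdf.index[gdf.index.duplicated()]
# convert the raw country name to an iso-2 code, e.g. "Germany" -> "DE"
code = cc.convert(gdf.loc[duplicated_i, "Country"], to="iso2")
# keep only the duplicated rows whose region country matches the site country
gdf_filtered = gdf.loc[duplicated_i].query("country == @code")
gdf = pd.concat([gdf.drop(duplicated_i), gdf_filtered])

print(gdf)  # a single row left, assigned to "DE"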

View File

@@ -711,5 +711,5 @@ if __name__ == "__main__":
     if snakemake.params.foresight == "myopic":
         cumulative_cost = calculate_cumulative_cost()
         cumulative_cost.to_csv(
-            "results/" + snakemake.params.RDIR + "/csvs/cumulative_cost.csv"
+            "results/" + snakemake.params.RDIR + "csvs/cumulative_cost.csv"
         )

View File

@@ -457,7 +457,6 @@ def plot_carbon_budget_distribution(input_eurostat):
     """
     Plot historical carbon emissions in the EU and decarbonization path.
     """
     import seaborn as sns
     sns.set()
@@ -502,6 +501,14 @@ def plot_carbon_budget_distribution(input_eurostat):
     # plot committed and under-discussion targets
     # (notice that historical emissions include all countries in the
     # network, but targets refer to EU)
+    ax1.plot(
+        [2020],
+        [0.8 * emissions[1990]],
+        marker="*",
+        markersize=12,
+        markerfacecolor="black",
+        markeredgecolor="black",
+    )
     ax1.plot(
         [2030],
@@ -512,7 +519,23 @@
         markeredgecolor="black",
     )
+    ax1.plot(
+        [2030],
+        [0.6 * emissions[1990]],
+        marker="*",
+        markersize=12,
+        markerfacecolor="black",
+        markeredgecolor="black",
+    )
+    ax1.plot(
+        [2050, 2050],
+        [x * emissions[1990] for x in [0.2, 0.05]],
+        color="gray",
+        linewidth=2,
+        marker="_",
+        alpha=0.5,
+    )
     ax1.plot(
         [2050],

View File

@@ -46,7 +46,6 @@ def define_spatial(nodes, options):
     ----------
     nodes : list-like
     """
     global spatial
     spatial.nodes = nodes
@@ -192,17 +191,17 @@ def get(item, investment_year=None):
 def co2_emissions_year(
-    countries, input_eurostat, opts, snakemake, year
+    countries, input_eurostat, opts, emissions_scope, report_year, year
 ):
     """
     Calculate CO2 emissions in one specific year (e.g. 1990 or 2018).
     """
-    emissions_scope = snakemake.config["energy"]["emissions"]
+    emissions_scope = snakemake.params.energy["emissions"]
     eea_co2 = build_eea_co2(snakemake.input.co2, year, emissions_scope)
     # TODO: read Eurostat data from year > 2014
     # this only affects the estimation of CO2 emissions for BA, RS, AL, ME, MK
-    report_year = snakemake.config["energy"]["eurostat_report_year"]
+    report_year = snakemake.params.energy["eurostat_report_year"]
     if year > 2014:
         eurostat_co2 = build_eurostat_co2(
             input_eurostat, countries, report_year, year=2014
@@ -241,12 +240,24 @@ def build_carbon_budget(o, input_eurostat, fn, emissions_scope, report_year):
     countries = snakemake.params.countries
     e_1990 = co2_emissions_year(
-        countries, input_eurostat, opts, snakemake, year=1990
+        countries,
+        input_eurostat,
+        opts,
+        emissions_scope,
+        report_year,
+        input_co2,
+        year=1990,
     )
     # emissions at the beginning of the path (last year available 2018)
     e_0 = co2_emissions_year(
-        countries, input_eurostat, opts, snakemake, year=2018,
+        countries,
+        input_eurostat,
+        opts,
+        emissions_scope,
+        report_year,
+        input_co2,
+        year=2018,
     )
     planning_horizons = snakemake.params.planning_horizons
@@ -357,7 +368,6 @@ def update_wind_solar_costs(n, costs):
     Update costs for wind and solar generators added with pypsa-eur to those
     cost in the planning year.
     """
     # NB: solar costs are also manipulated for rooftop
     # when distribution grid is inserted
     n.generators.loc[n.generators.carrier == "solar", "capital_cost"] = costs.at[
@@ -435,7 +445,6 @@ def add_carrier_buses(n, carrier, nodes=None):
     """
     Add buses to connect e.g. coal, nuclear and oil plants.
     """
     if nodes is None:
         nodes = vars(spatial)[carrier].nodes
     location = vars(spatial)[carrier].locations
@@ -716,6 +725,7 @@ def average_every_nhours(n, offset):
     return m
 def cycling_shift(df, steps=1):
     """
     Cyclic shift on index of pd.Series|pd.DataFrame by number of steps.
@@ -1150,7 +1160,6 @@ def add_storage_and_grids(n, costs):
         e_cyclic=True,
         carrier="H2 Store",
         capital_cost=h2_capital_cost,
-        lifetime=costs.at["hydrogen storage tank type 1 including compressor", "lifetime"],
     )
     if options["gas_network"] or options["H2_retrofit"]:
@@ -3076,7 +3085,6 @@ def maybe_adjust_costs_and_potentials(n, opts):
             logger.info(f"changing {attr} for {carrier} by factor {factor}")
-# TODO this should rather be a config no wildcard
 def limit_individual_line_extension(n, maxext):
     logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW")
     n.lines["s_nom_max"] = n.lines["s_nom"] + maxext
@@ -3211,7 +3219,7 @@ def apply_time_segmentation(
                 df = pnl.copy()
                 df.columns = pd.MultiIndex.from_product([[c.name], [attr], df.columns])
                 raw = pd.concat([raw, df], axis=1)
+    raw = raw.dropna(axis=1)
     # normalise all time-dependent data
     annual_max = raw.max().replace(0, 1)
     raw = raw.div(annual_max, level=0)
@@ -3268,28 +3276,26 @@ def set_temporal_aggregation(n, opts, solver_name):
         # segments with package tsam
         m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
         if m is not None:
-            if snakemake.params.foresight!="perfect":
-                segments = int(m[1])
-                logger.info(f"Use temporal segmentation with {segments} segments")
-                n = apply_time_segmentation(n, segments, solver_name=solver_name)
-                break
-            else:
-                logger.info("Apply temporal segmentation at prepare_perfect_foresight.")
+            segments = int(m[1])
+            logger.info(f"Use temporal segmentation with {segments} segments")
+            n = apply_time_segmentation(n, segments, solver_name=solver_name)
+            break
     return n
-#%%
 if __name__ == "__main__":
     if "snakemake" not in globals():
         from _helpers import mock_snakemake
         snakemake = mock_snakemake(
             "prepare_sector_network",
+            configfiles="test/config.overnight.yaml",
             simpl="",
             opts="",
-            clusters="37",
-            ll="v1.0",
-            sector_opts="60SEG-T-H-B-I-A-solar+p3-dist1",
-            planning_horizons="2050",
+            clusters="5",
+            ll="v1.5",
+            sector_opts="CO2L0-24H-T-H-B-I-A-solar+p3-dist1",
+            planning_horizons="2030",
         )
     logging.basicConfig(level=snakemake.config["logging"]["level"])
@@ -3391,7 +3397,6 @@ if __name__ == "__main__":
         add_allam(n, costs)
     solver_name = snakemake.config["solving"]["solver"]["name"]
     n = set_temporal_aggregation(n, opts, solver_name)
     limit_type = "config"
@@ -3404,8 +3409,14 @@
         if not os.path.exists(fn):
             emissions_scope = snakemake.params.emissions_scope
             report_year = snakemake.params.eurostat_report_year
+            input_co2 = snakemake.input.co2
             build_carbon_budget(
-                o, snakemake.input.eurostat, fn, emissions_scope, report_year
+                o,
+                snakemake.input.eurostat,
+                fn,
+                emissions_scope,
+                report_year,
+                input_co2,
             )
         co2_cap = pd.read_csv(fn, index_col=0).squeeze()
         limit = co2_cap.loc[investment_year]
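The recurring pattern in this file: values previously read from snakemake.config inside helper functions (emissions_scope, eurostat_report_year, the EEA CO2 file) are now passed in explicitly, with the rule exposing them under params:, which makes the dependency visible to Snakemake; with the default rerun triggers of recent Snakemake versions, changing those config entries re-triggers the rule. A reduced sketch of the rule/script pair, with shortened paths and an abbreviated output, not the full rule definition:

# Snakefile side (sketch): expose the config entries the script needs
rule prepare_sector_network:  # sketch, not the complete rule
    params:
        emissions_scope=config["energy"]["emissions"],
        eurostat_report_year=config["energy"]["eurostat_report_year"],
    input:
        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
        eurostat=input_eurostat,
    output:
        "results/prenetwork.nc",  # abbreviated
    script:
        "scripts/prepare_sector_network.py"

# script side (sketch): hand the values to the helper instead of letting it
# read snakemake.config itself
#   emissions_scope = snakemake.params.emissions_scope
#   report_year = snakemake.params.eurostat_report_year
#   input_co2 = snakemake.input.co2
#   build_carbon_budget(o, snakemake.input.eurostat, fn, emissions_scope,
#                       report_year, input_co2)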

View File

@@ -110,6 +110,9 @@ def _add_land_use_constraint(n):
     # warning: this will miss existing offwind which is not classed AC-DC and has carrier 'offwind'
     for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
+        extendable_i = (n.generators.carrier == carrier) & n.generators.p_nom_extendable
+        n.generators.loc[extendable_i, "p_nom_min"] = 0
         ext_i = (n.generators.carrier == carrier) & ~n.generators.p_nom_extendable
         existing = (
             n.generators.loc[ext_i, "p_nom"]
@@ -126,7 +129,7 @@ def _add_land_use_constraint(n):
         if len(existing_large):
             logger.warning(
                 f"Existing capacities larger than technical potential for {existing_large},\
                 adjust technical potential to existing capacities"
             )
             n.generators.loc[existing_large, "p_nom_max"] = n.generators.loc[
                 existing_large, "p_nom_min"
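The two added lines reset p_nom_min to zero for the extendable generators of each constrained carrier before the existing, non-extendable capacity is accounted for, so minima carried over from an earlier horizon cannot persist; this appears to be the fix announced in the release notes for the minimum capacity of renewables under the myopic option. A toy illustration of the selection logic on a plain DataFrame with the columns the constraint relies on:

import pandas as pd

generators = pd.DataFrame(
    {
        "carrier": ["solar", "solar", "onwind"],
        "p_nom_extendable": [True, False, True],
        "p_nom_min": [150.0, 0.0, 200.0],  # stale minima from a previous run
        "p_nom": [0.0, 100.0, 0.0],
    },
    index=["solar-new", "solar-2020", "onwind-new"],
)

for carrier in ["solar", "onwind", "offwind-ac", "offwind-dc"]:
    # extendable generators of this carrier: clear any previously set minimum
    extendable_i = (generators.carrier == carrier) & generators.p_nom_extendable
    generators.loc[extendable_i, "p_nom_min"] = 0

    # non-extendable generators still carry the existing capacity
    ext_i = (generators.carrier == carrier) & ~generators.p_nom_extendable
    existing = generators.loc[ext_i, "p_nom"]

print(generators["p_nom_min"].tolist())  # [0.0, 0.0, 0.0]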