Merge pull request #84 from PyPSA/powerplants

Update powerplants according to new powerplantmatching version

commit 3b83985436
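For orientation: the changes below move both scripts from powerplantmatching's old `collection.matched_data()` interface to the `.powerplant` DataFrame accessor of the newer release. A minimal sketch of that accessor pattern, not part of the commit itself (it assumes powerplantmatching is installed and can reach its data sources):

    import powerplantmatching as pm

    ppl = (pm.powerplants(from_url=True)               # fetch the matched power plant list
             .powerplant.convert_country_to_alpha2())  # country names -> ISO alpha-2 codes

    # build_powerplants.py writes this frame to resources/powerplants.csv;
    # add_electricity.py later reads it back and calls .powerplant.to_pypsa_names().
    print(ppl.head())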
@@ -9,6 +9,7 @@ install:
   - sudo apt-get install -yq --no-install-recommends curl bzip2 xz-utils git ca-certificates coinor-cbc
 
   # download and extract data dependencies
+  - mkdir ./resources
   - curl -L "https://zenodo.org/record/3517921/files/pypsa-eur-tutorial-data-bundle.tar.xz" -o "./bundle.tar.xz"
   - curl -L "https://zenodo.org/record/3518020/files/pypsa-eur-tutorial-cutouts.tar.xz" -o "./cutouts.tar.xz"
   - curl -L "https://zenodo.org/record/3518215/files/natura.tiff" -o "./resources/natura.tiff"
Snakefile (15 changed lines)
@@ -32,14 +32,13 @@ if config['enable']['prepare_links_p_nom']:
         # group: 'nonfeedin_preparation'
         script: 'scripts/prepare_links_p_nom.py'
 
-if config['enable']['powerplantmatching']:
-    rule build_powerplants:
-        input: base_network="networks/base.nc"
-        output: "resources/powerplants.csv"
-        threads: 1
-        resources: mem=500
-        # group: 'nonfeedin_preparation'
-        script: "scripts/build_powerplants.py"
+rule build_powerplants:
+    input: base_network="networks/base.nc"
+    output: "resources/powerplants.csv"
+    threads: 1
+    resources: mem=500
+    # group: 'nonfeedin_preparation'
+    script: "scripts/build_powerplants.py"
 
 rule base_network:
     input:
@@ -18,7 +18,6 @@ snapshots:
   closed: 'left' # end is not inclusive
 
 enable:
-  powerplantmatching: false
   prepare_links_p_nom: false
 
 electricity:
@@ -34,7 +33,15 @@ electricity:
     battery: 6
     H2: 168
 
-  conventional_carriers: [] # [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
+  powerplants_filter: false
+  custom_powerplants: false # replace or add
+  conventional_carriers: [] # [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
+
+  # estimate_renewable_capacities_from_capacity_stats:
+  #   # Wind is the Fueltype in ppm.data.Capacity_stats, onwind, offwind-{ac,dc} the carrier in PyPSA-Eur
+  #   Wind: [onwind, offwind-ac, offwind-dc]
+  #   Solar: [solar]
+
 
 atlite:
   nprocesses: 4
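Both new keys are consumed by scripts/build_powerplants.py further down: `powerplants_filter` is applied as a pandas query string to the matched plant list, and a truthy `custom_powerplants` appends the entries read from the `custom_powerplants` input file (`data/custom_powerplants.csv`). A small illustration of the intended semantics; the config values and the toy tables below are invented:

    import pandas as pd

    config = {'powerplants_filter': "Country != 'BE'",   # pandas query string, or false
              'custom_powerplants': True}                # truthy -> append custom entries

    ppl = pd.DataFrame({'Country': ['DE', 'BE'], 'Fueltype': ['Hard Coal', 'Nuclear'],
                        'Capacity': [500., 1000.]})

    if isinstance(config['powerplants_filter'], str):
        ppl.query(config['powerplants_filter'], inplace=True)   # drops the BE plant

    if config['custom_powerplants']:
        # in the real script: pd.read_csv(snakemake.input.custom_powerplants, index_col=0)
        custom = pd.DataFrame({'Country': ['DE'], 'Fueltype': ['Lignite'], 'Capacity': [300.]})
        ppl = ppl.append(custom, sort=False)

    print(ppl)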
File diff suppressed because it is too large.
scripts/add_electricity.py

@@ -36,8 +36,9 @@ Relevant Settings
     lines:
         length_factor:
 
 .. seealso::
-    Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`, :ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
+    Documentation of the configuration file ``config.yaml`` at :ref:`costs_cf`,
+    :ref:`electricity_cf`, :ref:`load_cf`, :ref:`renewable_cf`, :ref:`lines_cf`
 
 Inputs
 ------
@@ -94,7 +95,6 @@ import pandas as pd
 idx = pd.IndexSlice
 
 import numpy as np
-import scipy as sp
 import xarray as xr
 
 import geopandas as gpd
@@ -104,14 +104,8 @@ from vresutils.load import timeseries_opsd
 from vresutils import transfer as vtransfer
 
 import pypsa
+import powerplantmatching as ppm
 
-try:
-    import powerplantmatching as ppm
-    from build_powerplants import country_alpha_2
-
-    has_ppm = True
-except ImportError:
-    has_ppm = False
 
 def normed(s): return s/s.sum()
 
@@ -119,7 +113,8 @@ def _add_missing_carriers_from_costs(n, costs, carriers):
     missing_carriers = pd.Index(carriers).difference(n.carriers.index)
     if missing_carriers.empty: return
 
-    emissions_cols = costs.columns.to_series().loc[lambda s: s.str.endswith('_emissions')].values
+    emissions_cols = costs.columns.to_series()\
+                          .loc[lambda s: s.str.endswith('_emissions')].values
     suptechs = missing_carriers.str.split('-').str[0]
     emissions = costs.loc[suptechs, emissions_cols].fillna(0.)
     emissions.index = missing_carriers
@@ -139,7 +134,8 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
     costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3
     costs.loc[costs.unit.str.contains("USD"),"value"] *= config['USD2013_to_EUR2013']
 
-    costs = costs.loc[idx[:,config['year'],:], "value"].unstack(level=2).groupby("technology").sum(min_count=1)
+    costs = (costs.loc[idx[:,config['year'],:], "value"]
+             .unstack(level=2).groupby("technology").sum(min_count=1))
 
     costs = costs.fillna({"CO2 intensity" : 0,
                           "FOM" : 0,
@@ -150,7 +146,8 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
                           "investment" : 0,
                           "lifetime" : 25})
 
-    costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) + costs["FOM"]/100.) *
+    costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) +
+                              costs["FOM"]/100.) *
                              costs["investment"] * Nyears)
 
     costs.at['OCGT', 'fuel'] = costs.at['gas', 'fuel']
@@ -163,7 +160,8 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
     costs.at['OCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
     costs.at['CCGT', 'co2_emissions'] = costs.at['gas', 'co2_emissions']
 
-    costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] + costs.at['solar-utility', 'capital_cost'])
+    costs.at['solar', 'capital_cost'] = 0.5*(costs.at['solar-rooftop', 'capital_cost'] +
+                                             costs.at['solar-utility', 'capital_cost'])
 
     def costs_for_storage(store, link1, link2=None, max_hours=1.):
         capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
@@ -183,8 +181,8 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
         costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
                           max_hours=max_hours['battery'])
     costs.loc["H2"] = \
-        costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"], costs.loc["electrolysis"],
-                          max_hours=max_hours['H2'])
+        costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"],
+                          costs.loc["electrolysis"], max_hours=max_hours['H2'])
 
     for attr in ('marginal_cost', 'capital_cost'):
         overwrites = config.get(attr)
@@ -194,19 +192,27 @@ def load_costs(Nyears=1., tech_costs=None, config=None, elec_config=None):
 
     return costs
 
-def load_powerplants(n, ppl_fn=None):
+def load_powerplants(ppl_fn=None):
     if ppl_fn is None:
         ppl_fn = snakemake.input.powerplants
-    ppl = pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
-    return ppl.loc[ppl.bus.isin(n.buses.index)]
+    carrier_dict = {'ocgt': 'OCGT', 'ccgt': 'CCGT', 'bioenergy':'biomass',
+                    'ccgt, thermal': 'CCGT', 'hard coal': 'coal'}
+    return (pd.read_csv(ppl_fn, index_col=0, dtype={'bus': 'str'})
+            .powerplant.to_pypsa_names()
+            .rename(columns=str.lower).drop(columns=['efficiency'])
+            .replace({'carrier': carrier_dict}))
 
-# ## Attach components
+
+# =============================================================================
+# Attach components
+# =============================================================================
 
 # ### Load
 
 def attach_load(n):
     substation_lv_i = n.buses.index[n.buses['substation_lv']]
-    regions = gpd.read_file(snakemake.input.regions).set_index('name').reindex(substation_lv_i)
+    regions = (gpd.read_file(snakemake.input.regions).set_index('name')
+               .reindex(substation_lv_i))
     opsd_load = (timeseries_opsd(slice(*n.snapshots[[0,-1]].year.astype(str)),
                                  snakemake.input.opsd_load) *
                  snakemake.config.get('load', {}).get('scaling_factor', 1.0))
@@ -224,17 +230,21 @@ def attach_load(n):
             return pd.DataFrame({group.index[0]: l})
         else:
             nuts3_cntry = nuts3.loc[nuts3.country == cntry]
-            transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry, normed=False).T.tocsr()
-            gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values), index=group.index)
-            pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values), index=group.index)
+            transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry,
+                                               normed=False).T.tocsr()
+            gdp_n = pd.Series(transfer.dot(nuts3_cntry['gdp'].fillna(1.).values),
+                              index=group.index)
+            pop_n = pd.Series(transfer.dot(nuts3_cntry['pop'].fillna(1.).values),
+                              index=group.index)
 
             # relative factors 0.6 and 0.4 have been determined from a linear
             # regression on the country to continent load data (refer to vresutils.load._upsampling_weights)
             factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
-            return pd.DataFrame(factors.values * l.values[:,np.newaxis], index=l.index, columns=factors.index)
+            return pd.DataFrame(factors.values * l.values[:,np.newaxis],
+                                index=l.index, columns=factors.index)
 
-    load = pd.concat([upsample(cntry, group)
-                      for cntry, group in regions.geometry.groupby(regions.country)], axis=1)
+    load = pd.concat([upsample(cntry, group) for cntry, group
+                      in regions.geometry.groupby(regions.country)], axis=1)
 
     n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
 
@@ -248,16 +258,16 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=False):
 
     dc_b = n.links.carrier == 'DC'
     if simple_hvdc_costs:
-        n.links.loc[dc_b, 'capital_cost'] = (n.links.loc[dc_b, 'length'] * length_factor *
+        costs = (n.links.loc[dc_b, 'length'] * length_factor *
                  costs.at['HVDC overhead', 'capital_cost'])
     else:
-        n.links.loc[dc_b, 'capital_cost'] = (n.links.loc[dc_b, 'length'] * length_factor *
+        costs = (n.links.loc[dc_b, 'length'] * length_factor *
                  ((1. - n.links.loc[dc_b, 'underwater_fraction']) *
                   costs.at['HVDC overhead', 'capital_cost'] +
                   n.links.loc[dc_b, 'underwater_fraction'] *
                   costs.at['HVDC submarine', 'capital_cost']) +
                  costs.at['HVDC inverter pair', 'capital_cost'])
-
+    n.links.loc[dc_b, 'capital_cost'] = costs
 # ### Generators
 
 def attach_wind_and_solar(n, costs):
@@ -271,13 +281,20 @@ def attach_wind_and_solar(n, costs):
         suptech = tech.split('-', 2)[0]
         if suptech == 'offwind':
             underwater_fraction = ds['underwater_fraction'].to_pandas()
-            connection_cost = (snakemake.config['lines']['length_factor'] * ds['average_distance'].to_pandas() *
-                               (underwater_fraction * costs.at[tech + '-connection-submarine', 'capital_cost'] +
-                                (1. - underwater_fraction) * costs.at[tech + '-connection-underground', 'capital_cost']))
-            capital_cost = costs.at['offwind', 'capital_cost'] + costs.at[tech + '-station', 'capital_cost'] + connection_cost
-            logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}".format(connection_cost.min(), connection_cost.max(), tech))
+            connection_cost = (snakemake.config['lines']['length_factor'] *
+                               ds['average_distance'].to_pandas() *
+                               (underwater_fraction *
+                                costs.at[tech + '-connection-submarine', 'capital_cost'] +
+                                (1. - underwater_fraction) *
+                                costs.at[tech + '-connection-underground', 'capital_cost']))
+            capital_cost = (costs.at['offwind', 'capital_cost'] +
+                            costs.at[tech + '-station', 'capital_cost'] +
+                            connection_cost)
+            logger.info("Added connection cost of {:0.0f}-{:0.0f} Eur/MW/a to {}"
+                        .format(connection_cost.min(), connection_cost.max(), tech))
         elif suptech == 'onwind':
-            capital_cost = costs.at['onwind', 'capital_cost'] + costs.at['onwind-landcosts', 'capital_cost']
+            capital_cost = (costs.at['onwind', 'capital_cost'] +
+                            costs.at['onwind-landcosts', 'capital_cost'])
         else:
             capital_cost = costs.at[tech, 'capital_cost']
 
@@ -299,26 +316,19 @@ def attach_wind_and_solar(n, costs):
 def attach_conventional_generators(n, costs, ppl):
     carriers = snakemake.config['electricity']['conventional_carriers']
     _add_missing_carriers_from_costs(n, costs, carriers)
-    ppl = ppl.rename(columns={'Name': 'name', 'Capacity': 'p_nom'})
-    ppm_fuels = {'OCGT': 'OCGT', 'CCGT': 'CCGT',
-                 'oil': 'Oil', 'nuclear': 'Nuclear',
-                 'geothermal': 'Geothermal', 'biomass': 'Bioenergy',
-                 'coal': 'Hard Coal', 'lignite': 'Lignite'}
+    ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
+           .rename(index=lambda s: 'C' + str(s)))
 
-    for tech in carriers:
-        p = pd.DataFrame(ppl.loc[ppl['Fueltype'] == ppm_fuels[tech]])
-        p.index = 'C' + p.index.astype(str)
-        logger.info('Adding {} generators of type {} with capacity {}'
-                    .format(len(p), tech, p.p_nom.sum()))
-
-        n.madd("Generator", p.index,
-               carrier=tech,
-               bus=p['bus'],
-               p_nom=p['p_nom'],
-               efficiency=costs.at[tech, 'efficiency'],
-               marginal_cost=costs.at[tech, 'marginal_cost'],
-               capital_cost=0)
-        logger.warn(f'Capital costs for conventional generators put to 0 EUR/MW.')
+    logger.info('Adding {} generators with capacities\n{}'
+                .format(len(ppl), ppl.groupby('carrier').p_nom.sum()))
+    n.madd("Generator", ppl.index,
+           carrier=ppl.carrier,
+           bus=ppl.bus,
+           p_nom=ppl.p_nom,
+           efficiency=ppl.efficiency,
+           marginal_cost=ppl.marginal_cost,
+           capital_cost=0)
+    logger.warning(f'Capital costs for conventional generators put to 0 EUR/MW.')
 
 
 def attach_hydro(n, costs, ppl):
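The rewritten attach_conventional_generators relies on the plant table already carrying a lower-case `carrier` column (see `load_powerplants` above), so the per-carrier cost assumptions can be joined onto every plant and handed to `n.madd` as vectors in a single call. A toy version of that join, with invented numbers:

    import pandas as pd

    costs = pd.DataFrame({'efficiency': [0.39, 0.58], 'marginal_cost': [30., 50.]},
                         index=pd.Index(['coal', 'CCGT'], name='carrier'))
    ppl = pd.DataFrame({'carrier': ['coal', 'CCGT', 'nuclear'], 'p_nom': [500., 400., 1200.]})

    carriers = ['coal', 'CCGT']
    ppl = (ppl.query('carrier in @carriers').join(costs, on='carrier')
           .rename(index=lambda s: 'C' + str(s)))
    print(ppl)   # each row now carries p_nom, efficiency and marginal_cost side by side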
@@ -327,100 +337,98 @@ def attach_hydro(n, costs, ppl):
 
     _add_missing_carriers_from_costs(n, costs, carriers)
 
-    ppl = ppl.loc[ppl['Fueltype'] == 'Hydro']
-    ppl = ppl.set_index(pd.RangeIndex(len(ppl)).astype(str) + ' hydro', drop=False)
-
-    ppl = ppl.rename(columns={'Capacity':'p_nom', 'Technology': 'technology'})
-    ppl = ppl.loc[ppl.technology.notnull(), ['bus', 'p_nom', 'technology']]
-
-    ppl = ppl.assign(
-        has_inflow=ppl.technology.str.contains('Reservoir|Run-Of-River|Natural Inflow'),
-        has_store=ppl.technology.str.contains('Reservoir|Pumped Storage'),
-        has_pump=ppl.technology.str.contains('Pumped Storage')
-    )
+    ppl = ppl.query('carrier == "hydro"').reset_index(drop=True)\
+             .rename(index=lambda s: str(s) + ' hydro')
+    ror = ppl.query('technology == "Run-Of-River"')
+    phs = ppl.query('technology == "Pumped Storage"')
+    hydro = ppl.query('technology == "Reservoir"')
 
     country = ppl['bus'].map(n.buses.country).rename("country")
 
-    if ppl.has_inflow.any():
-        dist_key = ppl.loc[ppl.has_inflow, 'p_nom'].groupby(country).transform(normed)
+    inflow_idx = ror.index | hydro.index
+    if not inflow_idx.empty:
+        dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed)
 
         with xr.open_dataarray(snakemake.input.profile_hydro) as inflow:
-            inflow_countries = pd.Index(country.loc[ppl.has_inflow].values)
-            assert len(inflow_countries.unique().difference(inflow.indexes['countries'])) == 0, (
-                "'{}' is missing inflow time-series for at least one country: {}"
-                .format(snakemake.input.profile_hydro, ", ".join(inflow_countries.unique().difference(inflow.indexes['countries'])))
-            )
+            inflow_countries = pd.Index(country[inflow_idx])
+            missing_c = (inflow_countries.unique()
+                         .difference(inflow.indexes['countries']))
+            assert missing_c.empty, (f"'{snakemake.input.profile_hydro}' is missing "
+                                     f"inflow time-series for at least one country: {', '.join(missing_c)}")
 
-            inflow_t = (
-                inflow.sel(countries=inflow_countries)
-                .rename({'countries': 'name'})
-                .assign_coords(name=ppl.index[ppl.has_inflow])
-                .transpose('time', 'name')
-                .to_pandas()
-                .multiply(dist_key, axis=1)
-            )
+            inflow_t = (inflow.sel(countries=inflow_countries)
+                        .rename({'countries': 'name'})
+                        .assign_coords(name=inflow_idx)
+                        .transpose('time', 'name')
+                        .to_pandas()
+                        .multiply(dist_key, axis=1))
 
-    if 'ror' in carriers:
-        ror = ppl.loc[ppl.has_inflow & ~ ppl.has_store]
-        if not ror.empty:
-            n.madd("Generator", ror.index,
-                   carrier='ror',
-                   bus=ror['bus'],
-                   p_nom=ror['p_nom'],
-                   efficiency=costs.at['ror', 'efficiency'],
-                   capital_cost=costs.at['ror', 'capital_cost'],
-                   weight=ror['p_nom'],
-                   p_max_pu=(inflow_t.loc[:, ror.index]
-                             .divide(ror['p_nom'], axis=1)
-                             .where(lambda df: df<=1., other=1.)))
+    if 'ror' in carriers and not ror.empty:
+        n.madd("Generator", ror.index,
+               carrier='ror',
+               bus=ror['bus'],
+               p_nom=ror['p_nom'],
+               efficiency=costs.at['ror', 'efficiency'],
+               capital_cost=costs.at['ror', 'capital_cost'],
+               weight=ror['p_nom'],
+               p_max_pu=(inflow_t[ror.index]
+                         .divide(ror['p_nom'], axis=1)
+                         .where(lambda df: df<=1., other=1.)))
 
-    if 'PHS' in carriers:
-        phs = ppl.loc[ppl.has_store & ppl.has_pump]
-        if not phs.empty:
-            n.madd('StorageUnit', phs.index,
-                   carrier='PHS',
-                   bus=phs['bus'],
-                   p_nom=phs['p_nom'],
-                   capital_cost=costs.at['PHS', 'capital_cost'],
-                   max_hours=c['PHS_max_hours'],
-                   efficiency_store=np.sqrt(costs.at['PHS','efficiency']),
-                   efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),
-                   cyclic_state_of_charge=True,
-                   inflow=inflow_t.loc[:, phs.index[phs.has_inflow]])
+    if 'PHS' in carriers and not phs.empty:
+        # fill missing max hours to config value and assume no natural inflow
+        # due to lack of data
+        phs = phs.replace({'max_hours': {0: c['PHS_max_hours']}})
+        n.madd('StorageUnit', phs.index,
+               carrier='PHS',
+               bus=phs['bus'],
+               p_nom=phs['p_nom'],
+               capital_cost=costs.at['PHS', 'capital_cost'],
+               max_hours=phs['max_hours'],
+               efficiency_store=np.sqrt(costs.at['PHS','efficiency']),
+               efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),
+               cyclic_state_of_charge=True)
 
-    if 'hydro' in carriers:
-        hydro = ppl.loc[ppl.has_store & ~ ppl.has_pump & ppl.has_inflow].join(country)
-        if not hydro.empty:
-            hydro_max_hours = c.get('hydro_max_hours')
-            if hydro_max_hours == 'energy_capacity_totals_by_country':
-                hydro_e_country = pd.read_csv(snakemake.input.hydro_capacities, index_col=0)["E_store[TWh]"].clip(lower=0.2)*1e6
-                hydro_max_hours_country = hydro_e_country / hydro.groupby('country').p_nom.sum()
-                hydro_max_hours = hydro.country.map(hydro_e_country / hydro.groupby('country').p_nom.sum())
-            elif hydro_max_hours == 'estimate_by_large_installations':
-                hydro_capacities = pd.read_csv(snakemake.input.hydro_capacities, comment="#", na_values='-', index_col=0)
-                estim_hydro_max_hours = hydro_capacities.e_stor / hydro_capacities.p_nom_discharge
-
-                missing_countries = (pd.Index(hydro['country'].unique())
-                                     .difference(estim_hydro_max_hours.dropna().index))
-                if not missing_countries.empty:
-                    logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}"
-                                   .format(", ".join(missing_countries)))
-
-                hydro_max_hours = hydro['country'].map(estim_hydro_max_hours).fillna(6)
-
-            n.madd('StorageUnit', hydro.index, carrier='hydro',
-                   bus=hydro['bus'],
-                   p_nom=hydro['p_nom'],
-                   max_hours=hydro_max_hours,
-                   capital_cost=(costs.at['hydro', 'capital_cost']
-                                 if c.get('hydro_capital_cost') else 0.),
-                   marginal_cost=costs.at['hydro', 'marginal_cost'],
-                   p_max_pu=1., # dispatch
-                   p_min_pu=0., # store
-                   efficiency_dispatch=costs.at['hydro', 'efficiency'],
-                   efficiency_store=0.,
-                   cyclic_state_of_charge=True,
-                   inflow=inflow_t.loc[:, hydro.index])
+    if 'hydro' in carriers and not hydro.empty:
+        hydro_max_hours = c.get('hydro_max_hours')
+        hydro_stats = pd.read_csv(snakemake.input.hydro_capacities,
+                                  comment="#", na_values='-', index_col=0)
+        e_target = hydro_stats["E_store[TWh]"].clip(lower=0.2) * 1e6
+        e_installed = hydro.eval('p_nom * max_hours').groupby(hydro.country).sum()
+        e_missing = e_target - e_installed
+        missing_mh_i = hydro.query('max_hours == 0').index
+
+        if hydro_max_hours == 'energy_capacity_totals_by_country':
+            # watch out some p_nom values like IE's are totally underrepresented
+            max_hours_country = e_missing / \
+                                hydro.loc[missing_mh_i].groupby('country').p_nom.sum()
+
+        elif hydro_max_hours == 'estimate_by_large_installations':
+            max_hours_country = hydro_stats['E_store[TWh]'] * 1e3 / \
+                                hydro_stats['p_nom_discharge[GW]']
+
+        missing_countries = (pd.Index(hydro['country'].unique())
+                             .difference(max_hours_country.dropna().index))
+        if not missing_countries.empty:
+            logger.warning("Assuming max_hours=6 for hydro reservoirs in the countries: {}"
+                           .format(", ".join(missing_countries)))
+        hydro_max_hours = hydro.max_hours.where(hydro.max_hours > 0,
+                                                hydro.country.map(max_hours_country)).fillna(6)
+
+        n.madd('StorageUnit', hydro.index, carrier='hydro',
+               bus=hydro['bus'],
+               p_nom=hydro['p_nom'],
+               max_hours=hydro_max_hours,
+               capital_cost=(costs.at['hydro', 'capital_cost']
+                             if c.get('hydro_capital_cost') else 0.),
+               marginal_cost=costs.at['hydro', 'marginal_cost'],
+               p_max_pu=1., # dispatch
+               p_min_pu=0., # store
+               efficiency_dispatch=costs.at['hydro', 'efficiency'],
+               efficiency_store=0.,
+               cyclic_state_of_charge=True,
+               inflow=inflow_t.loc[:, hydro.index])
 
 
 def attach_extendable_generators(n, costs, ppl):
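The reservoir branch now prefers the `max_hours` values reported by powerplantmatching and only falls back to a country-level estimate, and finally to 6 h, where they are zero or unavailable. The fallback pattern in isolation, with invented values:

    import pandas as pd

    max_hours = pd.Series([10., 0., 0.], index=['p1', 'p2', 'p3'])
    country = pd.Series(['AT', 'AT', 'XX'], index=['p1', 'p2', 'p3'])
    max_hours_country = pd.Series({'AT': 150.})          # no estimate available for 'XX'

    hydro_max_hours = max_hours.where(max_hours > 0,
                                      country.map(max_hours_country)).fillna(6)
    print(hydro_max_hours)   # p1 keeps 10, p2 gets the AT estimate 150, p3 falls back to 6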
@@ -433,7 +441,7 @@ def attach_extendable_generators(n, costs, ppl):
         suptech = tech.split('-')[0]
 
         if suptech == 'OCGT':
-            ocgt = ppl.loc[ppl.Fueltype.isin(('OCGT', 'CCGT'))].groupby('bus', as_index=False).first()
+            ocgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
             n.madd('Generator', ocgt.index,
                    suffix=' OCGT',
                    bus=ocgt['bus'],
@@ -445,7 +453,7 @@ def attach_extendable_generators(n, costs, ppl):
                    efficiency=costs.at['OCGT', 'efficiency'])
 
         elif suptech == 'CCGT':
-            ccgt = ppl.loc[ppl.Fueltype.isin(('OCGT', 'CCGT'))].groupby('bus', as_index=False).first()
+            ccgt = ppl.query("carrier in ['OCGT', 'CCGT']").groupby('bus', as_index=False).first()
             n.madd('Generator', ccgt.index,
                    suffix=' CCGT',
                    bus=ccgt['bus'],
@@ -456,8 +464,9 @@ def attach_extendable_generators(n, costs, ppl):
                    marginal_cost=costs.at['CCGT', 'marginal_cost'],
                    efficiency=costs.at['CCGT', 'efficiency'])
         else:
-            raise NotImplementedError(f"Adding extendable generators for carrier '{tech}' is not implemented, yet."
-                                      "Only OCGT and CCGT are allowed at the moment.")
+            raise NotImplementedError(f"Adding extendable generators for carrier "
+                                      "'{tech}' is not implemented, yet. "
+                                      "Only OCGT and CCGT are allowed at the moment.")
 
 
 def attach_storage(n, costs):
@@ -533,27 +542,27 @@ def attach_storage(n, costs):
 
 def estimate_renewable_capacities(n, tech_map=None):
     if tech_map is None:
-        tech_map = snakemake.config['electricity'].get('estimate_renewable_capacities_from_capacity_stats', {})
+        tech_map = (snakemake.config['electricity']
+                    .get('estimate_renewable_capacities_from_capacity_stats', {}))
 
     if len(tech_map) == 0: return
 
-    assert has_ppm, "The estimation of renewable capacities needs the powerplantmatching package"
-
-    capacities = ppm.data.Capacity_stats()
-    capacities['alpha_2'] = capacities['Country'].map(country_alpha_2)
-    capacities = capacities.loc[capacities.Energy_Source_Level_2].set_index(['Fueltype', 'alpha_2']).sort_index()
+    capacities = (ppm.data.Capacity_stats().powerplant.convert_country_to_alpha2()
+                  [lambda df: df.Energy_Source_Level_2]
+                  .set_index(['Fueltype', 'Country']).sort_index())
 
     countries = n.buses.country.unique()
 
     for ppm_fueltype, techs in tech_map.items():
-        tech_capacities = capacities.loc[ppm_fueltype, 'Capacity'].reindex(countries, fill_value=0.)
-        tech_b = n.generators.carrier.isin(techs)
-        n.generators.loc[tech_b, 'p_nom'] = (
-            (n.generators_t.p_max_pu.mean().loc[tech_b] * n.generators.loc[tech_b, 'p_nom_max']) # maximal yearly generation
-             .groupby(n.generators.bus.map(n.buses.country)) # for each country
-             .transform(lambda s: normed(s) * tech_capacities.at[s.name])
-             .where(lambda s: s>0.1, 0.) # only capacities above 100kW
-        )
+        tech_capacities = capacities.loc[ppm_fueltype, 'Capacity']\
+                                    .reindex(countries, fill_value=0.)
+        tech_i = n.generators.query('carrier in @techs').index
+        n.generators.loc[tech_i, 'p_nom'] = (
+            (n.generators_t.p_max_pu[tech_i].mean() *
+             n.generators.loc[tech_i, 'p_nom_max']) # maximal yearly generation
+             .groupby(n.generators.bus.map(n.buses.country)) # for each country
+             .transform(lambda s: normed(s) * tech_capacities.at[s.name])
+             .where(lambda s: s>0.1, 0.)) # only capacities above 100kW
 
 def add_co2limit(n, Nyears=1.):
     n.add("GlobalConstraint", "CO2Limit",
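estimate_renewable_capacities distributes a national capacity total over the buses of a country in proportion to their potential yearly generation. Reduced to its core, with invented numbers:

    import pandas as pd

    def normed(s): return s/s.sum()

    potential = pd.Series([10., 30., 20.], index=['bus1', 'bus2', 'bus3'])  # ~ p_max_pu.mean() * p_nom_max
    country = pd.Series(['DE', 'DE', 'FR'], index=potential.index)
    tech_capacities = pd.Series({'DE': 1000., 'FR': 500.})                  # national totals

    p_nom = (potential.groupby(country)
             .transform(lambda s: normed(s) * tech_capacities.at[s.name]))
    print(p_nom)   # bus1: 250, bus2: 750, bus3: 500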
@@ -592,7 +601,7 @@ if __name__ == "__main__":
     Nyears = n.snapshot_weightings.sum()/8760.
 
     costs = load_costs(Nyears)
-    ppl = load_powerplants(n)
+    ppl = load_powerplants()
 
     attach_load(n)
 
scripts/build_powerplants.py

@@ -7,10 +7,11 @@ Relevant Settings
 
 .. code:: yaml
 
-    enable:
-        powerplantmatching:
+    electricity:
+        powerplants_filter:
+        custom_powerplants:
 
 .. seealso::
     Documentation of the configuration file ``config.yaml`` at
     :ref:`toplevel_cf`
 
@@ -35,71 +36,71 @@ Description
 """
 
 import logging
-import numpy as np
-import pandas as pd
 from scipy.spatial import cKDTree as KDTree
-import pycountry as pyc
 
 import pypsa
-import powerplantmatching as ppm
+import powerplantmatching as pm
+import pandas as pd
 
+logger = logging.getLogger(__name__)
 
 
-def country_alpha_2(name):
-    try:
-        cntry = pyc.countries.get(name=name)
-    except KeyError:
-        cntry = None
-    if cntry is None:
-        cntry = pyc.countries.get(official_name=name)
-    return cntry.alpha_2
+def add_custom_powerplants(ppl):
+    custom_ppl_query = snakemake.config['electricity']['custom_powerplants']
+    if not custom_ppl_query:
+        return ppl
+    add_ppls = pd.read_csv(snakemake.input.custom_powerplants, index_col=0)
+    if isinstance(custom_ppl_query, str):
+        add_ppls.query(add_ppls, inplace=True)
+    return ppl.append(add_ppls, sort=False)
 
 
 if __name__ == "__main__":
     if 'snakemake' not in globals():
         from vresutils.snakemake import MockSnakemake, Dict
 
         snakemake = MockSnakemake(
-            input=Dict(base_network='networks/base.nc'),
+            input=Dict(base_network='networks/base.nc',
+                       custom_powerplants='data/custom_powerplants.csv'),
             output=['resources/powerplants.csv']
         )
 
     logging.basicConfig(level=snakemake.config['logging_level'])
 
     n = pypsa.Network(snakemake.input.base_network)
 
-    ppm.powerplants(from_url=True)
-
-    ppl = (ppm.collection.matched_data()
-           [lambda df : ~df.Fueltype.isin(('Solar', 'Wind'))]
-           .pipe(ppm.cleaning.clean_technology)
-           .assign(Fueltype=lambda df: (
-               df.Fueltype.where(df.Fueltype != 'Natural Gas',
-                                 df.Technology.replace('Steam Turbine', 'OCGT').fillna('OCGT'))))
-           .pipe(ppm.utils.fill_geoposition))
-
-    # ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('CCGT'), 'Fueltype'] = 'CCGT'
-    # ppl.loc[(ppl.Fueltype == 'Other') & ppl.Technology.str.contains('Steam Turbine'), 'Fueltype'] = 'CCGT'
-
-    ppl = ppl.loc[ppl.lon.notnull() & ppl.lat.notnull()]
-    ppl = ppl.replace({"Country": {"Macedonia, Republic of": "North Macedonia"}})
-
-    ppl_country = ppl.Country.map(country_alpha_2)
     countries = n.buses.country.unique()
-    cntries_without_ppl = []
 
-    for cntry in countries:
-        substation_lv_i = n.buses.index[n.buses['substation_lv'] & (n.buses.country == cntry)]
-        ppl_b = ppl_country == cntry
-        if not ppl_b.any():
-            cntries_without_ppl.append(cntry)
-            continue
+    ppl = (pm.powerplants(from_url=True)
+           .powerplant.convert_country_to_alpha2()
+           .query('Fueltype not in ["Solar", "Wind"] and Country in @countries')
+           .replace({'Technology': {'Steam Turbine': 'OCGT'}})
+           .assign(Fueltype=lambda df: (
+               df.Fueltype
+                 .where(df.Fueltype != 'Natural Gas',
+                        df.Technology.replace('Steam Turbine',
+                                              'OCGT').fillna('OCGT')))))
 
-        kdtree = KDTree(n.buses.loc[substation_lv_i, ['x','y']].values)
-        ppl.loc[ppl_b, 'bus'] = substation_lv_i[kdtree.query(ppl.loc[ppl_b, ['lon','lat']].values)[1]]
+    ppl_query = snakemake.config['electricity']['powerplants_filter']
+    if isinstance(ppl_query, str):
+        ppl.query(ppl_query, inplace=True)
+
+    ppl = add_custom_powerplants(ppl) # add carriers from own powerplant files
+
+    cntries_without_ppl = [c for c in countries if c not in ppl.Country.unique()]
+
+    for c in countries:
+        substation_i = n.buses.query('substation_lv and country == @c').index
+        kdtree = KDTree(n.buses.loc[substation_i, ['x','y']].values)
+        ppl_i = ppl.query('Country == @c').index
+
+        ppl.loc[ppl_i, 'bus'] = substation_i[kdtree.query(ppl.loc[ppl_i,
+                                                          ['lon','lat']].values)[1]]
 
     if cntries_without_ppl:
-        logging.warning("No powerplants known in: {}".format(", ".join(cntries_without_ppl)))
+        logging.warning(f"No powerplants known in: {', '.join(cntries_without_ppl)}")
 
     bus_null_b = ppl["bus"].isnull()
     if bus_null_b.any():
-        logging.warning("Couldn't find close bus for {} powerplants".format(bus_null_b.sum()))
+        logging.warning(f"Couldn't find close bus for {bus_null_b.sum()} powerplants")
 
     ppl.to_csv(snakemake.output[0])
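The per-country loop above assigns every plant to its nearest low-voltage substation via a k-d tree over the bus coordinates; `cKDTree.query` returns `(distances, indices)` and the second element indexes the substation list. A self-contained toy version with invented coordinates:

    import pandas as pd
    from scipy.spatial import cKDTree as KDTree

    buses = pd.DataFrame({'x': [6.0, 10.1, 13.4], 'y': [50.9, 53.5, 52.5]},
                         index=['bus_a', 'bus_b', 'bus_c'])
    plants = pd.DataFrame({'lon': [6.2, 13.0], 'lat': [51.0, 52.4]}, index=['p1', 'p2'])

    kdtree = KDTree(buses[['x', 'y']].values)
    _, idx = kdtree.query(plants[['lon', 'lat']].values)   # index of the closest bus per plant
    plants['bus'] = buses.index[idx]
    print(plants)   # p1 -> bus_a, p2 -> bus_c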
@@ -18,7 +18,6 @@ snapshots:
   closed: 'left' # end is not inclusive
 
 enable:
-  powerplantmatching: false
   prepare_links_p_nom: false
 
 electricity:
@@ -33,6 +32,8 @@ electricity:
     battery: 6
     H2: 168
 
+  powerplants_filter: false
+  custom_powerplants: false # replace or add
   conventional_carriers: [coal, CCGT] # [nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass]
 
 atlite: