make fuel prices flexible over years
fix marginal cost issue (on the PyPSA side); minor fixes

commit c7f67f0641 (parent 3e64599c4c)
@@ -19,6 +19,8 @@ if not exists("config/config.yaml"):


 configfile: "config/config.yaml"
+configfile: "config/config.validation.yaml"
+configfile: "config/test/config.validation.yaml"


 COSTS = f"data/costs_{config['costs']['year']}.csv"
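Snakemake merges every `configfile:` directive into the global `config` dict in order, so the validation files added here can override keys from the base config. A minimal sketch of that layering, assuming the paths from the hunk; the rule is illustrative only:

```python
# Minimal Snakefile sketch: later configfile directives are merged into
# `config`, overriding overlapping keys from earlier ones.
configfile: "config/config.yaml"                    # base settings
configfile: "config/test/config.validation.yaml"    # validation overrides

rule show_config:
    run:
        # e.g. a 'snapshots' block from the validation file wins over
        # any 'snapshots' block in the base config
        print(config.get("snapshots"))
```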
@@ -106,7 +108,7 @@ rule sync:
         cluster=f"{config['remote']['ssh']}:{config['remote']['path']}",
     shell:
         """
-        rsync -uvarh --no-g --ignore-missing-args --files-from=.sync-send . {params.cluster}
-        rsync -uvarh --no-g {params.cluster}/results results
-        rsync -uvarh --no-g {params.cluster}/logs logs
+        rsync -uvarh --ignore-missing-args --files-from=.sync-send . {params.cluster}
+        rsync -uvarh --no-g {params.cluster}/results results || echo "No results directory, skipping rsync"
+        rsync -uvarh --no-g {params.cluster}/logs logs || echo "No logs directory, skipping rsync"
         """
@@ -9,4 +9,9 @@ scenario:
   clusters: # number of nodes in Europe, any integer between 37 (1 node per country-zone) and several hundred
   - 37
   opts: # only relevant for PyPSA-Eur
-  - 'Ept-12h'
+  - 'Ept'
+
+snapshots:
+  start: "2019-04-01"
+  end: "2019-04-10"
+  inclusive: 'left' # include start, not end
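The added `snapshots` block restricts the model horizon, and `inclusive: 'left'` keeps the start date while dropping the end date. A minimal pandas sketch of the index this configuration describes (hourly resolution is assumed here):

```python
import pandas as pd

# Hourly snapshots from the config above: start included, end excluded.
snapshots = pd.date_range(
    start="2019-04-01", end="2019-04-10", freq="h", inclusive="left"
)

print(snapshots[0])    # 2019-04-01 00:00:00
print(snapshots[-1])   # 2019-04-09 23:00:00 (the end date itself is excluded)
print(len(snapshots))  # 216 = 9 days * 24 h
```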
@@ -56,3 +56,4 @@ dependencies:

   - pip:
     - tsam>=1.1.0
+    - git+https://github.com/pypsa/pypsa.git
@@ -16,7 +16,7 @@ def memory(w):
             factor *= int(m.group(1)) / 8760
             break
     if w.clusters.endswith("m") or w.clusters.endswith("c"):
-        return int(factor * (35000 + 600 * int(w.clusters[:-1])))
+        return int(factor * (55000 + 600 * int(w.clusters[:-1])))
     elif w.clusters == "all":
         return int(factor * (18000 + 180 * 4000))
     else:
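The only change here raises the base memory estimate for clustered networks with an "m"/"c" suffix from 35 GB to 55 GB. A standalone sketch of the heuristic with the new constant, so the estimate can be sanity-checked; the opts loop is simplified and the final else-branch constants are assumptions, since that branch lies outside the hunk:

```python
import re

def memory(clusters: str, opts: list[str]) -> int:
    """Sketch of the solve rule's memory heuristic, in MB."""
    factor = 3.0
    for o in opts:
        m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
        if m is not None:
            # a time-segmentation opt like '4380seg' scales memory
            # by its share of the 8760 hours in a year
            factor *= int(m.group(1)) / 8760
            break
    if clusters.endswith("m") or clusters.endswith("c"):
        # base constant raised from 35000 to 55000 in this commit
        return int(factor * (55000 + 600 * int(clusters[:-1])))
    elif clusters == "all":
        return int(factor * (18000 + 180 * 4000))
    else:
        # placeholder constants; the real else branch is not shown in the diff
        return int(factor * (10000 + 195 * int(clusters)))

print(memory("37m", []))  # int(3.0 * (55000 + 600 * 37)) = 231600
```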
@@ -26,6 +26,7 @@ rule solve_network:
     threads: 4
     resources:
         mem_mb=memory,
+        walltime="24:00:00",
     shadow:
         "minimal"
     conda:
@@ -159,7 +159,7 @@ def sanitize_carriers(n, config):

     for c in n.iterate_components():
         if "carrier" in c.df:
-            add_missing_carriers(n, c.df)
+            add_missing_carriers(n, c.df.carrier)

     carrier_i = n.carriers.index
     nice_names = (
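The fix passes the `carrier` column (a Series of carrier names) instead of the whole component DataFrame. A small sketch of why the distinction matters; `add_missing_carriers` below is a stand-in mock, not the real helper's signature:

```python
import pandas as pd

def add_missing_carriers(existing: pd.Index, carriers: pd.Series) -> pd.Index:
    # Mock helper: it needs carrier *names*, so it must receive the
    # 'carrier' column rather than the full component DataFrame.
    missing = pd.Index(carriers.unique()).difference(existing)
    return existing.append(missing)

generators = pd.DataFrame(
    {"carrier": ["wind", "solar", "gas"], "p_nom": [10.0, 5.0, 8.0]}
)
existing = pd.Index(["wind"])

# c.df.carrier in the fixed call corresponds to generators.carrier here:
print(add_missing_carriers(existing, generators.carrier))
# Index(['wind', 'gas', 'solar'], dtype='object')
```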
@@ -809,13 +809,10 @@ if __name__ == "__main__":
         unit_commitment = None

     if params.conventional["dynamic_fuel_price"]:
-        monthly_fuel_price = pd.read_csv(
-            snakemake.input.fuel_price, index_col=0, header=0
+        fuel_price = pd.read_csv(
+            snakemake.input.fuel_price, index_col=0, header=0, parse_dates=True
         )
-        monthly_fuel_price.index = pd.date_range(
-            start=n.snapshots[0], end=n.snapshots[-1], freq="MS"
-        )
-        fuel_price = monthly_fuel_price.reindex(n.snapshots).fillna(method="ffill")
+        fuel_price = fuel_price.reindex(n.snapshots).fillna(method="ffill")
     else:
         fuel_price = None

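The rewrite parses dates directly from the CSV (`parse_dates=True`) and drops the hand-built `pd.date_range`, which silently assumed the file started exactly at the first snapshot. Reindexing the monthly series onto the snapshots and forward-filling gives every hour the latest known monthly price. A self-contained sketch of that pattern with made-up numbers (`.ffill()` is the modern spelling of the diff's `fillna(method="ffill")`):

```python
import pandas as pd

# Made-up monthly fuel prices, as read_csv(..., index_col=0, parse_dates=True)
# would return them: one row per month start.
monthly = pd.DataFrame(
    {"gas": [25.0, 27.5], "coal": [9.1, 9.4]},
    index=pd.to_datetime(["2019-04-01", "2019-05-01"]),
)

# Hourly network snapshots covering both months:
snapshots = pd.date_range("2019-04-01", "2019-06-01", freq="h", inclusive="left")

# Only the month starts match on reindex; every other hour becomes NaN
# and is forward-filled with the last known monthly value.
fuel_price = monthly.reindex(snapshots).ffill()

print(fuel_price.loc["2019-04-15 12:00"])  # April prices: gas 25.0, coal 9.1
print(fuel_price.loc["2019-05-20 00:00"])  # May prices: gas 27.5, coal 9.4
```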
@@ -52,14 +52,6 @@ from _helpers import configure_logging

 logger = logging.getLogger(__name__)

-validation_year = 2019
-
-# sheet names to pypsa syntax
-sheet_name_map = {
-    "5.1 Hard coal and lignite": "coal",
-    "5.2 Mineral oil": "oil",
-    "5.3.1 Natural gas - indices": "gas",
-}

 # keywords in datasheet
 keywords = {
@@ -69,6 +61,15 @@ keywords = {
     "gas": "GP09-062 Natural gas",
 }

+# sheet names to pypsa syntax
+sheet_name_map = {
+    "coal": "5.1 Hard coal and lignite",
+    "lignite": "5.1 Hard coal and lignite",
+    "oil": "5.2 Mineral oil",
+    "gas": "5.3.1 Natural gas - indices",
+}
+
+
 # import fuel price 2015 in Eur/MWh
 # source for coal, oil, gas, Agora, slide 24 [2]
 # source lignite, price for 2020, scaled by price index, ENTSO-E [3]
@@ -76,43 +77,30 @@ price_2015 = {"coal": 8.3, "oil": 30.6, "gas": 20.6, "lignite": 3.8}  # 2020 3.9


 def get_fuel_price():
-    fuel_price = pd.read_excel(
-        snakemake.input.fuel_price_raw, sheet_name=list(sheet_name_map.keys())
-    )
-    fuel_price = {
-        sheet_name_map[key]: value
-        for key, value in fuel_price.items()
-        if key in sheet_name_map
-    }
-    # lignite and hard coal are on the same sheet
-    fuel_price["lignite"] = fuel_price["coal"]
-
-    def extract_df(sheet, keyword):
-        # Create a DatetimeIndex for the first day of each month of a given year
-        month_list = pd.date_range(
-            start=f"{validation_year}-01-01", end=f"{validation_year}-12-01", freq="MS"
-        ).month
-        start = fuel_price[sheet].index[(fuel_price[sheet] == keyword).any(axis=1)]
-        df = fuel_price[sheet].loc[start[0] : start[0] + 18]
-        df = df.dropna(axis=0)
-        df.set_index(df.columns[0], inplace=True)
-        df.index = df.index.map(lambda x: int(x.replace(" ...", "")))
-        df = df.iloc[:, :12]
-        df.columns = month_list
-        return df
-
-    m_price = {}
+    price = {}
     for carrier, keyword in keywords.items():
-        df = extract_df(carrier, keyword).loc[validation_year]
-        m_price[carrier] = df.mul(price_2015[carrier] / 100)
+        sheet_name = sheet_name_map[carrier]
+        df = pd.read_excel(
+            snakemake.input.fuel_price_raw,
+            sheet_name=sheet_name,
+            index_col=0,
+            skiprows=6,
+            nrows=18,
+        )
+        df = df.dropna(axis=0).iloc[:, :12]
+        start, end = df.index[0], str(int(df.index[-1][:4]) + 1)
+        df = df.stack()
+        df.index = pd.date_range(start=start, end=end, freq="MS", inclusive="left")
+        df = df.mul(price_2015[carrier] / 100)
+        price[carrier] = df

-    pd.concat(m_price, axis=1).to_csv(snakemake.output.fuel_price)
+    return pd.concat(price, axis=1)


 def get_co2_price():
     # emission price
-    CO2_price = pd.read_excel(snakemake.input.co2_price_raw, index_col=1, header=5)
-    CO2_price["Auction Price €/tCO2"].to_csv(snakemake.output.co2_price)
+    co2_price = pd.read_excel(snakemake.input.co2_price_raw, index_col=1, header=5)
+    return co2_price["Auction Price €/tCO2"]


 if __name__ == "__main__":
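The rewritten `get_fuel_price` no longer hardcodes `validation_year = 2019`: each sheet is read as a years-by-months table, stacked into one long monthly series, and given a `DatetimeIndex` that runs from January of the first year up to (but excluding) January of the year after the last one. A toy reproduction of that index arithmetic; the two-year table below merely stands in for the real Excel layout:

```python
import pandas as pd

# Toy stand-in for one price-index sheet after read_excel(..., index_col=0):
# rows are year labels, columns are the 12 months, values are indices (2015=100).
df = pd.DataFrame(
    [[100.0 + m for m in range(12)], [112.0 + m for m in range(12)]],
    index=["2015", "2016"],
    columns=range(1, 13),
)

# Same steps as in the new loop body:
start, end = df.index[0], str(int(df.index[-1][:4]) + 1)  # "2015", "2017"
series = df.stack()  # row-major: one value per (year, month) pair
series.index = pd.date_range(start=start, end=end, freq="MS", inclusive="left")

# Scale the 2015-based index (in percent) to an absolute price in Eur/MWh:
price_2015_gas = 20.6
fuel_price = series.mul(price_2015_gas / 100)

print(series.index[0], series.index[-1])  # 2015-01-01, 2016-12-01
print(len(series))  # 24 monthly values, matching 2 years x 12 months
```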
@@ -123,5 +111,8 @@ if __name__ == "__main__":

     configure_logging(snakemake)

-    get_fuel_price()
-    get_co2_price()
+    fuel_price = get_fuel_price()
+    fuel_price.to_csv(snakemake.output.fuel_price)
+
+    co2_price = get_co2_price()
+    co2_price.to_csv(snakemake.output.co2_price)
@@ -506,15 +506,6 @@ if __name__ == "__main__":
         ).all() or x.isnull().all(), "The `potential` configuration option must agree for all renewable carriers, for now!"
         return v

-    # translate str entries of aggregation_strategies to pd.Series functions:
-    aggregation_strategies = {
-        p: {
-            k: getattr(pd.Series, v)
-            for k, v in params.aggregation_strategies[p].items()
-        }
-        for p in params.aggregation_strategies.keys()
-    }
-
     custom_busmap = params.custom_busmap
     if custom_busmap:
         custom_busmap = pd.read_csv(
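This hunk and the next delete the same inline translation of `aggregation_strategies` from strings to `pd.Series` methods; the scripts now pass `params.aggregation_strategies` through unchanged, so the translation presumably happens once at the point of use. A sketch of what the removed block did, with illustrative config keys:

```python
import pandas as pd

# Example config entries, as strings (structure mirrors the removed block;
# the component and attribute keys shown are illustrative):
aggregation_strategies_cfg = {
    "generators": {"p_nom_max": "sum", "marginal_cost": "mean"},
    "buses": {"v_nom": "max"},
}

# The removed translation: look up each strategy name on pd.Series,
# e.g. "mean" -> pd.Series.mean.
aggregation_strategies = {
    comp: {key: getattr(pd.Series, name) for key, name in strategies.items()}
    for comp, strategies in aggregation_strategies_cfg.items()
}

s = pd.Series([1.0, 2.0, 3.0])
mean_func = aggregation_strategies["generators"]["marginal_cost"]
print(mean_func(s))  # 2.0 -- the unbound pd.Series.mean applied to s
```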
@@ -534,15 +534,6 @@ if __name__ == "__main__":
     n = pypsa.Network(snakemake.input.network)
     Nyears = n.snapshot_weightings.objective.sum() / 8760

-    # translate str entries of aggregation_strategies to pd.Series functions:
-    aggregation_strategies = {
-        p: {
-            k: getattr(pd.Series, v)
-            for k, v in params.aggregation_strategies[p].items()
-        }
-        for p in params.aggregation_strategies.keys()
-    }
-
     n, trafo_map = simplify_network_to_380(n)

     technology_costs = load_costs(
@@ -560,7 +551,7 @@ if __name__ == "__main__":
         params.p_max_pu,
         params.simplify_network["exclude_carriers"],
         snakemake.output,
-        aggregation_strategies,
+        params.aggregation_strategies,
     )

     busmaps = [trafo_map, simplify_links_map]
@@ -573,12 +564,12 @@ if __name__ == "__main__":
             params.length_factor,
             params.simplify_network,
             snakemake.output,
-            aggregation_strategies=aggregation_strategies,
+            aggregation_strategies=params.aggregation_strategies,
         )
         busmaps.append(stub_map)

     if params.simplify_network["to_substations"]:
-        n, substation_map = aggregate_to_substations(n, aggregation_strategies)
+        n, substation_map = aggregate_to_substations(n, params.aggregation_strategies)
         busmaps.append(substation_map)

     # treatment of outliers (nodes without a profile for considered carrier):
@@ -592,7 +583,9 @@ if __name__ == "__main__":
         logger.info(
             f"clustering preparation (hac): aggregating {len(buses_i)} buses of type {carrier}."
         )
-        n, busmap_hac = aggregate_to_substations(n, aggregation_strategies, buses_i)
+        n, busmap_hac = aggregate_to_substations(
+            n, params.aggregation_strategies, buses_i
+        )
         busmaps.append(busmap_hac)

     if snakemake.wildcards.simpl:
@@ -603,7 +596,7 @@ if __name__ == "__main__":
             solver_name,
             params.simplify_network["algorithm"],
             params.simplify_network["feature"],
-            aggregation_strategies,
+            params.aggregation_strategies,
         )
         busmaps.append(cluster_map)
