Merge branch 'master' into dac-location-consistency

Fabian Neumann 2024-02-05 08:50:33 +01:00 committed by GitHub
commit 5b71979547
7 changed files with 78 additions and 63 deletions

View File

@@ -32,7 +32,14 @@ jobs:
- ubuntu-latest
- macos-latest
- windows-latest
inhouse:
- stable
- master
exclude:
- os: macos-latest
inhouse: master
- os: windows-latest
inhouse: master
runs-on: ${{ matrix.os }}
defaults:
@@ -46,16 +53,6 @@ jobs:
run: |
echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc
- name: Add solver to environment
run: |
echo -e "- glpk\n- ipopt<3.13.3" >> envs/environment.yaml
if: ${{ matrix.os }} == 'windows-latest'
- name: Add solver to environment
run: |
echo -e "- glpk\n- ipopt" >> envs/environment.yaml
if: ${{ matrix.os }} != 'windows-latest'
- name: Setup micromamba
uses: mamba-org/setup-micromamba@v1
with:
@@ -66,6 +63,11 @@ jobs:
cache-environment: true
cache-downloads: true
- name: Install inhouse packages
run: |
pip install git+https://github.com/PyPSA/atlite.git@master git+https://github.com/PyPSA/powerplantmatching.git@master git+https://github.com/PyPSA/linopy.git@master
if: matrix.inhouse == 'master'
- name: Set cache dates
run: |
echo "WEEK=$(date +'%Y%U')" >> $GITHUB_ENV
@@ -86,7 +88,7 @@ jobs:
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
- name: Upload artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4.3.0
with:
name: resources-results
path: |
@@ -94,3 +96,4 @@ jobs:
results
if-no-files-found: warn
retention-days: 1
if: matrix.os == 'ubuntu-latest' && matrix.inhouse == 'stable'

View File

@@ -606,9 +606,34 @@ industry:
MWh_NH3_per_MWh_H2_cracker: 1.46 # https://github.com/euronion/trace/blob/44a5ff8401762edbef80eff9cfe5a47c8d3c8be4/data/efficiencies.csv
NH3_process_emissions: 24.5
petrochemical_process_emissions: 25.5
HVC_primary_fraction: 1.
HVC_mechanical_recycling_fraction: 0.
HVC_chemical_recycling_fraction: 0.
# HVC primary/recycling fractions based on values used in Neumann et al., https://doi.org/10.1016/j.joule.2023.06.016, linearly interpolated between 2020 and 2050
# 2020 recycling rates based on Agora, https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf
# fractions refer to the total primary HVC production in 2020
# assumes 6.7 Mt of plastics produced from recycling in 2020
HVC_primary_fraction:
2020: 1.0
2025: 0.9
2030: 0.8
2035: 0.7
2040: 0.6
2045: 0.5
2050: 0.4
HVC_mechanical_recycling_fraction:
2020: 0.12
2025: 0.15
2030: 0.18
2035: 0.21
2040: 0.24
2045: 0.27
2050: 0.30
HVC_chemical_recycling_fraction:
2020: 0.0
2025: 0.0
2030: 0.04
2035: 0.08
2040: 0.12
2045: 0.16
2050: 0.20
HVC_production_today: 52.
MWh_elec_per_tHVC_mechanical_recycling: 0.547
MWh_elec_per_tHVC_chemical_recycling: 6.9
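The fractions above are given on a five-year grid. As an illustration of the linear interpolation mentioned in the comments, here is a minimal standalone pandas sketch (not part of the workflow; the helper name is hypothetical):

```python
# Illustrative only: linearly interpolate a {year: fraction} mapping, such as
# HVC_primary_fraction above, to an arbitrary planning year. The config already
# ships pre-interpolated five-year values.
import pandas as pd

hvc_primary_fraction = {
    2020: 1.0, 2025: 0.9, 2030: 0.8, 2035: 0.7, 2040: 0.6, 2045: 0.5, 2050: 0.4
}

def fraction_for_year(mapping: dict, year: int) -> float:
    """Hypothetical helper: interpolate linearly between the support years."""
    s = pd.Series(mapping).sort_index()
    yearly = s.reindex(range(s.index.min(), s.index.max() + 1))
    return float(yearly.interpolate(method="index").loc[year])

# 2033 lies 3/5 of the way from 2030 (0.8) to 2035 (0.7)
assert abs(fraction_for_year(hvc_primary_fraction, 2033) - 0.74) < 1e-9
```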

View File

@@ -62,6 +62,17 @@ Upcoming Release
* The rule ``plot_network`` has been split into separate rules for plotting
electricity, hydrogen and gas networks.
* To determine the optimal topology for the requested number of clusters, the workflow previously used ``pyomo`` in combination with ``ipopt`` or ``gurobi``. This dependency has been replaced by ``linopy`` in combination with ``scip`` or ``gurobi``. The environment file has been updated accordingly.
* The ``highs`` solver was added to the default environment file.
* Default settings for recycling rates and primary production shares of
high-value chemicals (HVC) have been set in accordance with the values used in
`Neumann et al. (2023) <https://doi.org/10.1016/j.joule.2023.06.016>`_,
linearly interpolated between 2020 and 2050. The recycling rates are based on
data from `Agora Energiewende (2021)
<https://static.agora-energiewende.de/fileadmin/Projekte/2021/2021_02_EU_CEAP/A-EW_254_Mobilising-circular-economy_study_WEB.pdf>`_.

PyPSA-Eur 0.9.0 (5th January 2024)
==================================

View File

@@ -35,8 +35,9 @@ dependencies:
- netcdf4
- networkx
- scipy
- glpk
- shapely>=2.0
- pyomo
- pyscipopt
- matplotlib
- proj
- fiona
@@ -47,7 +48,7 @@ dependencies:
- tabula-py
- pyxlsb
- graphviz
- ipopt
- pre-commit
# Keep in conda environment when calling ipython
- ipython
@@ -60,3 +61,4 @@ dependencies:
- pip:
- tsam>=2.3.1
- highspy

View File

@@ -482,7 +482,7 @@ def add_heating_capacities_installed_before_baseyear(
"Link",
nodes,
suffix=f" {name} gas boiler-{grouping_year}",
bus0=spatial.gas.nodes,
bus0="EU gas" if "EU gas" in spatial.gas.nodes else nodes + " gas",
bus1=nodes + " " + name + " heat",
bus2="co2 atmosphere",
carrier=name + " gas boiler",

View File

@@ -122,14 +122,15 @@ Exemplary unsolved network clustered to 37 nodes:
"""
import logging
import os
import warnings
from functools import reduce
import geopandas as gpd
import linopy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyomo.environ as po
import pypsa
import seaborn as sns
from _helpers import configure_logging, update_p_nom_max
@@ -214,7 +215,7 @@ def get_feature_for_hac(n, buses_i=None, feature=None):
return feature_data
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="scip"):
"""
Determine the number of clusters per country.
"""
@ -254,31 +255,22 @@ def distribute_clusters(n, n_clusters, focus_weights=None, solver_name="cbc"):
L.sum(), 1.0, rtol=1e-3
), f"Country weights L must sum up to 1.0 when distributing clusters. Is {L.sum()}."
m = po.ConcreteModel()
def n_bounds(model, *n_id):
return (1, N[n_id])
m.n = po.Var(list(L.index), bounds=n_bounds, domain=po.Integers)
m.tot = po.Constraint(expr=(po.summation(m.n) == n_clusters))
m.objective = po.Objective(
expr=sum((m.n[i] - L.loc[i] * n_clusters) ** 2 for i in L.index),
sense=po.minimize,
m = linopy.Model()
clusters = m.add_variables(
lower=1, upper=N, coords=[L.index], name="n", integer=True
)
opt = po.SolverFactory(solver_name)
if solver_name == "appsi_highs" or not opt.has_capability("quadratic_objective"):
logger.warning(
f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `ipopt`."
m.add_constraints(clusters.sum() == n_clusters, name="tot")
# leave out constant in objective (L * n_clusters) ** 2
m.objective = (clusters * clusters - 2 * clusters * L * n_clusters).sum()
if solver_name == "gurobi":
logging.getLogger("gurobipy").propagate = False
elif solver_name != "scip":
logger.info(
f"The configured solver `{solver_name}` does not support quadratic objectives. Falling back to `scip`."
)
opt = po.SolverFactory("ipopt")
results = opt.solve(m)
assert (
results["Solver"][0]["Status"] == "ok"
), f"Solver returned non-optimally: {results}"
return pd.Series(m.n.get_values(), index=L.index).round().astype(int)
solver_name = "scip"
m.solve(solver_name=solver_name)
return m.solution["n"].to_series().astype(int)
def busmap_for_n_clusters(
@@ -372,7 +364,7 @@ def busmap_for_n_clusters(
return (
n.buses.groupby(["country", "sub_network"], group_keys=False)
.apply(busmap_for_country)
.apply(busmap_for_country, include_groups=False)
.squeeze()
.rename("busmap")
)
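For context, ``include_groups=False`` opts in to the future pandas behaviour: since pandas 2.2, ``DataFrameGroupBy.apply`` warns that the grouping columns will no longer be passed to the applied function. A minimal sketch with toy data (not from the workflow):

```python
# pandas >= 2.2 deprecates passing the grouping column(s) to the applied
# function; include_groups=False silences the warning and adopts the new
# behaviour, so `g` below has no "country" column.
import pandas as pd

df = pd.DataFrame({"country": ["DE", "DE", "FR"], "bus": ["b1", "b2", "b3"]})

busmap = df.groupby("country", group_keys=False).apply(
    lambda g: g["bus"] + " cluster", include_groups=False
)
print(busmap)  # b1 cluster, b2 cluster, b3 cluster (original index kept)
```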
@@ -385,7 +377,7 @@ def clustering_for_n_clusters(
aggregate_carriers=None,
line_length_factor=1.25,
aggregation_strategies=dict(),
solver_name="cbc",
solver_name="scip",
algorithm="hac",
feature=None,
extended_link_costs=0,
@ -462,7 +454,6 @@ if __name__ == "__main__":
params = snakemake.params
solver_name = snakemake.config["solving"]["solver"]["name"]
solver_name = "appsi_highs" if solver_name == "highs" else solver_name
n = pypsa.Network(snakemake.input.network)
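The rewritten ``distribute_clusters`` solves a small integer quadratic program: minimise sum_c (n_c - L_c * n_clusters)^2 subject to sum_c n_c = n_clusters and 1 <= n_c <= (buses in country c); since (L_c * n_clusters)^2 is a constant, the implementation drops it from the objective. A self-contained sketch of the same linopy model with invented example data (assumes ``pyscipopt`` is installed, as per the updated environment):

```python
# Sketch of the integer QP behind distribute_clusters, with made-up data:
# distribute 10 clusters over three countries in proportion to weights L.
import linopy
import pandas as pd

L = pd.Series([0.5, 0.3, 0.2], index=["DE", "FR", "PL"])  # weights, sum to 1
N = pd.Series([20, 15, 10], index=L.index)  # available buses per country
n_clusters = 10

m = linopy.Model()
n = m.add_variables(lower=1, upper=N, coords=[L.index], name="n", integer=True)
m.add_constraints(n.sum() == n_clusters, name="tot")
# expand (n - L * n_clusters)^2 and drop the constant term (L * n_clusters)^2
m.objective = (n * n - 2 * n * L * n_clusters).sum()
m.solve(solver_name="scip")
print(m.solution["n"].to_series().astype(int))  # DE 5, FR 3, PL 2
```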

View File

@@ -1319,22 +1319,6 @@ def add_storage_and_grids(n, costs):
n, "H2 pipeline ", carriers=["DC", "gas pipeline"]
)
h2_pipes["p_nom"] = 0.0
if snakemake.input.get("custom_h2_pipelines"):
fn = snakemake.input.custom_h2_pipelines
custom_pipes = pd.read_csv(fn, index_col=0)
h2_pipes = pd.concat([h2_pipes, custom_pipes])
# drop duplicates according to buses (order can be different) and keep pipe with highest p_nom
h2_pipes["buses_sorted"] = h2_pipes[["bus0", "bus1"]].apply(sorted, axis=1)
h2_pipes = (
h2_pipes.sort_values("p_nom")
.drop_duplicates(subset=["buses_sorted"], keep="last")
.drop(columns="buses_sorted")
)
# TODO Add efficiency losses
n.madd(
"Link",
@@ -1343,7 +1327,6 @@ def add_storage_and_grids(n, costs):
bus1=h2_pipes.bus1.values + " H2",
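# p_min_pu=-1 permits reverse flow, i.e. the pipelines are bidirectional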
p_min_pu=-1,
p_nom_extendable=True,
p_nom_min=h2_pipes.p_nom.values,
length=h2_pipes.length.values,
capital_cost=costs.at["H2 (g) pipeline", "fixed"] * h2_pipes.length.values,
carrier="H2 pipeline",