Merge branch 'master' into post-merge-param

Commit: 28d39680c6

@@ -67,7 +67,7 @@ repos:
 
   # Do YAML formatting (before the linter checks it for misses)
   - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
-    rev: v2.8.0
+    rev: v2.9.0
     hooks:
       - id: pretty-format-yaml
         args: [--autofix, --indent, "2", --preserve-quotes]

@@ -623,9 +623,9 @@ clustering:
 solving:
   #tmpdir: "path/to/tmp"
   options:
-    formulation: kirchhoff
     clip_p_max_pu: 1.e-2
     load_shedding: false
+    transmission_losses: 0
     noisy_costs: true
     skip_iterations: true
     track_iterations: false

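The fixed `formulation: kirchhoff` key is dropped and a `transmission_losses` option takes its place. A minimal sketch of how the new key is read (mirroring the solve_network.py hunk further below; `cf_solving` is assumed to be the `solving: options:` subtree):

    cf_solving = config["solving"]["options"]
    # 0 (the default) keeps transmission lossless; n > 0 adds n tangents.
    transmission_losses = cf_solving.get("transmission_losses", 0)
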
@@ -1,7 +1,7 @@
 ,Unit,Values,Description
 options,,,
--- formulation,--,"Any of {'angles', 'kirchhoff', 'cycles', 'ptdf'}","Specifies which variant of linearized power flow formulations to use in the optimisation problem. Recommended is 'kirchhoff'. Explained in `this article <https://arxiv.org/abs/1704.01881>`_."
 -- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
+-- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
 -- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009,0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09,0.11)`."
 -- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
 -- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."

@@ -23,6 +23,12 @@ Upcoming Release
   hydrogen fuel cell. Add switches for both re-electrification options under
   ``sector: hydrogen_turbine:`` and ``sector: hydrogen_fuel_cell:``.
 
+* Remove ``vresutils`` dependency.
+
+* Add option to include a piecewise linear approximation of transmission losses,
+  e.g. by setting ``solving: options: transmission_losses: 2`` for an
+  approximation with two tangents.
+
 PyPSA-Eur 0.8.0 (18th March 2023)
 =================================
 

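As a usage illustration of the new option (a hedged sketch, assuming PyPSA >= 0.23 as pinned in the environment hunks below; `pypsa.examples.ac_dc_meshed` is PyPSA's bundled toy network):

    import pypsa

    # Solve a small example network with a two-tangent loss approximation,
    # the same setting as ``solving: options: transmission_losses: 2``.
    n = pypsa.examples.ac_dc_meshed()
    n.optimize(transmission_losses=2)
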
@@ -9,7 +9,6 @@ sphinxcontrib-bibtex
 myst-parser # recommark is deprecated, https://stackoverflow.com/a/71660856/13573820
 
 pypsa
-vresutils>=0.3.1
 powerplantmatching>=0.5.5
 atlite>=0.2.9
 dask[distributed]

@@ -133,12 +133,12 @@ The coefficient of performance (COP) of air- and ground-sourced heat pumps depends
 For the sink water temperature Tsink we assume 55 °C [`Config <https://github.com/PyPSA/pypsa-eur-sec/blob/3daff49c9999ba7ca7534df4e587e1d516044fc3/config.default.yaml#L207>`_ file]. For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 <https://doi.org/10.1002/qj.3803>`_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Staffell et al. <https://pubs.rsc.org/en/content/articlelanding/2012/EE/c2ee22653g>`_. For air-sourced heat pumps (ASHP), we use the function:
 
 .. math::
-   COP(\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2
+   COP(\Delta T) = 6.81 - 0.121\Delta T + 0.000630\Delta T^2
 
 for ground-sourced heat pumps (GSHP), we use the function:
 
 .. math::
-   COP(\Delta T) = 8.77 + 0.150\Delta T + 0.000734\Delta T^2
+   COP(\Delta T) = 8.77 - 0.150\Delta T + 0.000734\Delta T^2
 
 **Resistive heaters**
 

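A quick numerical check of the corrected signs (my arithmetic, not part of the commit): at a temperature difference of 40 K, roughly a 55 °C sink against a 15 °C source, the regressions give plausible COPs only with the minus sign:

    def cop_ashp(delta_t):
        return 6.81 - 0.121 * delta_t + 0.000630 * delta_t**2

    def cop_gshp(delta_t):
        return 8.77 - 0.150 * delta_t + 0.000734 * delta_t**2

    print(cop_ashp(40.0))  # 6.81 - 4.84 + 1.008 ≈ 2.98
    print(cop_gshp(40.0))  # 8.77 - 6.00 + 1.174 ≈ 3.94

With the former plus sign, an ASHP at 40 K would come out near COP 12, far above realistic values.
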
@@ -9,6 +9,6 @@ Support
 
 * In case of code-related **questions**, please post on `stack overflow <https://stackoverflow.com/questions/tagged/pypsa>`_.
 * For non-programming related and more general questions please refer to the `mailing list <https://groups.google.com/group/pypsa>`_.
-* To **discuss** with other PyPSA users, organise projects, share news, and get in touch with the community you can use the [discord server](https://discord.gg/JTdvaEBb).
+* To **discuss** with other PyPSA users, organise projects, share news, and get in touch with the community you can use the `discord server <https://discord.gg/AnuJBk23FU>`_.
 * For **bugs and feature requests**, please use the `issue tracker <https://github.com/PyPSA/pypsa-eur/issues>`_.
 * We strongly welcome anyone interested in providing **contributions** to this project. If you have any ideas, suggestions or encounter problems, feel invited to file issues or make pull requests on `Github <https://github.com/PyPSA/PyPSA>`_. For further information on how to contribute, please refer to :ref:`contributing`.

@@ -226,7 +226,7 @@ dependencies:
 - nspr=4.35
 - nss=3.88
 - numexpr=2.8.3
-- numpy=1.23.5
+- numpy=1.24
 - openjdk=17.0.3
 - openjpeg=2.5.0
 - openpyxl=3.1.0

@@ -378,4 +378,3 @@ dependencies:
   - highspy==1.5.0.dev0
   - pybind11==2.10.3
   - tsam==2.2.2
-  - vresutils==0.3.1

@@ -10,7 +10,7 @@ dependencies:
 - python>=3.8
 - pip
 
-- pypsa>=0.21.3
+- pypsa>=0.23
 - atlite>=0.2.9
 - dask
 

@@ -25,7 +25,7 @@ dependencies:
 - pytables
 - lxml
 - powerplantmatching>=0.5.5
-- numpy<1.24
+- numpy
 - pandas>=1.4
 - geopandas>=0.11.0
 - xarray

@@ -55,5 +55,4 @@ dependencies:
 - rasterio!=1.2.10
 
 - pip:
-  - vresutils>=0.3.1
   - tsam>=1.1.0

@@ -16,8 +16,6 @@ rule solve_network:
         ),
         python=LOGS
         + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
-        memory=LOGS
-        + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
     benchmark:
         BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
     threads: 4

@@ -45,8 +43,6 @@ rule solve_operations_network:
         ),
         python=LOGS
         + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
-        memory=LOGS
-        + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
     benchmark:
         (
             BENCHMARKS

@@ -100,8 +100,6 @@ rule solve_sector_network_myopic:
         + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
         python=LOGS
         + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
-        memory=LOGS
-        + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
     threads: 4
     resources:
         mem_mb=config["solving"]["mem"],

@@ -23,8 +23,6 @@ rule solve_sector_network:
         + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
         python=LOGS
         + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
-        memory=LOGS
-        + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
     threads: config["solving"]["solver"].get("threads", 4)
     resources:
         mem_mb=config["solving"]["mem"],

@@ -85,16 +85,18 @@ It further adds extendable ``generators`` with **zero** capacity for
 """
 
 import logging
+from itertools import product
 
 import geopandas as gpd
 import numpy as np
 import pandas as pd
 import powerplantmatching as pm
 import pypsa
+import scipy.sparse as sparse
 import xarray as xr
 from _helpers import configure_logging, update_p_nom_max
 from powerplantmatching.export import map_country_bus
-from vresutils import transfer as vtransfer
+from shapely.prepared import prep
 
 idx = pd.IndexSlice
 

@@ -216,6 +218,21 @@ def load_powerplants(ppl_fn):
     )
 
 
+def shapes_to_shapes(orig, dest):
+    """
+    Adapted from vresutils.transfer.Shapes2Shapes()
+    """
+    orig_prepped = list(map(prep, orig))
+    transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float)
+
+    for i, j in product(range(len(dest)), range(len(orig))):
+        if orig_prepped[j].intersects(dest[i]):
+            area = orig[j].intersection(dest[i]).area
+            transfer[i, j] = area / dest[i].area
+
+    return transfer
+
+
 def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
     substation_lv_i = n.buses.index[n.buses["substation_lv"]]
     regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i)

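A hypothetical use of the new helper (file names invented for illustration; the real call site appears in the attach_load hunk just below, where the helper above is in scope):

    import geopandas as gpd

    nuts3 = gpd.read_file("nuts3_shapes.geojson").geometry    # assumed input
    regions = gpd.read_file("regions.geojson").geometry       # assumed input

    # Sparse matrix of shape (len(regions), len(nuts3));
    # transfer[i, j] is the fraction of regions[i] covered by nuts3[j].
    transfer = shapes_to_shapes(nuts3, regions)

attach_load then uses the transposed matrix to disaggregate NUTS3 GDP and population onto bus regions without the vresutils dependency.
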
@@ -232,9 +249,7 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0):
             return pd.DataFrame({group.index[0]: l})
         else:
             nuts3_cntry = nuts3.loc[nuts3.country == cntry]
-            transfer = vtransfer.Shapes2Shapes(
-                group, nuts3_cntry.geometry, normed=False
-            ).T.tocsr()
+            transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr()
             gdp_n = pd.Series(
                 transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index
             )

@@ -403,7 +418,9 @@ def attach_conventional_generators(
         if f"conventional_{carrier}_{attr}" in conventional_inputs:
             # Values affecting generators of technology k country-specific
             # First map generator buses to countries; then map countries to p_max_pu
-            values = pd.read_csv(values, index_col=0).iloc[:, 0]
+            values = pd.read_csv(
+                snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
+            ).iloc[:, 0]
             bus_values = n.buses.country.map(values)
             n.generators[attr].update(
                 n.generators.loc[idx].bus.map(bus_values).dropna()

@@ -234,6 +234,7 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
     manual = gpd.GeoDataFrame(
         [["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
         columns=["NUTS_ID", "country", "pop"],
+        geometry=gpd.GeoSeries(),
     )
     manual["geometry"] = manual["country"].map(country_shapes)
     manual = manual.dropna()

@@ -22,13 +22,13 @@ from _helpers import (
     override_component_attrs,
     update_config_with_sector_opts,
 )
+from add_electricity import calculate_annuity
 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
 from networkx.algorithms import complement
 from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation
 from pypsa.geo import haversine_pts
 from pypsa.io import import_components_from_dataframe
 from scipy.stats import beta
-from vresutils.costdata import annuity
 
 logger = logging.getLogger(__name__)
 

@@ -742,7 +742,7 @@ def prepare_costs(cost_file, params, nyears):
     costs = costs.fillna(params["fill_values"])
 
     def annuity_factor(v):
-        return annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100
+        return calculate_annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100
 
     costs["fixed"] = [
         annuity_factor(v) * v["investment"] * nyears for i, v in costs.iterrows()

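calculate_annuity from add_electricity is assumed to implement the same standard annuity factor that vresutils.costdata.annuity provided; a minimal sketch of that formula (an assumption, not code from this commit):

    def calculate_annuity(n, r):
        # Annuity factor for an asset with lifetime n years at discount rate r.
        return r / (1.0 - 1.0 / (1.0 + r) ** n) if r > 0 else 1.0 / n

    # e.g. calculate_annuity(25, 0.07) ≈ 0.0858, so the wave-energy
    # annuity_factor in the next hunk is ≈ 0.0858 + 0.03 (FOM) = 0.1158 per year.
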
@@ -851,7 +851,7 @@ def add_wave(n, wave_cost_factor):
     capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600})
 
     # in EUR/MW
-    annuity_factor = annuity(25, 0.07) + 0.03
+    annuity_factor = calculate_annuity(25, 0.07) + 0.03
     costs = (
         1e6
         * wave_cost_factor

@@ -58,9 +58,8 @@ if __name__ == "__main__":
     else:
         url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"
 
-    # Save locations
     tarball_fn = Path(f"{rootpath}/bundle.tar.xz")
-    to_fn = Path(f"{rootpath}/data")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
     disable_progress = snakemake.config["run"].get("disable_progressbar", False)

@@ -29,7 +29,7 @@ if __name__ == "__main__":
 
     # Save locations
     zip_fn = Path(f"{rootpath}/IGGIELGN.zip")
-    to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
     disable_progress = snakemake.config["run"].get("disable_progressbar", False)

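The destination is now derived from the rule's first output instead of being hardcoded; a quick check with an assumed output path shows the two agree:

    from pathlib import Path

    rootpath = "."
    # Assumed rule output inside the bundle directory:
    output0 = "data/gas_network/scigrid-gas/data/IGGIELGN_PipeSegments.geojson"
    to_fn = Path(rootpath) / Path(output0).parent.parent
    print(to_fn)  # data/gas_network/scigrid-gas — the old hardcoded value
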
@@ -10,23 +10,25 @@ import logging
 
 logger = logging.getLogger(__name__)
 
-import os
-import sys
 import tarfile
 from pathlib import Path
 
-# Add pypsa-eur scripts to path for import of _helpers
-sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts")
-
 from _helpers import configure_logging, progress_retrieve
 
 if __name__ == "__main__":
+    if "snakemake" not in globals():
+        from _helpers import mock_snakemake
+
+        snakemake = mock_snakemake("retrieve_databundle")
+        rootpath = ".."
+    else:
+        rootpath = "."
     configure_logging(snakemake)
 
     url = "https://zenodo.org/record/5824485/files/pypsa-eur-sec-data-bundle.tar.gz"
 
-    tarball_fn = Path("sector-bundle.tar.gz")
-    to_fn = Path("data")
+    tarball_fn = Path(f"{rootpath}/sector-bundle.tar.gz")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
    disable_progress = snakemake.config["run"].get("disable_progressbar", False)

@@ -38,7 +38,6 @@ from _helpers import (
     override_component_attrs,
     update_config_with_sector_opts,
 )
-from vresutils.benchmark import memory_logger
 
 logger = logging.getLogger(__name__)
 pypsa.pf.logger.setLevel(logging.WARNING)

@@ -601,6 +600,7 @@ def solve_network(n, config, opts="", **kwargs):
     track_iterations = cf_solving.get("track_iterations", False)
     min_iterations = cf_solving.get("min_iterations", 4)
     max_iterations = cf_solving.get("max_iterations", 6)
+    transmission_losses = cf_solving.get("transmission_losses", 0)
 
     # add to network for extra_functionality
     n.config = config

@@ -614,6 +614,7 @@ def solve_network(n, config, opts="", **kwargs):
     if skip_iterations:
         status, condition = n.optimize(
             solver_name=solver_name,
+            transmission_losses=transmission_losses,
             extra_functionality=extra_functionality,
             **solver_options,
             **kwargs,

@@ -624,6 +625,7 @@ def solve_network(n, config, opts="", **kwargs):
             track_iterations=track_iterations,
             min_iterations=min_iterations,
             max_iterations=max_iterations,
+            transmission_losses=transmission_losses,
             extra_functionality=extra_functionality,
             **solver_options,
             **kwargs,

@@ -667,23 +669,17 @@ if __name__ == "__main__":
 
     np.random.seed(solve_opts.get("seed", 123))
 
-    fn = getattr(snakemake.log, "memory", None)
-    with memory_logger(filename=fn, interval=30.0) as mem:
-        if "overrides" in snakemake.input.keys():
-            overrides = override_component_attrs(snakemake.input.overrides)
-            n = pypsa.Network(
-                snakemake.input.network, override_component_attrs=overrides
-            )
-        else:
-            n = pypsa.Network(snakemake.input.network)
+    if "overrides" in snakemake.input.keys():
+        overrides = override_component_attrs(snakemake.input.overrides)
+        n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
+    else:
+        n = pypsa.Network(snakemake.input.network)
 
     n = prepare_network(n, solve_opts, config=snakemake.config)
 
     n = solve_network(
         n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
     )
 
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
-
-    logger.info("Maximum memory usage: {}".format(mem.mem_usage))

@@ -17,7 +17,6 @@ from _helpers import (
     update_config_with_sector_opts,
 )
 from solve_network import prepare_network, solve_network
-from vresutils.benchmark import memory_logger
 
 logger = logging.getLogger(__name__)
 

@@ -46,23 +45,17 @@ if __name__ == "__main__":
 
     np.random.seed(solve_opts.get("seed", 123))
 
-    fn = getattr(snakemake.log, "memory", None)
-    with memory_logger(filename=fn, interval=30.0) as mem:
-        if "overrides" in snakemake.input:
-            overrides = override_component_attrs(snakemake.input.overrides)
-            n = pypsa.Network(
-                snakemake.input.network, override_component_attrs=overrides
-            )
-        else:
-            n = pypsa.Network(snakemake.input.network)
+    if "overrides" in snakemake.input:
+        overrides = override_component_attrs(snakemake.input.overrides)
+        n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
+    else:
+        n = pypsa.Network(snakemake.input.network)
 
     n.optimize.fix_optimal_capacities()
     n = prepare_network(n, solve_opts, config=snakemake.config)
     n = solve_network(
         n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver
     )
 
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
-
-    logger.info("Maximum memory usage: {}".format(mem.mem_usage))