diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 88685f3b..48915f4c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -67,7 +67,7 @@ repos:
   # Do YAML formatting (before the linter checks it for misses)
   - repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
-    rev: v2.8.0
+    rev: v2.9.0
     hooks:
       - id: pretty-format-yaml
         args: [--autofix, --indent, "2", --preserve-quotes]
diff --git a/config/config.default.yaml b/config/config.default.yaml
index 10e6a6ed..78c1385c 100644
--- a/config/config.default.yaml
+++ b/config/config.default.yaml
@@ -623,9 +623,9 @@ clustering:
 solving:
   #tmpdir: "path/to/tmp"
   options:
-    formulation: kirchhoff
     clip_p_max_pu: 1.e-2
     load_shedding: false
+    transmission_losses: 0
     noisy_costs: true
     skip_iterations: true
     track_iterations: false
diff --git a/doc/configtables/solving.csv b/doc/configtables/solving.csv
index cba28cbe..c252ff32 100644
--- a/doc/configtables/solving.csv
+++ b/doc/configtables/solving.csv
@@ -1,7 +1,7 @@
 ,Unit,Values,Description
 options,,,
--- formulation,--,"Any of {'angles', 'kirchhoff', 'cycles', 'ptdf'}","Specifies which variant of linearized power flow formulations to use in the optimisation problem. Recommended is 'kirchhoff'. Explained in `this article `_."
 -- load_shedding,bool/float,"{'true','false', float}","Add generators with very high marginal cost to simulate load shedding and avoid problem infeasibilities. If load shedding is a float, it denotes the marginal cost in EUR/kWh."
+-- transmission_losses,int,"[0-9]","Add piecewise linear approximation of transmission losses based on n tangents. Defaults to 0, which means losses are ignored."
 -- noisy_costs,bool,"{'true','false'}","Add random noise to marginal cost of generators by :math:`\mathcal{U}(0.009, 0.011)` and capital cost of lines and links by :math:`\mathcal{U}(0.09, 0.11)`."
 -- min_iterations,--,int,"Minimum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
 -- max_iterations,--,int,"Maximum number of solving iterations in between which resistance and reactance (``x/r``) are updated for branches according to ``s_nom_opt`` of the previous run."
diff --git a/doc/release_notes.rst b/doc/release_notes.rst
index 8e6bfdd4..3af16477 100644
--- a/doc/release_notes.rst
+++ b/doc/release_notes.rst
@@ -23,6 +23,12 @@ Upcoming Release
   hydrogen fuel cell. Add switches for both re-electrification options under
   ``sector: hydrogen_turbine:`` and ``sector: hydrogen_fuel_cell:``.
 
+* Remove ``vresutils`` dependency.
+
+* Add option to include a piecewise linear approximation of transmission losses,
+  e.g. by setting ``solving: options: transmission_losses: 2`` for an
+  approximation with two tangents.
+
 PyPSA-Eur 0.8.0 (18th March 2023)
 =================================
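Note: the new ``transmission_losses: n`` option asks the solver to approximate each line's quadratic loss curve by ``n`` tangents; the value is wired through to PyPSA in ``scripts/solve_network.py`` further down. A minimal sketch of the underlying idea, assuming evenly spaced tangency points (illustrative only — PyPSA's exact constraint formulation may differ in details):

    # Illustrative sketch only, not project code.  For a line with series
    # resistance r (p.u.) and flow f, the true loss is r * f**2, a convex
    # parabola, so every tangent is a valid linear lower bound that an LP
    # solver can enforce as a constraint on a loss variable.
    def loss_tangents(r, f_max, n):
        """Return (slope, intercept) pairs of n tangents to r*f**2 on (0, f_max]."""
        tangents = []
        for k in range(1, n + 1):
            f_k = k * f_max / n  # tangency point
            tangents.append((2 * r * f_k, -r * f_k**2))
        return tangents

    # With two tangents (``transmission_losses: 2``), the loss at f = 0.7 p.u.:
    r, f = 0.01, 0.7
    approx = max(m * f + c for m, c in loss_tangents(r, f_max=1.0, n=2))
    print(f"true loss {r * f**2:.4f} vs. 2-tangent approximation {approx:.4f}")

Because the parabola is convex, each added tangent only tightens the approximation from below, which is why a small ``n`` already works well.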
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 2f08b8d9..3e760c81 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -9,7 +9,6 @@
 sphinxcontrib-bibtex
 myst-parser # recommark is deprecated, https://stackoverflow.com/a/71660856/13573820
 pypsa
-vresutils>=0.3.1
 powerplantmatching>=0.5.5
 atlite>=0.2.9
 dask[distributed]
diff --git a/doc/supply_demand.rst b/doc/supply_demand.rst
index 16242405..b043268b 100644
--- a/doc/supply_demand.rst
+++ b/doc/supply_demand.rst
@@ -133,12 +133,12 @@
 The coefficient of performance (COP) of air- and ground-sourced heat pumps depends on the temperature difference between source and sink. For the sink water temperature Tsink we assume 55 °C [`Config `_ file].
 For the time- and location-dependent source temperatures Tsource, we rely on the `ERA5 `_ reanalysis weather data. The temperature differences are converted into COP time series using results from a regression analysis performed in the study by `Staffell et al. `_.
 For air-sourced heat pumps (ASHP), we use the function:
 
 .. math::
-   COP (\Delta T) = 6.81 + 0.121\Delta T + 0.000630\Delta T^2
+   COP (\Delta T) = 6.81 - 0.121\Delta T + 0.000630\Delta T^2
 
 for ground-sourced heat pumps (GSHP), we use the function:
 
 .. math::
-   COP(\Delta T) = 8.77 + 0.150\Delta T + 0.000734\Delta T^2
+   COP(\Delta T) = 8.77 - 0.150\Delta T + 0.000734\Delta T^2
 
 **Resistive heaters**
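Note: the two corrected regressions above are plain polynomials in the temperature lift ΔT = Tsink − Tsource, so turning a source-temperature series into a COP series is direct. A minimal sketch (function and variable names are illustrative, not project code):

    import pandas as pd

    def cop_ashp(delta_t):
        # Staffell et al. regression for air-sourced heat pumps
        return 6.81 - 0.121 * delta_t + 0.000630 * delta_t**2

    def cop_gshp(delta_t):
        # Staffell et al. regression for ground-sourced heat pumps
        return 8.77 - 0.150 * delta_t + 0.000734 * delta_t**2

    t_source = pd.Series([-5.0, 0.0, 10.0])  # e.g. ERA5 air temperature in °C
    delta_t = 55.0 - t_source                # 55 °C sink temperature as above
    print(cop_ashp(delta_t))                 # COP drops as the temperature lift grows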
"elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", python=LOGS + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", threads: 4 resources: mem_mb=config["solving"]["mem"], diff --git a/rules/solve_overnight.smk b/rules/solve_overnight.smk index c3608471..b657eb2b 100644 --- a/rules/solve_overnight.smk +++ b/rules/solve_overnight.smk @@ -23,8 +23,6 @@ rule solve_sector_network: + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log", python=LOGS + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log", - memory=LOGS - + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log", threads: config["solving"]["solver"].get("threads", 4) resources: mem_mb=config["solving"]["mem"], diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 3717f0e8..69d91b87 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -85,16 +85,18 @@ It further adds extendable ``generators`` with **zero** capacity for """ import logging +from itertools import product import geopandas as gpd import numpy as np import pandas as pd import powerplantmatching as pm import pypsa +import scipy.sparse as sparse import xarray as xr from _helpers import configure_logging, update_p_nom_max from powerplantmatching.export import map_country_bus -from vresutils import transfer as vtransfer +from shapely.prepared import prep idx = pd.IndexSlice @@ -216,6 +218,21 @@ def load_powerplants(ppl_fn): ) +def shapes_to_shapes(orig, dest): + """ + Adopted from vresutils.transfer.Shapes2Shapes() + """ + orig_prepped = list(map(prep, orig)) + transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float) + + for i, j in product(range(len(dest)), range(len(orig))): + if orig_prepped[j].intersects(dest[i]): + area = orig[j].intersection(dest[i]).area + transfer[i, j] = area / dest[i].area + + return transfer + + def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0): substation_lv_i = n.buses.index[n.buses["substation_lv"]] regions = gpd.read_file(regions).set_index("name").reindex(substation_lv_i) @@ -232,9 +249,7 @@ def attach_load(n, regions, load, nuts3_shapes, countries, scaling=1.0): return pd.DataFrame({group.index[0]: l}) else: nuts3_cntry = nuts3.loc[nuts3.country == cntry] - transfer = vtransfer.Shapes2Shapes( - group, nuts3_cntry.geometry, normed=False - ).T.tocsr() + transfer = shapes_to_shapes(group, nuts3_cntry.geometry).T.tocsr() gdp_n = pd.Series( transfer.dot(nuts3_cntry["gdp"].fillna(1.0).values), index=group.index ) @@ -403,7 +418,9 @@ def attach_conventional_generators( if f"conventional_{carrier}_{attr}" in conventional_inputs: # Values affecting generators of technology k country-specific # First map generator buses to countries; then map countries to p_max_pu - values = pd.read_csv(values, index_col=0).iloc[:, 0] + values = pd.read_csv( + snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0 + ).iloc[:, 0] bus_values = n.buses.country.map(values) n.generators[attr].update( n.generators.loc[idx].bus.map(bus_values).dropna() diff --git a/scripts/build_shapes.py b/scripts/build_shapes.py index f529517c..0c8b0a94 100644 --- a/scripts/build_shapes.py +++ b/scripts/build_shapes.py @@ -234,6 +234,7 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp): manual = gpd.GeoDataFrame( 
[["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]], columns=["NUTS_ID", "country", "pop"], + geometry=gpd.GeoSeries(), ) manual["geometry"] = manual["country"].map(country_shapes) manual = manual.dropna() diff --git a/scripts/prepare_sector_network.py b/scripts/prepare_sector_network.py index 1db9b916..6a71b1e2 100644 --- a/scripts/prepare_sector_network.py +++ b/scripts/prepare_sector_network.py @@ -22,13 +22,13 @@ from _helpers import ( override_component_attrs, update_config_with_sector_opts, ) +from add_electricity import calculate_annuity from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2 from networkx.algorithms import complement from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation from pypsa.geo import haversine_pts from pypsa.io import import_components_from_dataframe from scipy.stats import beta -from vresutils.costdata import annuity logger = logging.getLogger(__name__) @@ -742,7 +742,7 @@ def prepare_costs(cost_file, params, nyears): costs = costs.fillna(params["fill_values"]) def annuity_factor(v): - return annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 + return calculate_annuity(v["lifetime"], v["discount rate"]) + v["FOM"] / 100 costs["fixed"] = [ annuity_factor(v) * v["investment"] * nyears for i, v in costs.iterrows() @@ -851,7 +851,7 @@ def add_wave(n, wave_cost_factor): capacity = pd.Series({"Attenuator": 750, "F2HB": 1000, "MultiPA": 600}) # in EUR/MW - annuity_factor = annuity(25, 0.07) + 0.03 + annuity_factor = calculate_annuity(25, 0.07) + 0.03 costs = ( 1e6 * wave_cost_factor diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py index de42587d..2204ac36 100644 --- a/scripts/retrieve_databundle.py +++ b/scripts/retrieve_databundle.py @@ -58,9 +58,8 @@ if __name__ == "__main__": else: url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz" - # Save locations tarball_fn = Path(f"{rootpath}/bundle.tar.xz") - to_fn = Path(f"{rootpath}/data") + to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent logger.info(f"Downloading databundle from '{url}'.") disable_progress = snakemake.config["run"].get("disable_progressbar", False) diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py index dda7bd8c..42b726db 100644 --- a/scripts/retrieve_gas_infrastructure_data.py +++ b/scripts/retrieve_gas_infrastructure_data.py @@ -29,7 +29,7 @@ if __name__ == "__main__": # Save locations zip_fn = Path(f"{rootpath}/IGGIELGN.zip") - to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas") + to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent logger.info(f"Downloading databundle from '{url}'.") disable_progress = snakemake.config["run"].get("disable_progressbar", False) diff --git a/scripts/retrieve_sector_databundle.py b/scripts/retrieve_sector_databundle.py index 97426ab2..0d172c8d 100644 --- a/scripts/retrieve_sector_databundle.py +++ b/scripts/retrieve_sector_databundle.py @@ -10,23 +10,25 @@ import logging logger = logging.getLogger(__name__) -import os -import sys import tarfile from pathlib import Path -# Add pypsa-eur scripts to path for import of _helpers -sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts") - from _helpers import configure_logging, progress_retrieve if __name__ == "__main__": + if "snakemake" not in globals(): + from _helpers import mock_snakemake + + snakemake = mock_snakemake("retrieve_databundle") + rootpath = ".." + else: + rootpath = "." 
diff --git a/scripts/retrieve_databundle.py b/scripts/retrieve_databundle.py
index de42587d..2204ac36 100644
--- a/scripts/retrieve_databundle.py
+++ b/scripts/retrieve_databundle.py
@@ -58,9 +58,8 @@ if __name__ == "__main__":
     else:
         url = "https://zenodo.org/record/3517935/files/pypsa-eur-data-bundle.tar.xz"
 
-    # Save locations
     tarball_fn = Path(f"{rootpath}/bundle.tar.xz")
-    to_fn = Path(f"{rootpath}/data")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
     disable_progress = snakemake.config["run"].get("disable_progressbar", False)
diff --git a/scripts/retrieve_gas_infrastructure_data.py b/scripts/retrieve_gas_infrastructure_data.py
index dda7bd8c..42b726db 100644
--- a/scripts/retrieve_gas_infrastructure_data.py
+++ b/scripts/retrieve_gas_infrastructure_data.py
@@ -29,7 +29,7 @@ if __name__ == "__main__":
 
     # Save locations
     zip_fn = Path(f"{rootpath}/IGGIELGN.zip")
-    to_fn = Path(f"{rootpath}/data/gas_network/scigrid-gas")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
     disable_progress = snakemake.config["run"].get("disable_progressbar", False)
diff --git a/scripts/retrieve_sector_databundle.py b/scripts/retrieve_sector_databundle.py
index 97426ab2..0d172c8d 100644
--- a/scripts/retrieve_sector_databundle.py
+++ b/scripts/retrieve_sector_databundle.py
@@ -10,23 +10,25 @@ import logging
 
 logger = logging.getLogger(__name__)
 
-import os
-import sys
 import tarfile
 from pathlib import Path
 
-# Add pypsa-eur scripts to path for import of _helpers
-sys.path.insert(0, os.getcwd() + "/../pypsa-eur/scripts")
-
 from _helpers import configure_logging, progress_retrieve
 
 if __name__ == "__main__":
+    if "snakemake" not in globals():
+        from _helpers import mock_snakemake
+
+        snakemake = mock_snakemake("retrieve_sector_databundle")
+        rootpath = ".."
+    else:
+        rootpath = "."
     configure_logging(snakemake)
 
     url = "https://zenodo.org/record/5824485/files/pypsa-eur-sec-data-bundle.tar.gz"
 
-    tarball_fn = Path("sector-bundle.tar.gz")
-    to_fn = Path("data")
+    tarball_fn = Path(f"{rootpath}/sector-bundle.tar.gz")
+    to_fn = Path(rootpath) / Path(snakemake.output[0]).parent.parent
 
     logger.info(f"Downloading databundle from '{url}'.")
     disable_progress = snakemake.config["run"].get("disable_progressbar", False)
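Note: all three retrieve scripts now derive the extraction directory from the rule's first output instead of hard-coding a path. A sketch of the mechanics, with an invented output path for illustration:

    from pathlib import Path

    rootpath = "."
    first_output = "data/bundle/some_file.csv"  # hypothetical snakemake.output[0]
    to_fn = Path(rootpath) / Path(first_output).parent.parent
    print(to_fn)  # data -- the tarball is extracted one level above the bundle folder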
getattr(snakemake.log, "memory", None) - with memory_logger(filename=fn, interval=30.0) as mem: - if "overrides" in snakemake.input: - overrides = override_component_attrs(snakemake.input.overrides) - n = pypsa.Network( - snakemake.input.network, override_component_attrs=overrides - ) - else: - n = pypsa.Network(snakemake.input.network) + if "overrides" in snakemake.input: + overrides = override_component_attrs(snakemake.input.overrides) + n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides) + else: + n = pypsa.Network(snakemake.input.network) - n.optimize.fix_optimal_capacities() - n = prepare_network(n, solve_opts, config=snakemake.config) - n = solve_network( - n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver - ) + n.optimize.fix_optimal_capacities() + n = prepare_network(n, solve_opts, config=snakemake.config) + n = solve_network( + n, config=snakemake.config, opts=opts, log_fn=snakemake.log.solver + ) - n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) - n.export_to_netcdf(snakemake.output[0]) - - logger.info("Maximum memory usage: {}".format(mem.mem_usage)) + n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards))) + n.export_to_netcdf(snakemake.output[0])