diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..a3268243 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,47 @@ +# SPDX-FileCopyrightText: : 2021 The PyPSA-Eur Authors +# +# SPDX-License-Identifier: CC0-1.0 + +name: CI + +on: [push] + +jobs: + build: + + runs-on: ${{ matrix.os }} + strategy: + max-parallel: 5 + matrix: + os: + - ubuntu-latest + - macos-latest + - windows-latest + + defaults: + run: + shell: bash -l {0} + + steps: + + - uses: actions/checkout@v2 + + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2.1.1 + with: # checks out environment 'test' by default + mamba-version: "*" + channels: conda-forge,defaults + channel-priority: true + + - name: Install dependencies + run: | + echo -ne "url: ${CDSAPI_URL}\nkey: ${CDSAPI_TOKEN}\n" > ~/.cdsapirc + echo -e " - glpk\n - ipopt<3.13.3" >> envs/environment.yaml + mamba env update -f envs/environment.yaml --name test + + - name: Test snakemake workflow + run: | + conda list + cp test/config.test1.yaml config.yaml + snakemake --cores all solve_all_networks + rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 43b25200..00000000 --- a/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: GPL-3.0-or-later - -branches: - only: - - master - -os: - - windows - - linux - - osx - -language: bash - -before_install: - # install conda - - wget https://raw.githubusercontent.com/trichter/conda4travis/latest/conda4travis.sh -O conda4travis.sh - - source conda4travis.sh - - # install conda environment - - conda install -c conda-forge mamba - - mamba env create -f ./envs/environment.yaml - - conda activate pypsa-eur - - # install open-source solver - - mamba install -c conda-forge glpk ipopt'<3.13.3' - - # list packages for easier debugging - - conda list - -script: - - cp ./test/config.test1.yaml ./config.yaml - - snakemake -j all solve_all_networks - - rm -rf resources/*.nc resources/*.geojson resources/*.h5 networks results - # could repeat for more configurations in future diff --git a/README.md b/README.md index dc6b4791..15f979a7 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ SPDX-License-Identifier: CC-BY-4.0 --> ![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/pypsa/pypsa-eur?include_prereleases) -[![Build Status](https://travis-ci.org/PyPSA/pypsa-eur.svg?branch=master)](https://travis-ci.org/PyPSA/pypsa-eur) +[![Build Status](https://github.com/pypsa/pypsa-eur/actions/workflows/ci.yaml/badge.svg)](https://github.com/PyPSA/pypsa-eur/actions) [![Documentation](https://readthedocs.org/projects/pypsa-eur/badge/?version=latest)](https://pypsa-eur.readthedocs.io/en/latest/?badge=latest) ![Size](https://img.shields.io/github/repo-size/pypsa/pypsa-eur) [![Zenodo](https://zenodo.org/badge/DOI/10.5281/zenodo.3520874.svg)](https://doi.org/10.5281/zenodo.3520874) diff --git a/Snakefile b/Snakefile index 817c905e..2702fd3d 100644 --- a/Snakefile +++ b/Snakefile @@ -5,6 +5,9 @@ from os.path import normpath, exists from shutil import copyfile +from snakemake.remote.HTTP import RemoteProvider as HTTPRemoteProvider +HTTP = HTTPRemoteProvider() + if not exists("config.yaml"): copyfile("config.default.yaml", "config.yaml") @@ -135,10 +138,12 @@ rule build_bus_regions: resources: mem=1000 script: "scripts/build_bus_regions.py" - if 
config['enable'].get('build_cutout', False): rule build_cutout: - output: directory("cutouts/{cutout}") + input: + regions_onshore="resources/regions_onshore.geojson", + regions_offshore="resources/regions_offshore.geojson" + output: "cutouts/{cutout}.nc" log: "logs/build_cutout/{cutout}.log" benchmark: "benchmarks/build_cutout_{cutout}" threads: ATLITE_NPROCESSES @@ -148,16 +153,16 @@ if config['enable'].get('build_cutout', False): if config['enable'].get('retrieve_cutout', True): rule retrieve_cutout: - output: directory(expand("cutouts/{cutouts}", **config['atlite'])), - log: "logs/retrieve_cutout.log" - script: 'scripts/retrieve_cutout.py' + input: HTTP.remote("zenodo.org/record/4709858/files/{cutout}.nc", keep_local=True) + output: "cutouts/{cutout}.nc" + shell: "mv {input} {output}" if config['enable'].get('build_natura_raster', False): rule build_natura_raster: input: natura="data/bundle/natura/Natura2000_end2015.shp", - cutouts=expand("cutouts/{cutouts}", **config['atlite']) + cutouts=expand("cutouts/{cutouts}.nc", **config['atlite']) output: "resources/natura.tiff" log: "logs/build_natura_raster.log" script: "scripts/build_natura_raster.py" @@ -165,9 +170,9 @@ if config['enable'].get('build_natura_raster', False): if config['enable'].get('retrieve_natura_raster', True): rule retrieve_natura_raster: + input: HTTP.remote("zenodo.org/record/4706686/files/natura.tiff", keep_local=True) output: "resources/natura.tiff" - log: "logs/retrieve_natura_raster.log" - script: 'scripts/retrieve_natura_raster.py' + shell: "mv {input} {output}" rule build_renewable_profiles: @@ -181,11 +186,10 @@ rule build_renewable_profiles: country_shapes='resources/country_shapes.geojson', offshore_shapes='resources/offshore_shapes.geojson', regions=lambda w: ("resources/regions_onshore.geojson" - if w.technology in ('onwind', 'solar') - else "resources/regions_offshore.geojson"), - cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] - output: - profile="resources/profile_{technology}.nc", + if w.technology in ('onwind', 'solar') + else "resources/regions_offshore.geojson"), + cutout=lambda w: "cutouts/" + config["renewable"][w.technology]['cutout'] + ".nc" + output: profile="resources/profile_{technology}.nc", log: "logs/build_renewable_profile_{technology}.log" benchmark: "benchmarks/build_renewable_profiles_{technology}" threads: ATLITE_NPROCESSES @@ -198,7 +202,7 @@ if 'hydro' in config['renewable'].keys(): input: country_shapes='resources/country_shapes.geojson', eia_hydro_generation='data/bundle/EIA_hydro_generation_2000_2014.csv', - cutout="cutouts/" + config["renewable"]['hydro']['cutout'] + cutout="cutouts/" + config["renewable"]['hydro']['cutout'] + ".nc" output: 'resources/profile_hydro.nc' log: "logs/build_hydro_profile.log" resources: mem=5000 @@ -388,29 +392,3 @@ rule plot_p_nom_max: log: "logs/plot_p_nom_max/elec_s{simpl}_{clusts}_{techs}_{country}_{ext}.log" script: "scripts/plot_p_nom_max.py" - -rule build_country_flh: - input: - base_network="networks/base.nc", - corine="data/bundle/corine/g250_clc06_V18_5.tif", - natura="resources/natura.tiff", - gebco=lambda w: ("data/bundle/GEBCO_2014_2D.nc" - if "max_depth" in config["renewable"][w.technology].keys() - else []), - country_shapes='resources/country_shapes.geojson', - offshore_shapes='resources/offshore_shapes.geojson', - pietzker="data/pietzker2014.xlsx", - regions=lambda w: ("resources/country_shapes.geojson" - if w.technology in ('onwind', 'solar') - else "resources/offshore_shapes.geojson"), - cutout=lambda w: 
"cutouts/" + config["renewable"][w.technology]['cutout'] - output: - area="resources/country_flh_area_{technology}.csv", - aggregated="resources/country_flh_aggregated_{technology}.csv", - uncorrected="resources/country_flh_uncorrected_{technology}.csv", - plot="resources/country_flh_{technology}.pdf", - exclusion=directory("resources/country_exclusion_{technology}") - log: "logs/build_country_flh_{technology}.log" - resources: mem=10000 - benchmark: "benchmarks/build_country_flh_{technology}" - script: "scripts/build_country_flh.py" diff --git a/config.default.yaml b/config.default.yaml index cca71e61..d8f3697a 100755 --- a/config.default.yaml +++ b/config.default.yaml @@ -36,7 +36,7 @@ enable: electricity: voltages: [220., 300., 380.] co2limit: 7.75e+7 # 0.05 * 3.1e9*0.5 - co2base: 3.1e+9 # 1 * 3.1e9*0.5 + co2base: 1.487e+9 agg_p_nom_limits: data/agg_p_nom_minmax.csv extendable_carriers: @@ -62,18 +62,28 @@ electricity: atlite: nprocesses: 4 cutouts: + # use 'base' to determine geographical bounds and time span from config + # base: + # module: era5 europe-2013-era5: - module: era5 - xs: [-12., 35.] - ys: [72., 33.] - years: [2013, 2013] + module: era5 # in priority order + x: [-12., 35.] + y: [33., 72] + dx: 0.3 + dy: 0.3 + time: ['2013', '2013'] europe-2013-sarah: - module: sarah - resolution: 0.2 - xs: [-12., 42.] - ys: [65., 33.] - years: [2013, 2013] + module: [sarah, era5] # in priority order + x: [-12., 45.] + y: [33., 65] + dx: 0.2 + dy: 0.2 + time: ['2013', '2013'] + sarah_interpolate: false + sarah_dir: + features: [influx, temperature] + renewable: onwind: cutout: europe-2013-era5 @@ -179,13 +189,16 @@ costs: year: 2030 discountrate: 0.07 # From a Lion Hirth paper, also reflects average of Noothout et al 2016 USD2013_to_EUR2013: 0.7532 # [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html - marginal_cost: + marginal_cost: # EUR/MWh solar: 0.01 onwind: 0.015 offwind: 0.015 hydro: 0. H2: 0. + electrolysis: 0. + fuel cell: 0. battery: 0. + battery inverter: 0. emission_prices: # in currency per tonne emission, only used with the option Ep co2: 0. diff --git a/config.tutorial.yaml b/config.tutorial.yaml index 5cc23e72..1dfde199 100755 --- a/config.tutorial.yaml +++ b/config.tutorial.yaml @@ -54,16 +54,15 @@ electricity: atlite: nprocesses: 4 cutouts: - europe-2013-era5: + europe-2013-era5-tutorial: module: era5 - xs: [4., 15.] - ys: [56., 46.] - months: [3, 3] - years: [2013, 2013] + x: [4., 15.] + y: [46., 56.] 
+ time: ["2013-03", "2013-03"] renewable: onwind: - cutout: europe-2013-era5 + cutout: europe-2013-era5-tutorial resource: method: wind turbine: Vestas_V112_3MW @@ -80,7 +79,7 @@ renewable: potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-ac: - cutout: europe-2013-era5 + cutout: europe-2013-era5-tutorial resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -92,7 +91,7 @@ renewable: potential: simple # or conservative clip_p_max_pu: 1.e-2 offwind-dc: - cutout: europe-2013-era5 + cutout: europe-2013-era5-tutorial resource: method: wind turbine: NREL_ReferenceTurbine_5MW_offshore @@ -105,7 +104,7 @@ renewable: potential: simple # or conservative clip_p_max_pu: 1.e-2 solar: - cutout: europe-2013-era5 + cutout: europe-2013-era5-tutorial resource: method: pv panel: CSi diff --git a/doc/configtables/atlite.csv b/doc/configtables/atlite.csv index 5f21bb05..7bb56040 100644 --- a/doc/configtables/atlite.csv +++ b/doc/configtables/atlite.csv @@ -1,8 +1,9 @@ ,Unit,Values,Description nprocesses,--,int,"Number of parallel processes in cutout preparation" cutouts,,, --- {name},--,"Convention is to name cutouts like ``--`` (e.g. ``europe-2013-era5``).","Directory to write cutout data to. The user may specify multiple cutouts under configuration ``atlite: cutouts:``. Reference is used in configuration ``renewable: {technology}: cutout:``" --- -- module,--,"One of {'era5','sarah'}","Source of the reanalysis weather dataset (e.g. `ERA5 `_ or `SARAH-2 `_)" --- -- xs,°,"Float interval within [-180, 180]","Range of longitudes to download weather data for." --- -- ys,°,"Float interval within [-90, 90]","Range of latitudes to download weather data for." --- -- years,--,"Integer interval within [1979,2018]","Range of years to download weather data for." +-- {name},--,"Convention is to name cutouts like ``--`` (e.g. ``europe-2013-era5``).","Name of the cutout netcdf file. The user may specify multiple cutouts under configuration ``atlite: cutouts:``. Reference is used in configuration ``renewable: {technology}: cutout:``. The cutout ``base`` may be used to automatically calculate temporal and spatial bounds of the network." +-- -- module,--,"Subset of {'era5','sarah'}","Source of the reanalysis weather dataset (e.g. `ERA5 `_ or `SARAH-2 `_)" +-- -- x,°,"Float interval within [-180, 180]","Range of longitudes to download weather data for. If not defined, it defaults to the spatial bounds of all bus shapes." +-- -- y,°,"Float interval within [-90, 90]","Range of latitudes to download weather data for. If not defined, it defaults to the spatial bounds of all bus shapes." +-- -- time,,"Time interval within ['1979', '2018'] (with valid pandas date time strings)","Time span to download weather data for. If not defined, it defaults to the time interval spanned by the snapshots." +-- -- features,,"String or list of strings with valid cutout features ('inlfux', 'wind').","When freshly building a cutout, retrieve data only for those features. If not defined, it defaults to all available features." 
diff --git a/doc/configtables/electricity.csv b/doc/configtables/electricity.csv index 144199e0..aaeab239 100644 --- a/doc/configtables/electricity.csv +++ b/doc/configtables/electricity.csv @@ -2,19 +2,18 @@ voltages,kV,"Any subset of {220., 300., 380.}",Voltage levels to consider when, co2limit,:math:`t_{CO_2-eq}/a`,float,Cap on total annual system carbon dioxide emissions, co2base,:math:`t_{CO_2-eq}/a`,float,Reference value of total annual system carbon dioxide emissions if relative emission reduction target is specified in ``{opts}`` wildcard., -agg_p_nom_limits,--,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. +agg_p_nom_limits,file,path,Reference to ``.csv`` file specifying per carrier generator nominal capacity constraints for individual countries if ``'CCL'`` is in ``{opts}`` wildcard. Defaults to ``data/agg_p_nom_minmax.csv``. extendable_carriers,,,, ---,Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. ---,StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. ---,Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. ---,Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. +-- Generator,--,"Any subset of {'OCGT','CCGT'}",Places extendable conventional power plants (OCGT and/or CCGT) where gas power plants are located today without capacity limits. +-- StorageUnit,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +-- Store,--,"Any subset of {'battery','H2'}",Adds extendable storage units (battery and/or hydrogen) at every node/bus after clustering without capacity limits and with zero initial capacity. +-- Link,--,Any subset of {'H2 pipeline'},Adds extendable links (H2 pipelines only) at every connection where there are lines or HVDC links without capacity limits and with zero initial capacity. Hydrogen pipelines require hydrogen storage to be modelled as ``Store``. max_hours,,,, ---,battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. ---,H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +-- battery,h,float,Maximum state of charge capacity of the battery in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. +-- H2,h,float,Maximum state of charge capacity of the hydrogen storage in terms of hours at full output capacity ``p_nom``. Cf. `PyPSA documentation `_. powerplants_filter,--,"use `pandas.query `_ strings here, e.g. Country not in ['Germany']",Filter query for the default powerplant database., custom_powerplants,--,"use `pandas.query `_ strings here, e.g. 
Country in ['Germany']",Filter query for the custom powerplant database., conventional_carriers,--,"Any subset of {nuclear, oil, OCGT, CCGT, coal, lignite, geothermal, biomass}",List of conventional power plants to include in the model from ``resources/powerplants.csv``., renewable_capacities_from_OPSD,,"[solar, onwind, offwind]",List of carriers (offwind-ac and offwind-dc are included in offwind) whose capacities 'p_nom' are aligned to the `OPSD renewable power plant list `_, -,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, estimate_renewable_capacities_from_capacitiy_stats,,,, -,"Fueltype [ppm], e.g. “Wind”","list of fueltypes stings in PyPSA-EUR, eg. “[onwind, offwind-ac, offwind-dc]”",converts ppm Fueltype to PyPSA-EUR Fueltype, +"-- Fueltype [ppm], e.g. Wind",,"list of fueltypes strings in PyPSA-Eur, e.g. [onwind, offwind-ac, offwind-dc]",converts ppm Fueltype to PyPSA-EUR Fueltype, diff --git a/doc/configuration.rst b/doc/configuration.rst index 1a42c70a..a75669cd 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -95,9 +95,12 @@ Specifies the temporal range to build an energy system model for as arguments to ``atlite`` ============= +Define and specify the ``atlite.Cutout`` used for calculating renewable potentials and time-series. All options except for ``features`` are directly used as `cutout parameters `_. + .. literalinclude:: ../config.default.yaml :language: yaml - :lines: 62-75 + :start-at: atlite: + :end-before: renewable: .. csv-table:: :header-rows: 1 diff --git a/doc/index.rst b/doc/index.rst index 02b02ce2..e7dabdf4 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -9,8 +9,8 @@ PyPSA-Eur: An Open Optimisation Model of the European Transmission System .. image:: https://img.shields.io/github/v/release/pypsa/pypsa-eur?include_prereleases :alt: GitHub release (latest by date including pre-releases) -.. image:: https://travis-ci.org/PyPSA/pypsa-eur.svg?branch=master - :target: https://travis-ci.org/PyPSA/pypsa-eur +.. image:: https://github.com/pypsa/pypsa-eur/actions/workflows/ci.yaml/badge.svg + :target: https://github.com/PyPSA/pypsa-eur/actions .. image:: https://readthedocs.org/projects/pypsa-eur/badge/?version=latest :target: https://pypsa-eur.readthedocs.io/en/latest/?badge=latest diff --git a/doc/plotting.rst b/doc/plotting.rst index cd404226..6b76a28c 100644 --- a/doc/plotting.rst +++ b/doc/plotting.rst @@ -9,50 +9,6 @@ Plotting and Summary .. warning:: The corresponding code is currently under revision and has only minimal documentation. -.. _flh: - -Rule ``build_country_flh`` -============================= - -.. graphviz:: - :align: center - - digraph snakemake_dag { - graph [bgcolor=white, - margin=0, - size="8,5" - ]; - node [fontname=sans, - fontsize=10, - penwidth=2, - shape=box, - style=rounded - ]; - edge [color=grey, - penwidth=2 - ]; - 0 [color="0.31 0.6 0.85", - fillcolor=gray, - label=build_country_flh, - style=filled]; - 1 [color="0.06 0.6 0.85", - label=base_network]; - 1 -> 0; - 2 [color="0.42 0.6 0.85", - label=build_natura_raster]; - 2 -> 0; - 3 [color="0.58 0.6 0.85", - label=build_shapes]; - 3 -> 0; - 4 [color="0.14 0.6 0.85", - label=build_cutout]; - 4 -> 0; - } - -| - -.. automodule:: build_country_flh - .. 
_plot_potentials: Rule ``plot_p_nom_max`` diff --git a/doc/preparation/retrieve.rst b/doc/preparation/retrieve.rst index ea8ecc3e..26f152c5 100644 --- a/doc/preparation/retrieve.rst +++ b/doc/preparation/retrieve.rst @@ -21,9 +21,59 @@ Rule ``retrieve_databundle`` Rule ``retrieve_cutout`` ------------------------ -.. automodule:: retrieve_cutout +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517949.svg + :target: https://doi.org/10.5281/zenodo.3517949 + +Cutouts are spatio-temporal subsets of the European weather data from the `ECMWF ERA5 `_ reanalysis dataset and the `CMSAF SARAH-2 `_ solar surface radiation dataset for the year 2013. +They have been prepared with, and are intended for use with, the `atlite `_ tool. You can either generate them yourself using the ``build_cutout`` rule or retrieve them directly from `zenodo `_ through the rule ``retrieve_cutout``. +The :ref:`tutorial` uses a smaller cutout (30 MB) than required for the full model, which is also downloaded automatically. + +.. note:: + To download cutouts yourself from the `ECMWF ERA5 `_ dataset you need to `set up the CDS API `_. + + +**Relevant Settings** + +.. code:: yaml + + tutorial: + enable: + build_cutout: + +.. seealso:: + Documentation of the configuration file ``config.yaml`` at + :ref:`toplevel_cf` + +**Outputs** + +- ``cutouts/{cutout}.nc``: weather data from either the `ERA5 `_ reanalysis weather dataset or `SARAH-2 `_ satellite-based historic weather data. + +.. seealso:: + For details see :mod:`build_cutout` and read the `atlite documentation `_. + Rule ``retrieve_natura_raster`` ------------------------------- -.. automodule:: retrieve_natura_raster +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.4706686.svg + :target: https://doi.org/10.5281/zenodo.4706686 + +This rule, as a substitute for :mod:`build_natura_raster`, downloads an already rasterized version (`natura.tiff `_) of `Natura 2000 `_ natural protection areas to reduce computation times. The file is placed into the ``resources`` sub-directory. + +**Relevant Settings** + +.. code:: yaml + + enable: + build_natura_raster: + +.. seealso:: + Documentation of the configuration file ``config.yaml`` at + :ref:`toplevel_cf` + +**Outputs** + +- ``resources/natura.tiff``: Rasterized version of `Natura 2000 `_ natural protection areas to reduce computation times. + +.. seealso:: + For details see :mod:`build_natura_raster`. diff --git a/doc/release_notes.rst b/doc/release_notes.rst index e678f669..230fc67d 100644 --- a/doc/release_notes.rst +++ b/doc/release_notes.rst @@ -1,5 +1,5 @@ .. - SPDX-FileCopyrightText: 2019-2020 The PyPSA-Eur Authors + SPDX-FileCopyrightText: 2019-2021 The PyPSA-Eur Authors SPDX-License-Identifier: CC-BY-4.0 @@ -11,6 +11,19 @@ Release Notes Upcoming Release ================ +* Switch to the new major release ``>=v0.2.1`` of ``atlite``. The version upgrade brings a significant speed-up (roughly a factor of 2) for the rule ``build_renewable_profiles``. Much of the code that calculated the land-use availability has been moved into ``atlite`` and no longer relies on ``glaes`` and ``geokit``. This simplifies building the environment and improves the version compatibility of ``gdal`` and ``libgdal`` with other packages. +* The minimum Python version was raised to ``3.8``. +* The rule and script ``build_country_flh`` are removed as they are no longer used or maintained. +* The flag ``keep_all_available_areas`` in the configuration for renewable potentials (config.yaml -> renewable -> {technology}) was deprecated and now defaults to ``True``. 
+* The tutorial cutout was renamed from ``cutouts/europe-2013-era5.nc`` to ``cutouts/europe-2013-era5-tutorial.nc`` to accommodate tutorial and productive cutouts side-by-side. +* Fix: Value for ``co2base`` in ``config.yaml`` adjusted to 1.487e9 t CO2-eq (from 3.1e9 t CO2-eq). The new value represents emissions related to the electricity sector for EU+UK. The old value was roughly a factor of 2 too high and was applied whenever the emissions wildcard in ``{opts}`` was used. +* Add option to include marginal costs of links representing fuel cells, electrolysis, and battery inverters + [`#232 `_]. +* Fix: Add escape in :mod:`base_network` if all TYNDP links are already contained in the network [`#246 `_]. +* Bugfix in :mod:`solve_operations_network`: optimised capacities are now fixed for all extendable links, not only HVDC links [`#244 `_]. +* The ``focus_weights`` are now also considered when pre-clustering in the :mod:`simplify_network` rule [`#241 `_]. +* Continuous integration testing switches to GitHub Actions from Travis CI [`#252 `_]. +* Bugfix in :mod:`build_renewable_profiles` where offshore wind profiles could no longer be created [`#249 `_]. PyPSA-Eur 0.3.0 (7th December 2020) ================================== @@ -36,6 +49,7 @@ Using the ``{opts}`` wildcard for scenarios: uses the `tsam `_ package [`#186 `_]. + More OPSD integration: * Add renewable power plants from `OPSD `_ to the network for specified technologies. diff --git a/doc/wildcards.rst b/doc/wildcards.rst index 227997d1..b3267c23 100644 --- a/doc/wildcards.rst +++ b/doc/wildcards.rst @@ -130,8 +130,7 @@ It can take the values ``onwind``, ``offwind-ac``, ``offwind-dc``, and ``solar`` The wildcard can moreover be used to create technology specific figures and summaries. For instance ``{technology}`` can be used to plot regionally disaggregated potentials -with the rule :mod:`plot_p_nom_max` or to summarize a particular technology's -full load hours in various countries with the rule :mod:`build_country_flh`. +with the rule :mod:`plot_p_nom_max`. .. 
_attr: diff --git a/envs/environment.docs.yaml b/envs/environment.docs.yaml index 0c937e43..772583d4 100755 --- a/envs/environment.docs.yaml +++ b/envs/environment.docs.yaml @@ -9,7 +9,8 @@ dependencies: - python<=3.7 - pip - pypsa>=0.17.1 - - atlite=0.0.3 + - atlite>=0.2.2 + - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved - pre-commit # Dependencies of the workflow itself @@ -19,27 +20,13 @@ dependencies: - memory_profiler - yaml - pytables - - powerplantmatching>=0.4.3 - - # Second order dependencies which should really be deps of atlite - - xarray - - progressbar2 - - pyyaml>=5.1.0 + - powerplantmatching>=0.4.8 # GIS dependencies have to come all from conda-forge - cartopy - - fiona - - proj - - pyshp - - geopandas - - rasterio - - shapely - - libgdal + - descartes - pip: - vresutils==0.3.1 - - git+https://github.com/PyPSA/glaes.git#egg=glaes - - git+https://github.com/PyPSA/geokit.git#egg=geokit - - cdsapi - sphinx - sphinx_rtd_theme diff --git a/envs/environment.yaml b/envs/environment.yaml index 7c5faef3..3d0a3400 100644 --- a/envs/environment.yaml +++ b/envs/environment.yaml @@ -8,17 +8,17 @@ channels: - bioconda - http://conda.anaconda.org/gurobi dependencies: - - python + - python>=3.8 - pip - mamba # esp for windows build - - pypsa>=0.17.1 - - atlite=0.0.3 + - pypsa>=0.17.1 + - atlite>=0.2.4 + - dask<=2021.3.1 # until https://github.com/dask/dask/issues/7583 is solved # Dependencies of the workflow itself - xlrd - openpyxl - - scikit-learn - pycountry - seaborn - snakemake-minimal @@ -27,34 +27,32 @@ dependencies: - pytables - lxml - powerplantmatching>=0.4.8 - - numpy<=1.19.0 # otherwise macos fails - - # Second order dependencies which should really be deps of atlite + - numpy<=1.19 # until new PyPSA after 27-06-21 + - pandas + - geopandas - xarray - netcdf4 - - bottleneck - - toolz - - dask + - networkx + - scipy + - shapely - progressbar2 - - pyyaml>=5.1.0 + - pyomo + - matplotlib # Keep in conda environment when calling ipython - ipython # GIS dependencies: - cartopy - - fiona - - proj - - pyshp - - geopandas - - rasterio - - shapely - - libgdal<=3.0.4 - descartes + - rasterio + + # PyPSA-Eur-Sec Dependencies + - geopy + - tqdm + - pytz + - country_converter - pip: - vresutils==0.3.1 - tsam>=1.1.0 - - git+https://github.com/PyPSA/glaes.git#egg=glaes - - git+https://github.com/PyPSA/geokit.git#egg=geokit - - cdsapi diff --git a/scripts/_helpers.py b/scripts/_helpers.py index 807c439f..996baf73 100644 --- a/scripts/_helpers.py +++ b/scripts/_helpers.py @@ -156,7 +156,6 @@ def aggregate_p_curtailed(n): ]) def aggregate_costs(n, flatten=False, opts=None, existing_only=False): - from six import iterkeys, itervalues components = dict(Link=("p_nom", "p0"), Generator=("p_nom", "p"), @@ -167,8 +166,8 @@ def aggregate_costs(n, flatten=False, opts=None, existing_only=False): costs = {} for c, (p_nom, p_attr) in zip( - n.iterate_components(iterkeys(components), skip_empty=False), - itervalues(components) + n.iterate_components(components.keys(), skip_empty=False), + components.values() ): if c.df.empty: continue if not existing_only: p_nom += "_opt" diff --git a/scripts/add_electricity.py b/scripts/add_electricity.py index 8fc8ad5c..45d537c4 100755 --- a/scripts/add_electricity.py +++ b/scripts/add_electricity.py @@ -245,6 +245,11 @@ def update_transmission_costs(n, costs, length_factor=1.0, simple_hvdc_costs=Fal if n.links.empty: return dc_b = n.links.carrier == 'DC' + + # If there are no dc links, then the 'underwater_fraction' column + # may be missing. 
Therefore we have to return here. + if n.links.loc[dc_b].empty: return + if simple_hvdc_costs: costs = (n.links.loc[dc_b, 'length'] * length_factor * costs.at['HVDC overhead', 'capital_cost']) @@ -332,7 +337,7 @@ def attach_hydro(n, costs, ppl): country = ppl['bus'].map(n.buses.country).rename("country") - inflow_idx = ror.index | hydro.index + inflow_idx = ror.index.union(hydro.index) if not inflow_idx.empty: dist_key = ppl.loc[inflow_idx, 'p_nom'].groupby(country).transform(normed) diff --git a/scripts/add_extra_components.py b/scripts/add_extra_components.py index 00851d87..946c4433 100644 --- a/scripts/add_extra_components.py +++ b/scripts/add_extra_components.py @@ -114,7 +114,8 @@ def attach_stores(n, costs): carrier='H2 electrolysis', p_nom_extendable=True, efficiency=costs.at["electrolysis", "efficiency"], - capital_cost=costs.at["electrolysis", "capital_cost"]) + capital_cost=costs.at["electrolysis", "capital_cost"], + marginal_cost=costs.at["electrolysis", "marginal_cost"]) n.madd("Link", h2_buses_i + " Fuel Cell", bus0=h2_buses_i, @@ -123,7 +124,8 @@ def attach_stores(n, costs): p_nom_extendable=True, efficiency=costs.at["fuel cell", "efficiency"], #NB: fixed cost is per MWel - capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"]) + capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"], + marginal_cost=costs.at["fuel cell", "marginal_cost"]) if 'battery' in carriers: b_buses_i = n.madd("Bus", buses_i + " battery", carrier="battery", **bus_sub_dict) @@ -133,7 +135,8 @@ def attach_stores(n, costs): carrier='battery', e_cyclic=True, e_nom_extendable=True, - capital_cost=costs.at['battery storage', 'capital_cost']) + capital_cost=costs.at['battery storage', 'capital_cost'], + marginal_cost=costs.at["battery", "marginal_cost"]) n.madd("Link", b_buses_i + " charger", bus0=buses_i, @@ -141,15 +144,16 @@ def attach_stores(n, costs): carrier='battery charger', efficiency=costs.at['battery inverter', 'efficiency'], capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True) + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"]) n.madd("Link", b_buses_i + " discharger", bus0=b_buses_i, bus1=buses_i, carrier='battery discharger', efficiency=costs.at['battery inverter','efficiency'], - capital_cost=costs.at['battery inverter', 'capital_cost'], - p_nom_extendable=True) + p_nom_extendable=True, + marginal_cost=costs.at["battery inverter", "marginal_cost"]) def attach_hydrogen_pipelines(n, costs): diff --git a/scripts/base_network.py b/scripts/base_network.py index e43c4baf..778f8dc4 100644 --- a/scripts/base_network.py +++ b/scripts/base_network.py @@ -74,7 +74,6 @@ import scipy as sp import networkx as nx from scipy.sparse import csgraph -from six import iteritems from itertools import product from shapely.geometry import Point, LineString @@ -213,6 +212,7 @@ def _add_links_from_tyndp(buses, links): if links_tyndp["j"].notnull().any(): logger.info("TYNDP links already in the dataset (skipping): " + ", ".join(links_tyndp.loc[links_tyndp["j"].notnull(), "Name"])) links_tyndp = links_tyndp.loc[links_tyndp["j"].isnull()] + if links_tyndp.empty: return buses, links tree = sp.spatial.KDTree(buses[['x', 'y']]) _, ind0 = tree.query(links_tyndp[["x1", "y1"]]) @@ -268,13 +268,13 @@ def _apply_parameter_corrections(n): if corrections is None: return - for component, attrs in iteritems(corrections): + for component, attrs in corrections.items(): df = n.df(component) oid = 
_get_oid(df) if attrs is None: continue - for attr, repls in iteritems(attrs): - for i, r in iteritems(repls): + for attr, repls in attrs.items(): + for i, r in repls.items(): if i == 'oid': r = oid.map(repls["oid"]).dropna() elif i == 'index': diff --git a/scripts/build_country_flh.py b/scripts/build_country_flh.py deleted file mode 100644 index 459b8f38..00000000 --- a/scripts/build_country_flh.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/env python - -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors -# -# SPDX-License-Identifier: GPL-3.0-or-later - -""" -Create ``.csv`` files and plots for comparing per country full load hours of renewable time series. - -Relevant Settings ------------------ - -.. code:: yaml - - snapshots: - - renewable: - {technology}: - cutout: - resource: - correction_factor: - -.. seealso:: - Documentation of the configuration file ``config.yaml`` at - :ref:`snapshots_cf`, :ref:`renewable_cf` - -Inputs ------- - -- ``data/bundle/corine/g250_clc06_V18_5.tif``: `CORINE Land Cover (CLC) `_ inventory on `44 classes `_ of land use (e.g. forests, arable land, industrial, urban areas). - - .. image:: img/corine.png - :scale: 33 % - -- ``data/bundle/GEBCO_2014_2D.nc``: A `bathymetric `_ data set with a global terrain model for ocean and land at 15 arc-second intervals by the `General Bathymetric Chart of the Oceans (GEBCO) `_. - - .. image:: img/gebco_2019_grid_image.jpg - :scale: 50 % - - **Source:** `GEBCO `_ - -- ``data/pietzker2014.xlsx``: `Supplementary material 2 `_ from `Pietzcker et al. `_; not part of the data bundle; download and place here yourself. -- ``resources/natura.tiff``: confer :ref:`natura` -- ``resources/country_shapes.geojson``: confer :ref:`shapes` -- ``resources/offshore_shapes.geojson``: confer :ref:`shapes` -- ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions` -- ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions` -- ``"cutouts/" + config["renewable"][{technology}]['cutout']``: :ref:`cutout` -- ``networks/base.nc``: :ref:`base` - -Outputs -------- - -- ``resources/country_flh_area_{technology}.csv``: -- ``resources/country_flh_aggregated_{technology}.csv``: -- ``resources/country_flh_uncorrected_{technology}.csv``: -- ``resources/country_flh_{technology}.pdf``: -- ``resources/country_exclusion_{technology}``: - -Description ------------ - -""" - -import logging -from _helpers import configure_logging - -import os -import atlite -import numpy as np -import xarray as xr -import pandas as pd - -import geokit as gk -from scipy.sparse import vstack -import pycountry as pyc -import matplotlib.pyplot as plt - -from vresutils import landuse as vlanduse -from vresutils.array import spdiag - -import progressbar as pgb - -from build_renewable_profiles import init_globals, calculate_potential - -logger = logging.getLogger(__name__) - - -def build_area(flh, countries, areamatrix, breaks, fn): - area_unbinned = xr.DataArray(areamatrix.todense(), [countries, capacity_factor.coords['spatial']]) - bins = xr.DataArray(pd.cut(flh.to_series(), bins=breaks), flh.coords, name="bins") - area = area_unbinned.groupby(bins).sum(dim="spatial").to_pandas() - area.loc[:,slice(*area.sum()[lambda s: s > 0].index[[0,-1]])].to_csv(fn) - area.columns = area.columns.map(lambda s: s.left) - return area - - -def plot_area_not_solar(area, countries): - # onshore wind/offshore wind - a = area.T - - fig, axes = plt.subplots(nrows=len(countries), sharex=True) - for c, ax in zip(countries, axes): - d = a[[c]] / 
1e3 - d.plot.bar(ax=ax, legend=False, align='edge', width=1.) - ax.set_ylabel(f"Potential {c} / GW") - ax.set_title(c) - ax.legend() - ax.set_xlabel("Full-load hours") - fig.savefig(snakemake.output.plot, transparent=True, bbox_inches='tight') - -def plot_area_solar(area, p_area, countries): - # onshore wind/offshore wind - p = p_area.T - a = area.T - - fig, axes = plt.subplots(nrows=len(countries), sharex=True, squeeze=False) - for c, ax in zip(countries, axes.flat): - d = pd.concat([a[c], p[c]], keys=['PyPSA-Eur', 'Pietzker'], axis=1) / 1e3 - d.plot.bar(ax=ax, legend=False, align='edge', width=1.) - # ax.set_ylabel(f"Potential {c} / GW") - ax.set_title(c) - ax.legend() - ax.set_xlabel("Full-load hours") - - fig.savefig(snakemake.output.plot, transparent=True, bbox_inches='tight') - - -def build_aggregate(flh, countries, areamatrix, breaks, p_area, fn): - agg_a = pd.Series(np.ravel((areamatrix / areamatrix.sum(axis=1)).dot(flh.values)), - countries, name="PyPSA-Eur") - - if p_area is None: - agg_a['Overall'] = float((np.asarray((areamatrix.sum(axis=0) / areamatrix.sum()) - .dot(flh.values)).squeeze())) - - agg = pd.DataFrame({'PyPSA-Eur': agg_a}) - else: - # Determine indices of countries which are also in Pietzcker - inds = pd.Index(countries).get_indexer(p_area.index) - areamatrix = areamatrix[inds] - - agg_a['Overall'] = float((np.asarray((areamatrix.sum(axis=0) / areamatrix.sum()) - .dot(flh.values)).squeeze())) - - midpoints = (breaks[1:] + breaks[:-1])/2. - p = p_area.T - - # Per-country FLH comparison - agg_p = pd.Series((p / p.sum()).multiply(midpoints, axis=0).sum(), name="Pietzker") - agg_p['Overall'] = float((p.sum(axis=1) / p.sum().sum()).multiply(midpoints, axis=0).sum()) - - agg = pd.DataFrame({'PyPSA-Eur': agg_a, 'Pietzcker': agg_p, 'Ratio': agg_p / agg_a}) - - agg.to_csv(fn) - -if __name__ == '__main__': - if 'snakemake' not in globals(): - from _helpers import mock_snakemake - snakemake = mock_snakemake('build_country_flh', technology='solar') - configure_logging(snakemake) - - pgb.streams.wrap_stderr() - - - config = snakemake.config['renewable'][snakemake.wildcards.technology] - - time = pd.date_range(freq='m', **snakemake.config['snapshots']) - params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]])) - - cutout = atlite.Cutout(config['cutout'], - cutout_dir=os.path.dirname(snakemake.input.cutout), - **params) - - minx, maxx, miny, maxy = cutout.extent - dx = (maxx - minx) / (cutout.shape[1] - 1) - dy = (maxy - miny) / (cutout.shape[0] - 1) - bounds = gk.Extent.from_xXyY((minx - dx/2., maxx + dx/2., - miny - dy/2., maxy + dy/2.)) - - # Use GLAES to compute available potentials and the transition matrix - paths = dict(snakemake.input) - - init_globals(bounds.xXyY, dx, dy, config, paths) - regions = gk.vector.extractFeatures(paths["regions"], onlyAttr=True) - countries = pd.Index(regions["name"], name="country") - - widgets = [ - pgb.widgets.Percentage(), - ' ', pgb.widgets.SimpleProgress(format='(%s)' % pgb.widgets.SimpleProgress.DEFAULT_FORMAT), - ' ', pgb.widgets.Bar(), - ' ', pgb.widgets.Timer(), - ' ', pgb.widgets.ETA() - ] - progressbar = pgb.ProgressBar(prefix='Compute GIS potentials: ', widgets=widgets, max_value=len(countries)) - - if not os.path.isdir(snakemake.output.exclusion): - os.makedirs(snakemake.output.exclusion) - - matrix = vstack([calculate_potential(i, save_map=os.path.join(snakemake.output.exclusion, countries[i])) - for i in progressbar(regions.index)]) - - areamatrix = matrix * 
spdiag(vlanduse._cutout_cell_areas(cutout).ravel()) - areamatrix.data[areamatrix.data < 1.] = 0 # ignore weather cells where only less than 1 km^2 can be installed - areamatrix.eliminate_zeros() - - resource = config['resource'] - func = getattr(cutout, resource.pop('method')) - correction_factor = config.get('correction_factor', 1.) - - capacity_factor = func(capacity_factor=True, show_progress='Compute capacity factors: ', **resource).stack(spatial=('y', 'x')) - flh_uncorr = capacity_factor * 8760 - flh_corr = correction_factor * flh_uncorr - - if snakemake.wildcards.technology == 'solar': - pietzcker = pd.read_excel(snakemake.input.pietzker, sheet_name="PV on all area", skiprows=2, header=[0,1]).iloc[1:177] - p_area1_50 = pietzcker['Usable Area at given FLh in 1-50km distance to settlement '].dropna(axis=1) - p_area1_50.columns = p_area1_50.columns.str.split(' ').str[0] - - p_area50_100 = pietzcker['Usable Area at given FLh in 50-100km distance to settlement '] - - p_area = p_area1_50 + p_area50_100 - cols = p_area.columns - breaks = cols.str.split('-').str[0].append(pd.Index([cols[-1].split('-')[1]])).astype(int) - p_area.columns = breaks[:-1] - - p_area = p_area.reindex(countries.map(lambda c: pyc.countries.get(alpha_2=c).name)) - p_area.index = countries - p_area = p_area.dropna() # Pietzcker does not have data for CZ and MK - else: - breaks = np.r_[0:8000:50] - p_area = None - - - area = build_area(flh_corr, countries, areamatrix, breaks, snakemake.output.area) - - if snakemake.wildcards.technology == 'solar': - plot_area_solar(area, p_area, p_area.index) - else: - plot_area_not_solar(area, countries) - - build_aggregate(flh_uncorr, countries, areamatrix, breaks, p_area, snakemake.output.uncorrected) - build_aggregate(flh_corr, countries, areamatrix, breaks, p_area, snakemake.output.aggregated) diff --git a/scripts/build_cutout.py b/scripts/build_cutout.py index 1e55faf5..79be84fc 100644 --- a/scripts/build_cutout.py +++ b/scripts/build_cutout.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors +# SPDX-FileCopyrightText: : 2017-2021 The PyPSA-Eur Authors # # SPDX-License-Identifier: GPL-3.0-or-later @@ -92,10 +92,11 @@ Description """ import logging +import atlite +import geopandas as gpd +import pandas as pd from _helpers import configure_logging -import os -import atlite logger = logging.getLogger(__name__) @@ -106,14 +107,24 @@ if __name__ == "__main__": configure_logging(snakemake) cutout_params = snakemake.config['atlite']['cutouts'][snakemake.wildcards.cutout] - for p in ('xs', 'ys', 'years', 'months'): - if p in cutout_params: - cutout_params[p] = slice(*cutout_params[p]) - cutout = atlite.Cutout(snakemake.wildcards.cutout, - cutout_dir=os.path.dirname(snakemake.output[0]), - **cutout_params) + snapshots = pd.date_range(freq='h', **snakemake.config['snapshots']) + time = [snapshots[0], snapshots[-1]] + cutout_params['time'] = slice(*cutout_params.get('time', time)) - nprocesses = snakemake.config['atlite'].get('nprocesses', 4) + if {'x', 'y', 'bounds'}.isdisjoint(cutout_params): + # Determine the bounds from bus regions with a buffer of two grid cells + onshore = gpd.read_file(snakemake.input.regions_onshore) + offshore = gpd.read_file(snakemake.input.regions_offshore) + regions = onshore.append(offshore) + d = max(cutout_params.get('dx', 0.25), cutout_params.get('dy', 0.25))*2 + cutout_params['bounds'] = regions.total_bounds + [-d, -d, d, d] + elif {'x', 'y'}.issubset(cutout_params): + cutout_params['x'] = slice(*cutout_params['x']) + 
cutout_params['y'] = slice(*cutout_params['y']) - nprocesses = snakemake.config['atlite'].get('nprocesses', 4) - cutout.prepare(nprocesses=nprocesses) + + logger.info(f"Preparing cutout with parameters {cutout_params}.") + features = cutout_params.pop('features', None) + cutout = atlite.Cutout(snakemake.output[0], **cutout_params) + cutout.prepare(features=features) diff --git a/scripts/build_hydro_profile.py b/scripts/build_hydro_profile.py index 339fccaf..395753c0 100644 --- a/scripts/build_hydro_profile.py +++ b/scripts/build_hydro_profile.py @@ -62,7 +62,6 @@ Description import logging from _helpers import configure_logging -import os import atlite import geopandas as gpd from vresutils import hydro as vhydro @@ -76,20 +75,21 @@ if __name__ == "__main__": configure_logging(snakemake) config = snakemake.config['renewable']['hydro'] - cutout_dir = os.path.dirname(snakemake.input.cutout) - cutout = atlite.Cutout(config['cutout'], cutout_dir=cutout_dir) + cutout = atlite.Cutout(snakemake.input.cutout) countries = snakemake.config['countries'] - country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry'].reindex(countries) + country_shapes = (gpd.read_file(snakemake.input.country_shapes) + .set_index('name')['geometry'].reindex(countries)) country_shapes.index.name = 'countries' - eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries) + eia_stats = vhydro.get_eia_annual_hydro_generation( + snakemake.input.eia_hydro_generation).reindex(columns=countries) inflow = cutout.runoff(shapes=country_shapes, smooth=True, lower_threshold_quantile=True, normalize_using_yearly=eia_stats) if 'clip_min_inflow' in config: - inflow.values[inflow.values < config['clip_min_inflow']] = 0. + inflow = inflow.where(inflow > config['clip_min_inflow'], 0) inflow.to_netcdf(snakemake.output[0]) diff --git a/scripts/build_natura_raster.py b/scripts/build_natura_raster.py index 39667ca0..63b311e9 100644 --- a/scripts/build_natura_raster.py +++ b/scripts/build_natura_raster.py @@ -43,30 +43,49 @@ import logging from _helpers import configure_logging import atlite -import geokit as gk -from pathlib import Path +import geopandas as gpd +import rasterio as rio +from rasterio.features import geometry_mask +from rasterio.warp import transform_bounds logger = logging.getLogger(__name__) + def determine_cutout_xXyY(cutout_name): - cutout = atlite.Cutout(cutout_name, cutout_dir=cutout_dir) + cutout = atlite.Cutout(cutout_name) + assert cutout.crs.to_epsg() == 4326 x, X, y, Y = cutout.extent - dx = (X - x) / (cutout.shape[1] - 1) - dy = (Y - y) / (cutout.shape[0] - 1) + dx, dy = cutout.dx, cutout.dy return [x - dx/2., X + dx/2., y - dy/2., Y + dy/2.] 
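A quick worked example of the grid snapping performed by ``get_transform_and_shape`` below; the bounds are illustrative EPSG:3035 coordinates, not values from the repository:

.. code:: python

    res = 100.0  # target raster resolution in metres (EPSG:3035)
    # (left, bottom, right, top), illustrative values
    bounds = (4504234.1, 2667911.8, 4505130.9, 2668440.2)

    # Snap the lower-left corner down and the upper-right corner up to the
    # 100 m grid, so the raster origin is aligned and the extent covers a
    # whole number of cells.
    left, bottom = [(b // res) * res for b in bounds[:2]]
    right, top = [(b // res + 1) * res for b in bounds[2:]]
    shape = int((top - bottom) // res), int((right - left) // res)

    assert (left, bottom, right, top) == (4504200.0, 2667900.0, 4505200.0, 2668500.0)
    assert shape == (6, 10)  # (rows along y, columns along x), 100 m cells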
+def get_transform_and_shape(bounds, res): + left, bottom = [(b // res)* res for b in bounds[:2]] + right, top = [(b // res + 1) * res for b in bounds[2:]] + shape = int((top - bottom) // res), int((right - left) / res) + transform = rio.Affine(res, 0, left, 0, -res, top) + return transform, shape + + if __name__ == "__main__": if 'snakemake' not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake('build_natura_raster') configure_logging(snakemake) - cutout_dir = Path(snakemake.input.cutouts[0]).parent.resolve() - cutout_names = {res['cutout'] for res in snakemake.config['renewable'].values()} - xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutout_names)) - xXyY = min(xs), max(Xs), min(ys), max(Ys) - natura = gk.vector.loadVector(snakemake.input.natura) - extent = gk.Extent.from_xXyY(xXyY).castTo(3035).fit(100) - extent.rasterize(natura, pixelWidth=100, pixelHeight=100, output=snakemake.output[0]) + cutouts = snakemake.input.cutouts + xs, Xs, ys, Ys = zip(*(determine_cutout_xXyY(cutout) for cutout in cutouts)) + bounds = transform_bounds(4326, 3035, min(xs), min(ys), max(Xs), max(Ys)) + transform, out_shape = get_transform_and_shape(bounds, res=100) + + # adjusted boundaries + shapes = gpd.read_file(snakemake.input.natura).to_crs(3035) + raster = ~geometry_mask(shapes.geometry, out_shape[::-1], transform) + raster = raster.astype(rio.uint8) + + with rio.open(snakemake.output[0], 'w', driver='GTiff', dtype=rio.uint8, + count=1, transform=transform, crs=3035, compress='lzw', + width=raster.shape[1], height=raster.shape[0]) as dst: + dst.write(raster, indexes=1) + diff --git a/scripts/build_renewable_profiles.py b/scripts/build_renewable_profiles.py index 71adb66e..111eb772 100644 --- a/scripts/build_renewable_profiles.py +++ b/scripts/build_renewable_profiles.py @@ -60,7 +60,6 @@ Inputs **Source:** `GEBCO `_ - ``resources/natura.tiff``: confer :ref:`natura` -- ``resources/country_shapes.geojson``: confer :ref:`shapes` - ``resources/offshore_shapes.geojson``: confer :ref:`shapes` - ``resources/regions_onshore.geojson``: (if not offshore wind), confer :ref:`busregions` - ``resources/regions_offshore.geojson``: (if offshore wind), :ref:`busregions` @@ -180,220 +179,158 @@ node (`p_nom_max`): ``simple`` and ``conservative``: reached. 
""" +import progressbar as pgb +import geopandas as gpd +import xarray as xr +import numpy as np +import functools +import atlite import logging +from pypsa.geo import haversine +from shapely.geometry import LineString +import time + from _helpers import configure_logging -import os -import atlite - -import numpy as np -import xarray as xr -import pandas as pd -import multiprocessing as mp -import matplotlib.pyplot as plt -import progressbar as pgb - -from scipy.sparse import csr_matrix, vstack -from pypsa.geo import haversine -from vresutils import landuse as vlanduse -from vresutils.array import spdiag - logger = logging.getLogger(__name__) -bounds = dx = dy = config = paths = gebco = clc = natura = None - - -def init_globals(bounds_xXyY, n_dx, n_dy, n_config, n_paths): - # Late import so that the GDAL Context is only created in the new processes - global gl, gk, gdal - import glaes as gl - import geokit as gk - from osgeo import gdal as gdal - - # global in each process of the multiprocessing.Pool - global bounds, dx, dy, config, paths, gebco, clc, natura - - bounds = gk.Extent.from_xXyY(bounds_xXyY) - dx = n_dx - dy = n_dy - config = n_config - paths = n_paths - - if "max_depth" in config: - gebco = gk.raster.loadRaster(paths["gebco"]) - gebco.SetProjection(gk.srs.loadSRS(4326).ExportToWkt()) - - clc = gk.raster.loadRaster(paths["corine"]) - clc.SetProjection(gk.srs.loadSRS(3035).ExportToWkt()) - - natura = gk.raster.loadRaster(paths["natura"]) - - -def downsample_to_coarse_grid(bounds, dx, dy, mask, data): - # The GDAL warp function with the 'average' resample algorithm needs a band of zero values of at least - # the size of one coarse cell around the original raster or it produces erroneous results - orig = mask.createRaster(data=data) - padded_extent = mask.extent.castTo(bounds.srs).pad(max(dx, dy)).castTo(mask.srs) - padded = padded_extent.fit((mask.pixelWidth, mask.pixelHeight)).warp(orig, mask.pixelWidth, mask.pixelHeight) - orig = None # free original raster - average = bounds.createRaster(dx, dy, dtype=gdal.GDT_Float32) - assert gdal.Warp(average, padded, resampleAlg='average') == 1, "gdal warp failed: %s" % gdal.GetLastErrorMsg() - return average - - -def calculate_potential(gid, save_map=None): - feature = gk.vector.extractFeature(paths["regions"], where=gid) - ec = gl.ExclusionCalculator(feature.geom) - - corine = config.get("corine", {}) - if isinstance(corine, list): - corine = {'grid_codes': corine} - if "grid_codes" in corine: - ec.excludeRasterType(clc, value=corine["grid_codes"], invert=True) - if corine.get("distance", 0.) > 0.: - ec.excludeRasterType(clc, value=corine["distance_grid_codes"], buffer=corine["distance"]) - - if config.get("natura", False): - ec.excludeRasterType(natura, value=1) - if "max_depth" in config: - ec.excludeRasterType(gebco, (None, -config["max_depth"])) - - # TODO compute a distance field as a raster beforehand - if 'max_shore_distance' in config: - ec.excludeVectorType(paths["country_shapes"], buffer=config['max_shore_distance'], invert=True) - if 'min_shore_distance' in config: - ec.excludeVectorType(paths["country_shapes"], buffer=config['min_shore_distance']) - - if save_map is not None: - ec.draw() - plt.savefig(save_map, transparent=True) - plt.close() - - availability = downsample_to_coarse_grid(bounds, dx, dy, ec.region, np.where(ec.region.mask, ec._availability, 0)) - - return csr_matrix(gk.raster.extractMatrix(availability).flatten() / 100.) 
- if __name__ == '__main__': if 'snakemake' not in globals(): from _helpers import mock_snakemake snakemake = mock_snakemake('build_renewable_profiles', technology='solar') configure_logging(snakemake) - pgb.streams.wrap_stderr() - + paths = snakemake.input + nprocesses = snakemake.config['atlite'].get('nprocesses') + noprogress = not snakemake.config['atlite'].get('show_progress', True) config = snakemake.config['renewable'][snakemake.wildcards.technology] - - time = pd.date_range(freq='m', **snakemake.config['snapshots']) - params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]])) - - cutout = atlite.Cutout(config['cutout'], - cutout_dir=os.path.dirname(snakemake.input.cutout), - **params) - - minx, maxx, miny, maxy = cutout.extent - dx = (maxx - minx) / (cutout.shape[1] - 1) - dy = (maxy - miny) / (cutout.shape[0] - 1) - bounds_xXyY = (minx - dx/2., maxx + dx/2., miny - dy/2., maxy + dy/2.) - - # Use GLAES to compute available potentials and the transition matrix - paths = dict(snakemake.input) - - # Use the following for testing the default windows method on linux - # mp.set_start_method('spawn') - with mp.Pool(initializer=init_globals, initargs=(bounds_xXyY, dx, dy, config, paths), - maxtasksperchild=20, processes=snakemake.config['atlite'].get('nprocesses', 2)) as pool: - - # The GDAL library creates a GDAL context on module import, which may not be shared over multiple - # processes or the PROJ4 library has a hickup, so we import only after forking. - import geokit as gk - - regions = gk.vector.extractFeatures(paths["regions"], onlyAttr=True) - buses = pd.Index(regions['name'], name="bus") - widgets = [ - pgb.widgets.Percentage(), - ' ', pgb.widgets.SimpleProgress(format='(%s)' % pgb.widgets.SimpleProgress.DEFAULT_FORMAT), - ' ', pgb.widgets.Bar(), - ' ', pgb.widgets.Timer(), - ' ', pgb.widgets.ETA() - ] - progressbar = pgb.ProgressBar(prefix='Compute GIS potentials: ', widgets=widgets, max_value=len(regions)) - matrix = vstack(list(progressbar(pool.imap(calculate_potential, regions.index)))) - - potentials = config['capacity_per_sqkm'] * vlanduse._cutout_cell_areas(cutout) - potmatrix = matrix * spdiag(potentials.ravel()) - if not config.get('keep_all_available_areas', False): - potmatrix.data[potmatrix.data < 1.] = 0 # ignore weather cells where only less than 1 MW can be installed - potmatrix.eliminate_zeros() - - resource = config['resource'] - func = getattr(cutout, resource.pop('method')) + resource = config['resource'] # pv panel config / wind turbine config correction_factor = config.get('correction_factor', 1.) 
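To make the dispatch in the rewritten main routine explicit: ``resource['method']`` selects the atlite conversion function (``cutout.wind`` or ``cutout.pv``) via ``getattr`` further down. A short sketch under assumed values, with the turbine name taken from the tutorial configuration:

.. code:: python

    import atlite

    cutout = atlite.Cutout("cutouts/europe-2013-era5.nc")  # illustrative path
    resource = {"method": "wind", "turbine": "Vestas_V112_3MW"}

    # 'method' names the conversion function; the remaining keys are
    # passed through to it as keyword arguments.
    func = getattr(cutout, resource.pop("method"))
    capacity_factor = func(capacity_factor=True, **resource)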
- if correction_factor != 1.: - logger.warning('correction_factor is set as {}'.format(correction_factor)) - capacity_factor = correction_factor * func(capacity_factor=True, show_progress='Compute capacity factors: ', **resource).stack(spatial=('y', 'x')).values - layoutmatrix = potmatrix * spdiag(capacity_factor) - - profile, capacities = func(matrix=layoutmatrix, index=buses, per_unit=True, - return_capacity=True, show_progress='Compute profiles: ', - **resource) - + capacity_per_sqkm = config['capacity_per_sqkm'] p_nom_max_meth = config.get('potential', 'conservative') - if p_nom_max_meth == 'simple': - p_nom_max = xr.DataArray(np.asarray(potmatrix.sum(axis=1)).squeeze(), [buses]) - elif p_nom_max_meth == 'conservative': - # p_nom_max has to be calculated for each bus and is the minimal ratio - # (min over all weather grid cells of the bus region) between the available - # potential (potmatrix) and the used normalised layout (layoutmatrix / - # capacities), so we would like to calculate i.e. potmatrix / (layoutmatrix / - # capacities). Since layoutmatrix = potmatrix * capacity_factor, this - # corresponds to capacities/max(capacity factor in the voronoi cell) - p_nom_max = xr.DataArray([1./np.max(capacity_factor[inds]) if len(inds) else 0. - for inds in np.split(potmatrix.indices, potmatrix.indptr[1:-1])], [buses]) * capacities + if isinstance(config.get("corine", {}), list): + config['corine'] = {'grid_codes': config['corine']} + + if correction_factor != 1.: + logger.info(f'correction_factor is set as {correction_factor}') + + + cutout = atlite.Cutout(paths['cutout']) + regions = gpd.read_file(paths.regions).set_index('name').rename_axis('bus') + buses = regions.index + + excluder = atlite.ExclusionContainer(crs=3035, res=100) + + if config['natura']: + excluder.add_raster(paths.natura, nodata=0, allow_no_overlap=True) + + corine = config.get("corine", {}) + if "grid_codes" in corine: + codes = corine["grid_codes"] + excluder.add_raster(paths.corine, codes=codes, invert=True, crs=3035) + if corine.get("distance", 0.) 
> 0.: + codes = corine["distance_grid_codes"] + buffer = corine["distance"] + excluder.add_raster(paths.corine, codes=codes, buffer=buffer, crs=3035) + + if "max_depth" in config: + # lambda not supported for atlite + multiprocessing + # use named function np.greater with partially frozen argument instead + # and exclude areas where: -max_depth > grid cell depth + func = functools.partial(np.greater, -config['max_depth']) + excluder.add_raster(paths.gebco, codes=func, crs=4326, nodata=-1000) + + if 'min_shore_distance' in config: + buffer = config['min_shore_distance'] + excluder.add_geometry(paths.country_shapes, buffer=buffer) + + if 'max_shore_distance' in config: + buffer = config['max_shore_distance'] + excluder.add_geometry(paths.country_shapes, buffer=buffer, invert=True) + + kwargs = dict(nprocesses=nprocesses, disable_progressbar=noprogress) + if noprogress: + logger.info('Calculate landuse availabilities...') + start = time.time() + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) + duration = time.time() - start + logger.info(f'Completed availability calculation ({duration:2.2f}s)') else: - raise AssertionError('Config key `potential` should be one of "simple" (default) or "conservative",' ' not "{}"'.format(p_nom_max_meth)) + availability = cutout.availabilitymatrix(regions, excluder, **kwargs) - layout = xr.DataArray(np.asarray(potmatrix.sum(axis=0)).reshape(cutout.shape), - [cutout.meta.indexes[ax] for ax in ['y', 'x']]) + area = cutout.grid.to_crs(3035).area / 1e6 + area = xr.DataArray(area.values.reshape(cutout.shape), + [cutout.coords['y'], cutout.coords['x']]) - # Determine weighted average distance from substation - cell_coords = cutout.grid_coordinates() + potential = capacity_per_sqkm * availability.sum('bus') * area + func = getattr(cutout, resource.pop('method')) + resource['dask_kwargs'] = {'num_workers': nprocesses} + capacity_factor = correction_factor * func(capacity_factor=True, **resource) + layout = capacity_factor * area * capacity_per_sqkm + profile, capacities = func(matrix=availability.stack(spatial=['y','x']), + layout=layout, index=buses, + per_unit=True, return_capacity=True, **resource) + + logger.info(f"Calculating maximal capacity per bus (method '{p_nom_max_meth}')") + if p_nom_max_meth == 'simple': + p_nom_max = capacity_per_sqkm * availability @ area + elif p_nom_max_meth == 'conservative': + max_cap_factor = capacity_factor.where(availability!=0).max(['x', 'y']) + p_nom_max = capacities / max_cap_factor + else: + raise AssertionError('Config key `potential` should be one of "simple" ' + f'(default) or "conservative", not "{p_nom_max_meth}"') + + + + logger.info('Calculate average distances.') + layoutmatrix = (layout * availability).stack(spatial=['y','x']) + + coords = cutout.grid[['x', 'y']] + bus_coords = regions[['x', 'y']] average_distance = [] - for i in regions.index: - row = layoutmatrix[i] - distances = haversine(regions.loc[i, ['x', 'y']], cell_coords[row.indices])[0] - average_distance.append((distances * (row.data / row.data.sum())).sum()) + centre_of_mass = [] + for bus in buses: + row = layoutmatrix.sel(bus=bus).data + nz_b = row != 0 + row = row[nz_b] + co = coords[nz_b] + distances = haversine(bus_coords.loc[bus], co) + average_distance.append((distances * (row / row.sum())).sum()) + centre_of_mass.append(co.values.T @ (row / row.sum())) average_distance = xr.DataArray(average_distance, [buses]) + centre_of_mass = xr.DataArray(centre_of_mass, [buses, ('spatial', ['x', 'y'])]) + ds = 
diff --git a/scripts/make_summary.py b/scripts/make_summary.py
index ada3fa8a..f815d5f0 100644
--- a/scripts/make_summary.py
+++ b/scripts/make_summary.py
@@ -60,7 +60,6 @@ import os
 import pypsa
 import pandas as pd
-from six import iteritems

 from add_electricity import load_costs, update_transmission_costs

 idx = pd.IndexSlice
@@ -71,7 +70,7 @@ opt_name = {"Store": "e", "Line" : "s", "Transformer" : "s"}

 def _add_indexed_rows(df, raw_index):
-    new_index = df.index|pd.MultiIndex.from_product(raw_index)
+    new_index = df.index.union(pd.MultiIndex.from_product(raw_index))
     if isinstance(new_index, pd.Index):
         new_index = pd.MultiIndex.from_tuples(new_index)

@@ -126,7 +125,7 @@ def calculate_costs(n, label, costs):

         marginal_costs_grouped = marginal_costs.groupby(c.df.carrier).sum()

-        costs = costs.reindex(costs.index|pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index]))
+        costs = costs.reindex(costs.index.union(pd.MultiIndex.from_product([[c.list_name],["marginal"],marginal_costs_grouped.index])))

         costs.loc[idx[c.list_name,"marginal",list(marginal_costs_grouped.index)],label] = marginal_costs_grouped.values

@@ -222,7 +221,7 @@ def calculate_supply(n, label, supply):
             #lots of sign compensation for direction and to do maximums
             s = (-1)**(1-int(end))*((-1)**int(end)*c.pnl["p"+end][items]).max().groupby(c.df.loc[items,'carrier']).sum()

-            supply = supply.reindex(supply.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index]))
+            supply = supply.reindex(supply.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
             supply.loc[idx[i,c.list_name,list(s.index)],label] = s.values

     return supply
@@ -268,7 +267,7 @@ def calculate_supply_energy(n, label, supply_energy):

             s = (-1)*c.pnl["p"+end][items].sum().groupby(c.df.loc[items,'carrier']).sum()

-            supply_energy = supply_energy.reindex(supply_energy.index|pd.MultiIndex.from_product([[i],[c.list_name],s.index]))
+            supply_energy = supply_energy.reindex(supply_energy.index.union(pd.MultiIndex.from_product([[i],[c.list_name],s.index])))
             supply_energy.loc[idx[i,c.list_name,list(s.index)],label] = s.values

     return supply_energy
@@ -276,7 +275,7 @@ def calculate_metrics(n,label,metrics):

-    metrics = metrics.reindex(metrics.index|pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"]))
+    metrics = metrics.reindex(metrics.index.union(pd.Index(["line_volume","line_volume_limit","line_volume_AC","line_volume_DC","line_volume_shadow","co2_shadow"])))

     metrics.at["line_volume_DC",label] = (n.links.length*n.links.p_nom_opt)[n.links.carrier == "DC"].sum()
     metrics.at["line_volume_AC",label] = (n.lines.length*n.lines.s_nom_opt).sum()

@@ -298,7 +297,7 @@ def calculate_prices(n,label,prices):

     bus_type = pd.Series(n.buses.index.str[3:],n.buses.index).replace("","electricity")

-    prices = prices.reindex(prices.index|bus_type.value_counts().index)
+    prices = prices.reindex(prices.index.union(bus_type.value_counts().index))

     logger.warning("Prices are time-averaged, not load-weighted")
     prices[label] = n.buses_t.marginal_price.mean().groupby(bus_type).mean()
@@ -386,7 +385,7 @@ def make_summaries(networks_dict, country='all'):
     for output in outputs:
         dfs[output] = pd.DataFrame(columns=columns,dtype=float)

-    for label, filename in iteritems(networks_dict):
+    for label, filename in networks_dict.items():
         print(label, filename)
         if not os.path.exists(filename):
             print("does not exist!!")
@@ -417,7 +416,7 @@ def make_summaries(networks_dict, country='all'):
 def to_csv(dfs):
     dir = snakemake.output[0]
     os.makedirs(dir, exist_ok=True)
-    for key, df in iteritems(dfs):
+    for key, df in dfs.items():
         df.to_csv(os.path.join(dir, f"{key}.csv"))

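The `make_summary.py` hunks above all make the same mechanical change: recent pandas versions deprecate the `|` operator spelling for set operations on `Index` objects, so the explicit `.union` call is used instead. A minimal sketch of the equivalence, with invented indices:

```python
import pandas as pd

a = pd.Index(["onwind", "solar"])
b = pd.Index(["solar", "OCGT"])

# deprecated spelling that warns on recent pandas: a | b
combined = a.union(b)
# Index(['OCGT', 'onwind', 'solar'], dtype='object')
```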
diff --git a/scripts/plot_network.py b/scripts/plot_network.py
index 84423916..810f6284 100755
--- a/scripts/plot_network.py
+++ b/scripts/plot_network.py
@@ -25,7 +25,6 @@ from _helpers import (load_network_for_plots, aggregate_p, aggregate_costs,

 import pandas as pd
 import numpy as np
-from six.moves import zip

 import cartopy.crs as ccrs
 import matplotlib.pyplot as plt
@@ -164,7 +163,7 @@ def plot_map(n, ax=None, attribute='p_nom', opts={}):
                    handler_map=make_handler_map_to_scale_circles_as_in(ax))
     ax.add_artist(l2)

-    techs = (bus_sizes.index.levels[1]) & pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs'])
+    techs = (bus_sizes.index.levels[1]).intersection(pd.Index(opts['vre_techs'] + opts['conv_techs'] + opts['storage_techs']))
     handles = []
     labels = []
     for t in techs:
diff --git a/scripts/prepare_network.py b/scripts/prepare_network.py
index fc5c6e77..4caa5703 100755
--- a/scripts/prepare_network.py
+++ b/scripts/prepare_network.py
@@ -62,7 +62,6 @@ import re
 import pypsa
 import numpy as np
 import pandas as pd
-from six import iteritems

 from add_electricity import load_costs, update_transmission_costs

@@ -145,7 +144,7 @@ def average_every_nhours(n, offset):

     for c in n.iterate_components():
         pnl = getattr(m, c.list_name+"_t")
-        for k, df in iteritems(c.pnl):
+        for k, df in c.pnl.items():
             if not df.empty:
                 pnl[k] = df.resample(offset).mean()

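For context, the `c.pnl.items()` loop feeds `average_every_nhours`, which coarsens every component time series via pandas resampling. A toy example with invented hourly values:

```python
import pandas as pd

snapshots = pd.date_range("2013-03-01", periods=6, freq="H")
load = pd.Series([40., 42., 44., 50., 52., 54.], index=snapshots)

# corresponds to e.g. a `3H` temporal-resolution opts wildcard
coarse = load.resample("3H").mean()
# 2013-03-01 00:00:00    42.0
# 2013-03-01 03:00:00    52.0
# Freq: 3H, dtype: float64
```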
diff --git a/scripts/retrieve_cutout.py b/scripts/retrieve_cutout.py
deleted file mode 100644
index 719a32fc..00000000
--- a/scripts/retrieve_cutout.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# SPDX-FileCopyrightText: 2019-2020 Fabian Hofmann (FIAS)
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""
-.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3517949.svg
-    :target: https://doi.org/10.5281/zenodo.3517949
-
-Cutouts are spatiotemporal subsets of the European weather data from the `ECMWF ERA5 `_ reanalysis dataset and the `CMSAF SARAH-2 `_ solar surface radiation dataset for the year 2013 (3.9 GB).
-They have been prepared by and are for use with the `atlite `_ tool. You can either generate them yourself using the ``build_cutouts`` rule or retrieve them directly from `zenodo `_ through the rule ``retrieve_cutout`` described here.
-
-.. note::
-    To download cutouts yourself from the `ECMWF ERA5 `_ you need to `set up the CDS API `_.
-
-The :ref:`tutorial` uses smaller `cutouts `_ than required for the full model (19 MB)
-
-.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3518020.svg
-    :target: https://doi.org/10.5281/zenodo.3518020
-
-
-**Relevant Settings**
-
-.. code:: yaml
-
-    tutorial:
-    enable:
-        build_cutout:
-
-.. seealso::
-    Documentation of the configuration file ``config.yaml`` at
-    :ref:`toplevel_cf`
-
-**Outputs**
-
-- ``cutouts/{cutout}``: weather data from either the `ERA5 `_ reanalysis weather dataset or `SARAH-2 `_ satellite-based historic weather data.
-
-.. seealso::
-    For details see :mod:`build_cutout` and read the `atlite documentation `_.
-
-"""
-
-import logging
-logger = logging.getLogger(__name__)
-
-from pathlib import Path
-import tarfile
-from _helpers import progress_retrieve, configure_logging
-
-if __name__ == "__main__":
-    if 'snakemake' not in globals():
-        from _helpers import mock_snakemake
-        snakemake = mock_snakemake('retrieve_cutout')
-        rootpath = '..'
-    else:
-        rootpath = '.'
-
-    configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102)
-
-    if snakemake.config['tutorial']:
-        url = "https://zenodo.org/record/3518020/files/pypsa-eur-tutorial-cutouts.tar.xz"
-    else:
-        url = "https://zenodo.org/record/3517949/files/pypsa-eur-cutouts.tar.xz"
-
-    # Save location
-    tarball_fn = Path(f"{rootpath}/cutouts.tar.xz")
-
-    logger.info(f"Downloading cutouts from '{url}'.")
-    progress_retrieve(url, tarball_fn)
-
-    logger.info(f"Extracting cutouts.")
-    tarfile.open(tarball_fn).extractall(path=rootpath)
-
-    tarball_fn.unlink()
-
-    logger.info(f"Cutouts available in '{Path(tarball_fn.stem).stem}'.")
diff --git a/scripts/retrieve_natura_raster.py b/scripts/retrieve_natura_raster.py
deleted file mode 100644
index b179b46a..00000000
--- a/scripts/retrieve_natura_raster.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2019-2020 Fabian Hofmann (FIAS)
-# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""
-.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.3518215.svg
-    :target: https://doi.org/10.5281/zenodo.3518215
-
-This rule, as a substitute for :mod:`build_natura_raster`, downloads an already rasterized version (`natura.tiff `_) of `Natura 2000 `_ natural protection areas to reduce computation times. The file is placed into the ``resources`` sub-directory.
-
-**Relevant Settings**
-
-.. code:: yaml
-
-    enable:
-        build_natura_raster:
-
-.. seealso::
-    Documentation of the configuration file ``config.yaml`` at
-    :ref:`toplevel_cf`
-
-**Outputs**
-
-- ``resources/natura.tiff``: Rasterized version of `Natura 2000 `_ natural protection areas to reduce computation times.
-
-.. seealso::
-    For details see :mod:`build_natura_raster`.
-
-"""
-
-import logging
-
-from _helpers import progress_retrieve, configure_logging
-
-logger = logging.getLogger(__name__)
-
-if __name__ == "__main__":
-    if 'snakemake' not in globals():
-        from _helpers import mock_snakemake
-        snakemake = mock_snakemake('retrieve_natura_raster')
-    configure_logging(snakemake) # TODO Make logging compatible with progressbar (see PR #102)
-
-    url = "https://zenodo.org/record/3518215/files/natura.tiff"
-
-    logger.info(f"Downloading natura raster from '{url}'.")
-    progress_retrieve(url, snakemake.output[0])
-
-    logger.info(f"Natura raster available as '{snakemake.output[0]}'.")
diff --git a/scripts/simplify_network.py b/scripts/simplify_network.py
index 26bf96e3..6f33b351 100644
--- a/scripts/simplify_network.py
+++ b/scripts/simplify_network.py
@@ -93,8 +93,7 @@ import numpy as np
 import scipy as sp
 from scipy.sparse.csgraph import connected_components, dijkstra

-from six import iteritems
-from six.moves import reduce
+from functools import reduce

 import pypsa
 from pypsa.io import import_components_from_dataframe, import_series_from_dataframe
@@ -193,7 +192,7 @@ def _aggregate_and_move_components(n, busmap, connection_costs_to_bus, aggregate
         n.mremove(c, n.df(c).index)

         import_components_from_dataframe(n, df, c)
-        for attr, df in iteritems(pnl):
+        for attr, df in pnl.items():
             if not df.empty:
                 import_series_from_dataframe(n, df, c, attr)

@@ -237,7 +236,7 @@ def simplify_links(n):
                       if len(G.adj[m]) > 2 or (set(G.adj[m]) - nodes)}

     for u in supernodes:
-        for m, ls in iteritems(G.adj[u]):
+        for m, ls in G.adj[u].items():
             if m not in nodes or m in seen: continue

             buses = [u, m]
@@ -245,7 +244,7 @@ def simplify_links(n):
             while m not in (supernodes | seen):
                 seen.add(m)

-                for m2, ls in iteritems(G.adj[m]):
+                for m2, ls in G.adj[m].items():
                     if m2 in seen or m2 == u: continue
                     buses.append(m2)
                     links.append(list(ls)) # [name for name in ls])
@@ -324,6 +323,8 @@ def remove_stubs(n):
 def cluster(n, n_clusters):
     logger.info(f"Clustering to {n_clusters} buses")

+    focus_weights = snakemake.config.get('focus_weights', None)
+
     renewable_carriers = pd.Index([tech
                                    for tech in n.generators.carrier.unique()
                                    if tech.split('-', 2)[0] in snakemake.config['renewable']])
@@ -337,7 +338,8 @@ def cluster(n, n_clusters):
                                       for tech in renewable_carriers])) if len(renewable_carriers) > 0 else 'conservative')
     clustering = clustering_for_n_clusters(n, n_clusters, custom_busmap=False, potential_mode=potential_mode,
-                                           solver_name=snakemake.config['solving']['solver']['name'])
+                                           solver_name=snakemake.config['solving']['solver']['name'],
+                                           focus_weights=focus_weights)

     return clustering.network, clustering.busmap
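The `focus_weights` option that `cluster()` now passes through to `clustering_for_n_clusters` reserves a chosen share of the clustered buses for particular countries. A hypothetical `config.yaml` fragment, with shares invented for illustration:

```yaml
# give Germany half and Denmark a tenth of all clusters;
# the remainder is distributed over the other modelled countries
focus_weights:
  DE: 0.5
  DK: 0.1
```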
diff --git a/scripts/solve_network.py b/scripts/solve_network.py
index db64e576..24cd0464 100755
--- a/scripts/solve_network.py
+++ b/scripts/solve_network.py
@@ -241,7 +241,7 @@ def extra_functionality(n, snapshots):
     add_battery_constraints(n)


-def solve_network(n, config, solver_log=None, opts='', **kwargs):
+def solve_network(n, config, opts='', **kwargs):
     solver_options = config['solving']['solver'].copy()
     solver_name = solver_options.pop('name')
     cf_solving = config['solving']['options']
@@ -282,8 +282,9 @@ if __name__ == "__main__":
         with memory_logger(filename=fn, interval=30.) as mem:
             n = pypsa.Network(snakemake.input[0])
             n = prepare_network(n, solve_opts)
-            n = solve_network(n, config=snakemake.config, solver_dir=tmpdir,
-                              solver_log=snakemake.log.solver, opts=opts)
+            n = solve_network(n, config=snakemake.config, opts=opts,
+                              solver_dir=tmpdir,
+                              solver_logfile=snakemake.log.solver)
             n.export_to_netcdf(snakemake.output[0])

         logger.info("Maximum memory usage: {}".format(mem.mem_usage))
diff --git a/scripts/solve_operations_network.py b/scripts/solve_operations_network.py
index c65e6889..b698c2f1 100644
--- a/scripts/solve_operations_network.py
+++ b/scripts/solve_operations_network.py
@@ -71,7 +71,7 @@ def set_parameters_from_optimized(n, n_optim):
         n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
     n.lines['s_nom_extendable'] = False

-    links_dc_i = n.links.index[n.links.carrier == 'DC']
+    links_dc_i = n.links.index[n.links.p_nom_extendable]
     n.links.loc[links_dc_i, 'p_nom'] = \
         n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
     n.links.loc[links_dc_i, 'p_nom_extendable'] = False
@@ -111,8 +111,9 @@ if __name__ == "__main__":
         fn = getattr(snakemake.log, 'memory', None)
         with memory_logger(filename=fn, interval=30.) as mem:
             n = prepare_network(n, solve_opts=snakemake.config['solving']['options'])
-            n = solve_network(n, config, solver_dir=tmpdir,
-                              solver_log=snakemake.log.solver, opts=opts)
+            n = solve_network(n, config=config, opts=opts,
+                              solver_dir=tmpdir,
+                              solver_logfile=snakemake.log.solver)
             n.export_to_netcdf(snakemake.output[0])

         logger.info("Maximum memory usage: {}".format(mem.mem_usage))
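The `solver_log` to `solver_logfile` rename matters because `solve_network` hands its `**kwargs` on to PyPSA's optimisation routines, which expect the `solver_logfile` keyword. A minimal sketch; the network path is illustrative:

```python
import pypsa

n = pypsa.Network("networks/elec_s_6.nc")  # illustrative input file
# keyword names must match PyPSA's lopf signature when passed through:
n.lopf(solver_name="glpk", solver_logfile="logs/solver.log")
```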
diff --git a/test/config.test1.yaml b/test/config.test1.yaml
index 2a91aaf0..3ed02082 100755
--- a/test/config.test1.yaml
+++ b/test/config.test1.yaml
@@ -20,7 +20,7 @@ countries: ['DE']

 snapshots:
   start: "2013-03-01"
-  end: "2014-04-01"
+  end: "2013-03-08"
   closed: 'left' # end is not inclusive

 enable:
@@ -53,16 +53,15 @@ electricity:
 atlite:
   nprocesses: 4
   cutouts:
-    europe-2013-era5:
+    europe-2013-era5-tutorial:
       module: era5
-      xs: [4., 15.]
-      ys: [56., 46.]
-      months: [3, 3]
-      years: [2013, 2013]
+      x: [4., 15.]
+      y: [46., 56.]
+      time: ["2013-03-01", "2013-03-08"]

 renewable:
   onwind:
-    cutout: europe-2013-era5
+    cutout: europe-2013-era5-tutorial
     resource:
       method: wind
       turbine: Vestas_V112_3MW
@@ -79,7 +78,7 @@ renewable:
     potential: simple # or conservative
     clip_p_max_pu: 1.e-2
   offwind-ac:
-    cutout: europe-2013-era5
+    cutout: europe-2013-era5-tutorial
     resource:
       method: wind
       turbine: NREL_ReferenceTurbine_5MW_offshore
@@ -91,7 +90,7 @@ renewable:
     potential: simple # or conservative
     clip_p_max_pu: 1.e-2
   offwind-dc:
-    cutout: europe-2013-era5
+    cutout: europe-2013-era5-tutorial
     resource:
       method: wind
       turbine: NREL_ReferenceTurbine_5MW_offshore
@@ -104,7 +103,7 @@ renewable:
     potential: simple # or conservative
     clip_p_max_pu: 1.e-2
   solar:
-    cutout: europe-2013-era5
+    cutout: europe-2013-era5-tutorial
     resource:
       method: pv
       panel: CSi
@@ -147,9 +146,9 @@ transformers:

 load:
   url: https://data.open-power-system-data.org/time_series/2019-06-05/time_series_60min_singleindex.csv
-  power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
+  power_statistics: True # only for files from <2019; set false in order to get ENTSOE transparency data
   interpolate_limit: 3 # data gaps up until this size are interpolated linearly
-  time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
+  time_shift_for_large_gaps: 1w # data gaps up until this size are copied by copying from
   manual_adjustments: true # false
   scaling_factor: 1.0
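The reworked cutout block reflects atlite v0.2, where a cutout is specified by `x`/`y`/`time` coordinates instead of the old `xs`/`ys`/`months`/`years` bounds. A sketch of the equivalent cutout creation in Python, assuming CDS API credentials are already configured:

```python
import atlite

# mirrors the tutorial cutout block above (atlite >= 0.2)
cutout = atlite.Cutout("cutouts/europe-2013-era5-tutorial.nc",
                       module="era5",
                       x=slice(4., 15.),
                       y=slice(46., 56.),
                       time=slice("2013-03-01", "2013-03-08"))
cutout.prepare()  # downloads ERA5 via the CDS API and writes the .nc file
```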