Merge branch 'master' into fneum/year-specific-techs

Commit 602375af8c

.github/workflows/ci.yaml (6 changed lines)
@@ -81,11 +81,7 @@ jobs:
           key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }}

     - name: Test snakemake workflow
-      run: |
-        snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
-        snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
-        snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
-        snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
+      run: ./test.sh

     - name: Upload artifacts
       uses: actions/upload-artifact@v4.3.0
@@ -74,7 +74,7 @@ repos:

   # Format Snakemake rule / workflow files
   - repo: https://github.com/snakemake/snakefmt
-    rev: v0.9.0
+    rev: v0.10.0
     hooks:
       - id: snakefmt

@@ -31,7 +31,12 @@ CDIR = RDIR if not run.get("shared_cutouts") else ""

 LOGS = "logs/" + RDIR
 BENCHMARKS = "benchmarks/" + RDIR
-RESOURCES = "resources/" + RDIR if not run.get("shared_resources") else "resources/"
+if not (shared_resources := run.get("shared_resources")):
+    RESOURCES = "resources/" + RDIR
+elif isinstance(shared_resources, str):
+    RESOURCES = "resources/" + shared_resources + "/"
+else:
+    RESOURCES = "resources/"
 RESULTS = "results/" + RDIR

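For context, the resources directory now resolves in three ways depending on the type of `shared_resources`. A minimal standalone sketch of the same branching as in the hunk above, with hypothetical run settings that are not taken from the repository configs:

    def resolve_resources(run: dict, rdir: str) -> str:
        # False/missing -> per-run directory, str -> named shared directory, True -> plain shared directory
        if not (shared_resources := run.get("shared_resources")):
            return "resources/" + rdir
        elif isinstance(shared_resources, str):
            return "resources/" + shared_resources + "/"
        else:
            return "resources/"

    print(resolve_resources({"shared_resources": False}, "my-run/"))   # resources/my-run/
    print(resolve_resources({"shared_resources": "test"}, "my-run/"))  # resources/test/
    print(resolve_resources({"shared_resources": True}, "my-run/"))    # resources/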
@@ -8,14 +8,14 @@ tutorial: true
 run:
   name: "test-elec"  # use this to keep track of runs with different settings
   disable_progressbar: true
-  shared_resources: true
+  shared_resources: "test"
   shared_cutouts: true

 scenario:
   clusters:
   - 5
   opts:
-  - Co2L-24H
+  - Co2L-24h

 countries: ['BE']


@@ -7,7 +7,7 @@ tutorial: true
 run:
   name: "test-sector-myopic"
   disable_progressbar: true
-  shared_resources: true
+  shared_resources: "test"
   shared_cutouts: true

 foresight: myopic

@@ -18,7 +18,7 @@ scenario:
   clusters:
   - 5
   sector_opts:
-  - 24H-T-H-B-I-A-dist1
+  - 24h-T-H-B-I-A-dist1
   planning_horizons:
   - 2030
   - 2040


@@ -7,7 +7,7 @@ tutorial: true
 run:
   name: "test-sector-overnight"
   disable_progressbar: true
-  shared_resources: true
+  shared_resources: "test"
   shared_cutouts: true


@@ -17,7 +17,7 @@ scenario:
   clusters:
   - 5
   sector_opts:
-  - CO2L0-24H-T-H-B-I-A-dist1
+  - CO2L0-24h-T-H-B-I-A-dist1
   planning_horizons:
   - 2030


@@ -7,7 +7,7 @@ tutorial: true
 run:
   name: "test-sector-perfect"
   disable_progressbar: true
-  shared_resources: true
+  shared_resources: "test"
   shared_cutouts: true

 foresight: perfect

@@ -18,7 +18,7 @@ scenario:
   clusters:
   - 5
   sector_opts:
-  - 8760H-T-H-B-I-A-dist1
+  - 8760h-T-H-B-I-A-dist1
   planning_horizons:
   - 2030
   - 2040
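The four test configs above also switch the temporal-resolution token in `opts`/`sector_opts` from uppercase `24H`/`8760H` to lowercase `24h`/`8760h`. These wildcard strings are plain dash-separated option lists; a tiny sketch of how such a string splits into individual options (the helper name is illustrative, not from the code base):

    def split_opts(wildcard: str) -> list[str]:
        # "Co2L-24h-T-H-B-I-A-dist1" -> ["Co2L", "24h", "T", "H", "B", "I", "A", "dist1"]
        return wildcard.split("-")

    print(split_opts("Co2L-24h"))
    print(split_opts("8760h-T-H-B-I-A-dist1"))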
@@ -57,6 +57,8 @@ Upcoming Release

 * Add the option to customise map projection in plotting config.

+* The order of buses (bus0, bus1, ...) for DAC components has changed to meet the convention of the other components. Therefore, ``bus0`` refers to the electricity bus (input), ``bus1`` to the heat bus (input), ``bus2`` to the CO2 atmosphere bus (input), and ``bus3`` to the CO2 storage bus (output).
+
 * The rule ``plot_network`` has been split into separate rules for plotting
   electricity, hydrogen and gas networks.


@@ -64,6 +66,12 @@ Upcoming Release

 * The ``highs`` solver was added to the default environment file.

+* Various minor bugfixes to the perfect foresight workflow, though perfect foresight must still be considered experimental.
+
+* It is now possible to determine the directory for shared resources by setting ``shared_resources`` to a string.
+
+* A ``test.sh`` script was added to the repository to run the tests locally.
+
 * Default settings for recycling rates and primary product shares of high-value
   chemicals have been set in accordance with the values used in `Neumann et al.
   (2023) <https://doi.org/10.1016/j.joule.2023.06.016>`_ linearly interpolated

@@ -78,6 +86,7 @@ Upcoming Release
   workflows with foresight "myopic" and still needs to be added for foresight option
   "perfect".

+
 PyPSA-Eur 0.9.0 (5th January 2024)
 ==================================

@@ -401,18 +401,22 @@ rule add_electricity:
             if str(fn).startswith("data/")
         },
         base_network=RESOURCES + "networks/base.nc",
-        line_rating=RESOURCES + "networks/line_rating.nc"
-        if config["lines"]["dynamic_line_rating"]["activate"]
-        else RESOURCES + "networks/base.nc",
+        line_rating=(
+            RESOURCES + "networks/line_rating.nc"
+            if config["lines"]["dynamic_line_rating"]["activate"]
+            else RESOURCES + "networks/base.nc"
+        ),
         tech_costs=COSTS,
         regions=RESOURCES + "regions_onshore.geojson",
         powerplants=RESOURCES + "powerplants.csv",
         hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
         geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
         unit_commitment="data/unit_commitment.csv",
-        fuel_price=RESOURCES + "monthly_fuel_price.csv"
-        if config["conventional"]["dynamic_fuel_price"]
-        else [],
+        fuel_price=(
+            RESOURCES + "monthly_fuel_price.csv"
+            if config["conventional"]["dynamic_fuel_price"]
+            else []
+        ),
         load=RESOURCES + "load.csv",
         nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
         ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",

@@ -730,6 +730,40 @@ rule build_district_heat_share:
         "../scripts/build_district_heat_share.py"


+rule build_existing_heating_distribution:
+    params:
+        baseyear=config["scenario"]["planning_horizons"][0],
+        sector=config["sector"],
+        existing_capacities=config["existing_capacities"],
+    input:
+        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        clustered_pop_energy_layout=RESOURCES
+        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
+        district_heat_share=RESOURCES
+        + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
+    output:
+        existing_heating_distribution=RESOURCES
+        + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
+    wildcard_constraints:
+        planning_horizons=config["scenario"]["planning_horizons"][0],  # only applies to baseyear
+    threads: 1
+    resources:
+        mem_mb=2000,
+    log:
+        LOGS
+        + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log",
+    benchmark:
+        (
+            BENCHMARKS
+            + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}"
+        )
+    conda:
+        "../envs/environment.yaml"
+    script:
+        "../scripts/build_existing_heating_distribution.py"
+
+
 rule prepare_sector_network:
     params:
         co2_budget=config["co2_budget"],

@@ -763,15 +797,19 @@ rule prepare_sector_network:
         dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
         co2_totals_name=RESOURCES + "co2_totals.csv",
         co2="data/bundle-sector/eea/UNFCCC_v23.csv",
-        biomass_potentials=RESOURCES
-        + "biomass_potentials_s{simpl}_{clusters}_"
-        + "{}.csv".format(config["biomass"]["year"])
-        if config["foresight"] == "overnight"
-        else RESOURCES
-        + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
-        costs="data/costs_{}.csv".format(config["costs"]["year"])
-        if config["foresight"] == "overnight"
-        else "data/costs_{planning_horizons}.csv",
+        biomass_potentials=(
+            RESOURCES
+            + "biomass_potentials_s{simpl}_{clusters}_"
+            + "{}.csv".format(config["biomass"]["year"])
+            if config["foresight"] == "overnight"
+            else RESOURCES
+            + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv"
+        ),
+        costs=(
+            "data/costs_{}.csv".format(config["costs"]["year"])
+            if config["foresight"] == "overnight"
+            else "data/costs_{planning_horizons}.csv"
+        ),
         profile_offwind_ac=RESOURCES + "profile_offwind-ac.nc",
         profile_offwind_dc=RESOURCES + "profile_offwind-dc.nc",
         h2_cavern=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv",

@@ -797,18 +835,21 @@ rule prepare_sector_network:
         cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
         cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc",
         cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc",
-        solar_thermal_total=RESOURCES
-        + "solar_thermal_total_elec_s{simpl}_{clusters}.nc"
-        if config["sector"]["solar_thermal"]
-        else [],
-        solar_thermal_urban=RESOURCES
-        + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc"
-        if config["sector"]["solar_thermal"]
-        else [],
-        solar_thermal_rural=RESOURCES
-        + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
-        if config["sector"]["solar_thermal"]
-        else [],
+        solar_thermal_total=(
+            RESOURCES + "solar_thermal_total_elec_s{simpl}_{clusters}.nc"
+            if config["sector"]["solar_thermal"]
+            else []
+        ),
+        solar_thermal_urban=(
+            RESOURCES + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc"
+            if config["sector"]["solar_thermal"]
+            else []
+        ),
+        solar_thermal_rural=(
+            RESOURCES + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
+            if config["sector"]["solar_thermal"]
+            else []
+        ),
     output:
         RESULTS
         + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@@ -29,7 +29,7 @@ rule prepare_elec_networks:
     input:
         expand(
             RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-            **config["scenario"]
+            **config["scenario"],
         ),


@@ -38,7 +38,7 @@ rule prepare_sector_networks:
         expand(
             RESULTS
             + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-            **config["scenario"]
+            **config["scenario"],
         ),


@@ -46,7 +46,7 @@ rule solve_elec_networks:
     input:
         expand(
             RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-            **config["scenario"]
+            **config["scenario"],
        ),


@@ -55,7 +55,7 @@ rule solve_sector_networks:
         expand(
             RESULTS
             + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-            **config["scenario"]
+            **config["scenario"],
         ),


@@ -64,7 +64,7 @@ rule solve_sector_networks_perfect:
         expand(
             RESULTS
             + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
-            **config["scenario"]
+            **config["scenario"],
         ),


@@ -73,11 +73,11 @@ rule validate_elec_networks:
         expand(
             RESULTS
             + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
-            **config["scenario"]
+            **config["scenario"],
         ),
         expand(
             RESULTS
             + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
             **config["scenario"],
-            kind=["production", "prices", "cross_border"]
+            kind=["production", "prices", "cross_border"],
         ),
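The only change in the collection rules above is a trailing comma after `**config["scenario"]`, as now enforced by snakefmt. For reference, `expand` builds the product of the pattern with all scenario wildcard values; a small sketch, assuming snakemake is installed and using made-up scenario values:

    from snakemake.io import expand

    scenario = {"simpl": [""], "clusters": [5], "ll": ["copt"], "opts": ["Co2L-24h"]}
    files = expand(
        "results/networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
        **scenario,
    )
    print(files)  # ['results/networks/elec_s_5_ec_lcopt_Co2L-24h.nc']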
@@ -54,6 +54,7 @@ if config["foresight"] != "perfect":
     rule plot_hydrogen_network:
         params:
             plotting=config["plotting"],
+            foresight=config["foresight"],
         input:
             network=RESULTS
             + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

@@ -152,38 +153,44 @@ rule make_summary:
     input:
         expand(
             RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
-            **config["scenario"]
+            **config["scenario"],
         ),
         networks=expand(
             RESULTS
             + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-            **config["scenario"]
+            **config["scenario"],
         ),
-        costs="data/costs_{}.csv".format(config["costs"]["year"])
-        if config["foresight"] == "overnight"
-        else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
+        costs=(
+            "data/costs_{}.csv".format(config["costs"]["year"])
+            if config["foresight"] == "overnight"
+            else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0])
+        ),
         ac_plot=expand(
             RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
-            **config["scenario"]
+            **config["scenario"],
         ),
         costs_plot=expand(
             RESULTS
             + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
-            **config["scenario"]
+            **config["scenario"],
         ),
         h2_plot=expand(
-            RESULTS
-            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
-            if config["sector"]["H2_network"]
-            else [],
-            **config["scenario"]
+            (
+                RESULTS
+                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
+                if config["sector"]["H2_network"]
+                else []
+            ),
+            **config["scenario"],
         ),
         ch4_plot=expand(
-            RESULTS
-            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
-            if config["sector"]["gas_network"]
-            else [],
-            **config["scenario"]
+            (
+                RESULTS
+                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
+                if config["sector"]["gas_network"]
+                else []
+            ),
+            **config["scenario"],
         ),
     output:
         nodal_costs=RESULTS + "csvs/nodal_costs.csv",
@@ -191,9 +191,11 @@ if config["enable"]["retrieve"]:
         input:
             HTTP.remote(
                 "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
-                    version="2019-06-05"
-                    if config["snapshots"]["end"] < "2019"
-                    else "2020-10-06"
+                    version=(
+                        "2019-06-05"
+                        if config["snapshots"]["end"] < "2019"
+                        else "2020-10-06"
+                    )
                 ),
                 keep_local=True,
                 static=True,
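The version switch above relies on lexicographic comparison of ISO-formatted date strings, so any snapshot end before 2019 picks the older OPSD release. A quick check with an illustrative value:

    snapshots_end = "2018-12-31"  # illustrative, not a repository default
    version = "2019-06-05" if snapshots_end < "2019" else "2020-10-06"
    print(version)  # 2019-06-05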
@@ -3,40 +3,6 @@
 # SPDX-License-Identifier: MIT


-rule build_existing_heating_distribution:
-    params:
-        baseyear=config["scenario"]["planning_horizons"][0],
-        sector=config["sector"],
-        existing_capacities=config["existing_capacities"],
-    input:
-        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
-        clustered_pop_energy_layout=RESOURCES
-        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
-        district_heat_share=RESOURCES
-        + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
-    output:
-        existing_heating_distribution=RESOURCES
-        + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
-    wildcard_constraints:
-        planning_horizons=config["scenario"]["planning_horizons"][0],  # only applies to baseyear
-    threads: 1
-    resources:
-        mem_mb=2000,
-    log:
-        LOGS
-        + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log",
-    benchmark:
-        (
-            BENCHMARKS
-            + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}"
-        )
-    conda:
-        "../envs/environment.yaml"
-    script:
-        "../scripts/build_existing_heating_distribution.py"
-
-
 rule add_existing_baseyear:
     params:
         baseyear=config["scenario"]["planning_horizons"][0],

@@ -45,38 +45,6 @@ rule add_existing_baseyear:
         "../scripts/add_existing_baseyear.py"


-rule add_brownfield:
-    params:
-        H2_retrofit=config["sector"]["H2_retrofit"],
-        H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
-        threshold_capacity=config["existing_capacities"]["threshold_capacity"],
-    input:
-        network=RESULTS
-        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-        network_p=solved_previous_horizon,  # solved network at previous time step
-        costs="data/costs_{planning_horizons}.csv",
-        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
-        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
-    output:
-        RESULTS
-        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-    threads: 4
-    resources:
-        mem_mb=10000,
-    log:
-        LOGS
-        + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
-    benchmark:
-        (
-            BENCHMARKS
-            + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
-        )
-    conda:
-        "../envs/environment.yaml"
-    script:
-        "../scripts/add_brownfield.py"
-
-
 rule prepare_perfect_foresight:
     input:
         **{

@@ -192,6 +160,3 @@ rule make_summary_perfect:
         "../envs/environment.yaml"
     script:
         "../scripts/make_summary_perfect.py"
-
-
-ruleorder: add_existing_baseyear > add_brownfield
|
|||||||
n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors)
|
n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors)
|
||||||
|
|
||||||
|
|
||||||
|
def sanitize_locations(n):
|
||||||
|
n.buses["x"] = n.buses.x.where(n.buses.x != 0, n.buses.location.map(n.buses.x))
|
||||||
|
n.buses["y"] = n.buses.y.where(n.buses.y != 0, n.buses.location.map(n.buses.y))
|
||||||
|
n.buses["country"] = n.buses.country.where(
|
||||||
|
n.buses.country.ne("") & n.buses.country.notnull(),
|
||||||
|
n.buses.location.map(n.buses.country),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def add_co2_emissions(n, costs, carriers):
|
def add_co2_emissions(n, costs, carriers):
|
||||||
"""
|
"""
|
||||||
Add CO2 emissions to the network's carriers attribute.
|
Add CO2 emissions to the network's carriers attribute.
|
||||||
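The new `sanitize_locations` helper fills missing coordinates and country codes of auxiliary buses from the bus named in their `location` attribute. A pandas-only sketch of the same fill logic on a made-up bus table:

    import pandas as pd

    buses = pd.DataFrame(
        {
            "x": [10.0, 0.0],
            "y": [50.0, 0.0],
            "country": ["DE", ""],
            "location": ["DE0 0", "DE0 0"],
        },
        index=["DE0 0", "DE0 0 H2"],
    )

    buses["x"] = buses.x.where(buses.x != 0, buses.location.map(buses.x))
    buses["y"] = buses.y.where(buses.y != 0, buses.location.map(buses.y))
    buses["country"] = buses.country.where(
        buses.country.ne("") & buses.country.notnull(),
        buses.location.map(buses.country),
    )
    print(buses)  # the "DE0 0 H2" row inherits x, y and country from "DE0 0"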
@@ -288,7 +297,7 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.

     ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={"name": "str"}).set_index("name")

-    logger.info(f"Load data scaled with scalling factor {scaling}.")
+    logger.info(f"Load data scaled by factor {scaling}.")
     opsd_load *= scaling

     nuts3 = gpd.read_file(nuts3_shapes).set_index("index")

@@ -327,7 +336,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
         axis=1,
     )

-    n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
+    n.madd(
+        "Load", substation_lv_i, bus=substation_lv_i, p_set=load
+    )  # carrier="electricity"


 def update_transmission_costs(n, costs, length_factor=1.0):
@@ -508,8 +519,8 @@ def attach_conventional_generators(
                     snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
                 ).iloc[:, 0]
                 bus_values = n.buses.country.map(values)
-                n.generators[attr].update(
-                    n.generators.loc[idx].bus.map(bus_values).dropna()
+                n.generators.update(
+                    {attr: n.generators.loc[idx].bus.map(bus_values).dropna()}
                 )
             else:
                 # Single value affecting all generators of technology k indiscriminantely of country

@@ -753,8 +764,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) ->
     caps = caps.groupby(["bus"]).Capacity.sum()
     caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)

-    n.generators.p_nom.update(gens.bus.map(caps).dropna())
-    n.generators.p_nom_min.update(gens.bus.map(caps).dropna())
+    n.generators.update({"p_nom": gens.bus.map(caps).dropna()})
+    n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()})


 def estimate_renewable_capacities(
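The switch from `n.generators.p_nom.update(...)` to `n.generators.update({"p_nom": ...})` performs the same partial, index-aligned overwrite, but through the DataFrame rather than a column view, which is friendlier to copy-on-write pandas. A toy equivalent with made-up data:

    import pandas as pd

    generators = pd.DataFrame(
        {"p_nom": [0.0, 0.0, 5.0]}, index=["gen-a", "gen-b", "gen-c"]
    )
    caps = pd.Series({"gen-a": 12.0, "gen-b": 7.5})

    # only rows present in `caps` are overwritten; gen-c keeps its value
    generators.update({"p_nom": caps})
    print(generators)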
@@ -48,7 +48,7 @@ def add_build_year_to_new_assets(n, baseyear):
             "series"
         ) & n.component_attrs[c.name].status.str.contains("Input")
         for attr in n.component_attrs[c.name].index[selection]:
-            c.pnl[attr].rename(columns=rename, inplace=True)
+            c.pnl[attr] = c.pnl[attr].rename(columns=rename)


 def add_existing_renewables(df_agg):
@@ -56,7 +56,7 @@ import numpy as np
 import pandas as pd
 import pypsa
 from _helpers import configure_logging
-from add_electricity import load_costs, sanitize_carriers
+from add_electricity import load_costs, sanitize_carriers, sanitize_locations

 idx = pd.IndexSlice


@@ -100,10 +100,9 @@ def attach_stores(n, costs, extendable_carriers):
     n.madd("Carrier", carriers)

     buses_i = n.buses.index
-    bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}

     if "H2" in carriers:
-        h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
+        h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i)

         n.madd(
             "Store",

@@ -143,7 +142,7 @@ def attach_stores(n, costs, extendable_carriers):

     if "battery" in carriers:
         b_buses_i = n.madd(
-            "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
+            "Bus", buses_i + " battery", carrier="battery", location=buses_i
         )

         n.madd(

@@ -246,6 +245,7 @@ if __name__ == "__main__":
     attach_hydrogen_pipelines(n, costs, extendable_carriers)

     sanitize_carriers(n, snakemake.config)
+    sanitize_locations(n)

     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
     n.export_to_netcdf(snakemake.output[0])
@@ -138,7 +138,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
     )

     buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
-    buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
+    buses["under_construction"] = buses.under_construction.where(
+        lambda s: s.notnull(), False
+    ).astype(bool)

     # remove all buses outside of all countries including exclusive economic zones (offshore)
     europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]

@@ -525,9 +527,9 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
     gb = buses.loc[substation_b].groupby(
         ["x", "y"], as_index=False, group_keys=False, sort=False
     )
-    bus_map_low = gb.apply(prefer_voltage, "min")
+    bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
     lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
-    bus_map_high = gb.apply(prefer_voltage, "max")
+    bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
     hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)

     onshore_b = pd.Series(False, buses.index)
@@ -132,14 +132,14 @@ def disaggregate_nuts0(bio):
     pop = build_nuts_population_data()

     # get population in nuts2
-    pop_nuts2 = pop.loc[pop.index.str.len() == 4]
+    pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy()
     by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
-    pop_nuts2.loc[:, "fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)
+    pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)

     # distribute nuts0 data to nuts2 by population
     bio_nodal = bio.loc[pop_nuts2.ct]
     bio_nodal.index = pop_nuts2.index
-    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0)
+    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float)

     # update inplace
     bio.update(bio_nodal)
@@ -114,12 +114,10 @@ def prepare_dataset(
     df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
     ratio = df.p_nom / df.p_nom_diameter
     not_nordstream = df.max_pressure_bar < 220
-    df.p_nom.update(
-        df.p_nom_diameter.where(
-            (df.p_nom <= 500)
-            | ((ratio > correction_threshold_p_nom) & not_nordstream)
-            | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
-        )
+    df["p_nom"] = df.p_nom_diameter.where(
+        (df.p_nom <= 500)
+        | ((ratio > correction_threshold_p_nom) & not_nordstream)
+        | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
     )

     # lines which have way too discrepant line lengths

@@ -130,12 +128,10 @@ def prepare_dataset(
         axis=1,
     )
     ratio = df.eval("length / length_haversine")
-    df["length"].update(
-        df.length_haversine.where(
-            (df["length"] < 20)
-            | (ratio > correction_threshold_length)
-            | (ratio < 1 / correction_threshold_length)
-        )
+    df["length"] = df.length_haversine.where(
+        (df["length"] < 20)
+        | (ratio > correction_threshold_length)
+        | (ratio < 1 / correction_threshold_length)
     )

     return df
@@ -98,7 +98,7 @@ def calculate_line_rating(n, cutout):
     -------
     xarray DataArray object with maximal power.
     """
-    relevant_lines = n.lines[~n.lines["underground"]]
+    relevant_lines = n.lines[~n.lines["underground"]].copy()
     buses = relevant_lines[["bus0", "bus1"]].values
     x = n.buses.x
     y = n.buses.y
@@ -83,7 +83,8 @@ if __name__ == "__main__":

         # correct for imprecision of Iinv*I
         pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
-        pop_cells_ct *= pop_ct / pop_cells_ct.sum()
+        if pop_cells_ct.sum() != 0:
+            pop_cells_ct *= pop_ct / pop_cells_ct.sum()

         # The first low density grid cells to reach rural fraction are rural
         asc_density_i = density_cells_ct.sort_values().index
@@ -297,8 +297,8 @@ def prepare_building_stock_data():
         errors="ignore",
     )

-    u_values.subsector.replace(rename_sectors, inplace=True)
-    u_values.btype.replace(rename_sectors, inplace=True)
+    u_values["subsector"] = u_values.subsector.replace(rename_sectors)
+    u_values["btype"] = u_values.btype.replace(rename_sectors)

     # for missing weighting of surfaces of building types assume MFH
     u_values["assumed_subsector"] = u_values.subsector

@@ -306,8 +306,8 @@ def prepare_building_stock_data():
         ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector"
     ] = "MFH"

-    u_values.country_code.replace({"UK": "GB"}, inplace=True)
-    u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True)
+    u_values["country_code"] = u_values.country_code.replace({"UK": "GB"})
+    u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"})
     u_values = u_values[~u_values.bage.isna()]

     u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True)
@@ -488,7 +488,9 @@ if __name__ == "__main__":
                 gens.efficiency, bins=[0, low, high, 1], labels=labels
             ).astype(str)
             carriers += [f"{c} {label} efficiency" for label in labels]
-            n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
+            n.generators.update(
+                {"carrier": gens.carrier + " " + suffix + " efficiency"}
+            )
         aggregate_carriers = carriers

     if n_clusters == len(n.buses):
@@ -507,7 +507,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
         if carrier in ["H2", "gas"]:
             load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
         else:
-            load = n.loads_t.p_set[buses]
+            load = n.loads_t.p_set[buses.intersection(n.loads.index)]

         for tech in value:
             names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

@@ -560,7 +560,10 @@ def calculate_market_values(n, label, market_values):
        )
        revenue = dispatch * n.buses_t.marginal_price[buses]

-        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
+        if total_dispatch := dispatch.sum().sum():
+            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
+        else:
+            market_values.at[tech, label] = np.nan

     ## Now do market value of links ##


@@ -583,7 +586,10 @@ def calculate_market_values(n, label, market_values):

         revenue = dispatch * n.buses_t.marginal_price[buses]

-        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
+        if total_dispatch := dispatch.sum().sum():
+            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
+        else:
+            market_values.at[tech, label] = np.nan

     return market_values

@@ -265,7 +265,7 @@ def calculate_energy(n, label, energy):
                 totals[no_bus] = float(
                     n.component_attrs[c.name].loc["p" + port, "default"]
                 )
-            c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
+            c_energies -= totals.T.groupby(c.df.carrier).sum().T

         c_energies = pd.concat([c_energies.T], keys=[c.list_name])


@@ -376,9 +376,8 @@ def calculate_supply_energy(n, label, supply_energy):
                 .groupby(level=0)
                 .sum()
                 .multiply(c.df.loc[items, "sign"])
-                .groupby(c.df.loc[items, "carrier"], axis=1)
+                .T.groupby(c.df.loc[items, "carrier"])
                 .sum()
-                .T
             )
             s = pd.concat([s], keys=[c.list_name])
             s = pd.concat([s], keys=[i])

@@ -525,9 +524,12 @@ def calculate_weighted_prices(n, label, weighted_prices):
         # stores[stores > 0.] = 0.
         # load += -stores

-        weighted_prices.loc[carrier, label] = (
-            load * n.buses_t.marginal_price[buses]
-        ).sum().sum() / load.sum().sum()
+        if total_load := load.sum().sum():
+            weighted_prices.loc[carrier, label] = (
+                load * n.buses_t.marginal_price[buses]
+            ).sum().sum() / total_load
+        else:
+            weighted_prices.loc[carrier, label] = np.nan

         if carrier[:5] == "space":
             print(load * n.buses_t.marginal_price[buses])

@@ -562,7 +564,10 @@ def calculate_market_values(n, label, market_values):

         revenue = dispatch * n.buses_t.marginal_price[buses]

-        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
+        if total_dispatch := dispatch.sum().sum():
+            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
+        else:
+            market_values.at[tech, label] = np.nan

     ## Now do market value of links ##


@@ -585,7 +590,10 @@ def calculate_market_values(n, label, market_values):

         revenue = dispatch * n.buses_t.marginal_price[buses]

-        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
+        if total_dispatch := dispatch.sum().sum():
+            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
+        else:
+            market_values.at[tech, label] = np.nan

     return market_values

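These guards use an assignment expression so the dispatch sum is evaluated once and a zero total falls back to an explicit `np.nan` instead of a silent 0/0 division. Minimal illustration with made-up numbers:

    import numpy as np
    import pandas as pd

    dispatch = pd.DataFrame({"a": [0.0, 0.0]})
    revenue = pd.DataFrame({"a": [0.0, 0.0]})

    if total_dispatch := dispatch.sum().sum():
        market_value = revenue.sum().sum() / total_dispatch
    else:
        market_value = np.nan
    print(market_value)  # nan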
@@ -36,7 +36,9 @@ def group_pipes(df, drop_direction=False):
             lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
             axis=1,
         )
-    return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
+    return df.groupby(level=0).agg(
+        {"p_nom_opt": "sum", "bus0": "first", "bus1": "first"}
+    )


 def plot_h2_map(n, regions):
@@ -98,7 +98,7 @@ def plot_map(

         logger.debug(f"{comp}, {costs}")

-        costs = costs.groupby(costs.columns, axis=1).sum()
+        costs = costs.T.groupby(costs.columns).sum().T

         costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True)

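`DataFrame.groupby(..., axis=1)` is deprecated in recent pandas, hence the `.T.groupby(...).sum().T` form used here and in the summary scripts above; both aggregate columns that share a label. A toy comparison with made-up data:

    import pandas as pd

    costs = pd.DataFrame(
        [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
        index=["bus1", "bus2"],
        columns=["solar", "solar", "wind"],
    )

    # previously: costs.groupby(costs.columns, axis=1).sum()
    grouped = costs.T.groupby(costs.columns).sum().T
    print(grouped)
    #       solar  wind
    # bus1    3.0   3.0
    # bus2    9.0   6.0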
@@ -269,8 +269,8 @@ def set_line_nom_max(
     hvdc = n.links.index[n.links.carrier == "DC"]
     n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext

-    n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
-    n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
+    n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set)
+    n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set)


 if __name__ == "__main__":
@@ -162,15 +162,17 @@ def concat_networks(years):
         add_build_year_to_new_assets(network, year)

         # static ----------------------------------
-        # (1) add buses and carriers
-        for component in network.iterate_components(["Bus", "Carrier"]):
-            df_year = component.df
-            # get missing assets
-            missing = get_missing(df_year, n, component.list_name)
-            import_components_from_dataframe(n, missing, component.name)
-        # (2) add generators, links, stores and loads
         for component in network.iterate_components(
-            ["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
+            [
+                "Bus",
+                "Carrier",
+                "Generator",
+                "Link",
+                "Store",
+                "Load",
+                "Line",
+                "StorageUnit",
+            ]
         ):
             df_year = component.df.copy()
             missing = get_missing(df_year, n, component.list_name)

@@ -199,8 +201,13 @@ def concat_networks(years):
                     pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year

                 else:
-                    # this is to avoid adding multiple times assets with
-                    # infinite lifetime as ror
+                    # For components that aren't new, we just extend
+                    # time-varying data from the previous investment
+                    # period.
+                    if i > 0:
+                        pnl[k].loc[(year,)] = pnl[k].loc[(years[i - 1],)].values
+
+                    # Now, add time-varying data for new components.
                     cols = pnl_year.columns.difference(pnl[k].columns)
                     pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)


@@ -214,7 +221,7 @@ def concat_networks(years):
     # set investment periods
     n.investment_periods = n.snapshots.levels[0]
     # weighting of the investment period -> assuming last period same weighting as the period before
-    time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
+    time_w = n.investment_periods.to_series().diff().shift(-1).ffill()
     n.investment_period_weightings["years"] = time_w
     # set objective weightings
     objective_w = get_investment_weighting(
@@ -19,7 +19,7 @@ import pandas as pd
 import pypsa
 import xarray as xr
 from _helpers import update_config_with_sector_opts
-from add_electricity import calculate_annuity, sanitize_carriers
+from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations
 from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
 from networkx.algorithms import complement
 from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation

@@ -551,6 +551,17 @@ def patch_electricity_network(n):
     n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True)


+def add_eu_bus(n, x=-5.5, y=46):
+    """
+    Add EU bus to the network.
+
+    This cosmetic bus serves as a reference point for the location of
+    the EU buses in the plots and summaries.
+    """
+    n.add("Bus", "EU", location="EU", x=x, y=y, carrier="none")
+    n.add("Carrier", "none")
+
+
 def add_co2_tracking(n, costs, options):
     # minus sign because opposite to how fossil fuels used:
     # CH4 burning puts CH4 down, atmosphere up
@@ -715,27 +726,27 @@ def add_dac(n, costs):
     heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)]
     locations = n.buses.location[heat_buses]

-    efficiency2 = -(
+    electricity_input = (
         costs.at["direct air capture", "electricity-input"]
         + costs.at["direct air capture", "compression-electricity-input"]
-    )
-    efficiency3 = -(
+    )  # MWh_el / tCO2
+    heat_input = (
         costs.at["direct air capture", "heat-input"]
         - costs.at["direct air capture", "compression-heat-output"]
-    )
+    )  # MWh_th / tCO2

     n.madd(
         "Link",
         heat_buses.str.replace(" heat", " DAC"),
-        bus0="co2 atmosphere",
-        bus1=spatial.co2.df.loc[locations, "nodes"].values,
-        bus2=locations.values,
-        bus3=heat_buses,
+        bus0=locations.values,
+        bus1=heat_buses,
+        bus2="co2 atmosphere",
+        bus3=spatial.co2.df.loc[locations, "nodes"].values,
         carrier="DAC",
-        capital_cost=costs.at["direct air capture", "fixed"],
-        efficiency=1.0,
-        efficiency2=efficiency2,
-        efficiency3=efficiency3,
+        capital_cost=costs.at["direct air capture", "fixed"] / electricity_input,
+        efficiency=-heat_input / electricity_input,
+        efficiency2=-1 / electricity_input,
+        efficiency3=1 / electricity_input,
         p_nom_extendable=True,
         lifetime=costs.at["direct air capture", "lifetime"],
     )
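With the reordered buses, the DAC link is normalised to 1 MWh of electricity drawn from `bus0`; heat demand (`bus1`), CO2 removed from the atmosphere (`bus2`) and CO2 delivered to storage (`bus3`) are all expressed per MWh of electricity, and the capital cost is divided accordingly. A standalone sketch of the coefficient arithmetic with illustrative numbers, not the real technology data:

    # illustrative technology assumptions per tCO2 captured
    electricity_input = 0.25 + 0.15  # electricity-input + compression-electricity-input, MWh_el/tCO2
    heat_input = 1.0 - 0.2           # heat-input - compression-heat-output, MWh_th/tCO2

    efficiency = -heat_input / electricity_input  # MWh_th drawn from the heat bus per MWh_el
    efficiency2 = -1 / electricity_input          # tCO2 taken from the atmosphere bus per MWh_el
    efficiency3 = 1 / electricity_input           # tCO2 delivered to the storage bus per MWh_el

    print(efficiency, efficiency2, efficiency3)   # -2.0 -2.5 2.5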
@@ -1010,6 +1021,7 @@ def insert_electricity_distribution_grid(n, costs):
         "Store",
         nodes + " home battery",
         bus=nodes + " home battery",
+        location=nodes,
         e_cyclic=True,
         e_nom_extendable=True,
         carrier="home battery",

@@ -3599,6 +3611,8 @@ if __name__ == "__main__":
     for carrier in conventional:
         add_carrier_buses(n, carrier)

+    add_eu_bus(n)
+
     add_co2_tracking(n, costs, options)

     add_generation(n, costs)

@@ -3738,5 +3752,6 @@ if __name__ == "__main__":
     n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

     sanitize_carriers(n, snakemake.config)
+    sanitize_locations(n)

     n.export_to_netcdf(snakemake.output[0])
@@ -418,7 +418,7 @@ def add_CCL_constraints(n, config):
    Example
    -------
    scenario:
-        opts: [Co2L-CCL-24H]
+        opts: [Co2L-CCL-24h]
    electricity:
        agg_p_nom_limits: data/agg_p_nom_minmax.csv
    """

@@ -463,7 +463,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
    Example
    -------
    scenario:
-        opts: [Co2L-EQ0.7-24H]
+        opts: [Co2L-EQ0.7-24h]

    Require each country or node to on average produce a minimal share
    of its total electricity consumption itself. Example: EQ0.7c demands each country

@@ -527,7 +527,7 @@ def add_BAU_constraints(n, config):
    Example
    -------
    scenario:
-        opts: [Co2L-BAU-24H]
+        opts: [Co2L-BAU-24h]
    electricity:
        BAU_mincapacities:
            solar: 0

@@ -564,7 +564,7 @@ def add_SAFE_constraints(n, config):
    config.yaml requires to specify opts:

    scenario:
-        opts: [Co2L-SAFE-24H]
+        opts: [Co2L-SAFE-24h]
    electricity:
        SAFE_reservemargin: 0.1
    Which sets a reserve margin of 10% above the peak demand.

test.sh (new executable file)
@@ -0,0 +1,8 @@
+# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors
+#
+# SPDX-License-Identifier: CC0-1.0
+
+snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \
+snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \
+snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \
+snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime