Merge branch 'master' into fneum/year-specific-techs
Commit: 602375af8c

.github/workflows/ci.yaml
@@ -81,11 +81,7 @@ jobs:
      key: data-cutouts-${{ env.WEEK }}-${{ env.DATA_CACHE_NUMBER }}

    - name: Test snakemake workflow
      run: |
        snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime
        snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime
        snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime
        snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime
      run: ./test.sh

    - name: Upload artifacts
      uses: actions/upload-artifact@v4.3.0
@@ -74,7 +74,7 @@ repos:

  # Format Snakemake rule / workflow files
  - repo: https://github.com/snakemake/snakefmt
    rev: v0.9.0
    rev: v0.10.0
    hooks:
      - id: snakefmt
@@ -31,7 +31,12 @@ CDIR = RDIR if not run.get("shared_cutouts") else ""

LOGS = "logs/" + RDIR
BENCHMARKS = "benchmarks/" + RDIR
RESOURCES = "resources/" + RDIR if not run.get("shared_resources") else "resources/"
if not (shared_resources := run.get("shared_resources")):
    RESOURCES = "resources/" + RDIR
elif isinstance(shared_resources, str):
    RESOURCES = "resources/" + shared_resources + "/"
else:
    RESOURCES = "resources/"
RESULTS = "results/" + RDIR
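For orientation (not part of the commit), a minimal Python sketch of how the new shared_resources setting above selects the resources directory; run_dir is a stand-in for the run-specific RDIR prefix:

def resources_dir(shared_resources, run_dir="test-elec/"):
    if not shared_resources:  # unset or False: per-run resources
        return "resources/" + run_dir
    if isinstance(shared_resources, str):  # named directory shared across runs
        return "resources/" + shared_resources + "/"
    return "resources/"  # True: one fully shared directory

print(resources_dir(False))   # resources/test-elec/
print(resources_dir("test"))  # resources/test/, as used in the test configs below
print(resources_dir(True))    # resources/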
@@ -8,14 +8,14 @@ tutorial: true

run:
  name: "test-elec" # use this to keep track of runs with different settings
  disable_progressbar: true
  shared_resources: true
  shared_resources: "test"
  shared_cutouts: true

scenario:
  clusters:
  - 5
  opts:
  - Co2L-24H
  - Co2L-24h

countries: ['BE']
@@ -7,7 +7,7 @@ tutorial: true

run:
  name: "test-sector-myopic"
  disable_progressbar: true
  shared_resources: true
  shared_resources: "test"
  shared_cutouts: true

foresight: myopic

@@ -18,7 +18,7 @@ scenario:
  clusters:
  - 5
  sector_opts:
  - 24H-T-H-B-I-A-dist1
  - 24h-T-H-B-I-A-dist1
  planning_horizons:
  - 2030
  - 2040
@@ -7,7 +7,7 @@ tutorial: true

run:
  name: "test-sector-overnight"
  disable_progressbar: true
  shared_resources: true
  shared_resources: "test"
  shared_cutouts: true

@@ -17,7 +17,7 @@ scenario:
  clusters:
  - 5
  sector_opts:
  - CO2L0-24H-T-H-B-I-A-dist1
  - CO2L0-24h-T-H-B-I-A-dist1
  planning_horizons:
  - 2030
@@ -7,7 +7,7 @@ tutorial: true

run:
  name: "test-sector-perfect"
  disable_progressbar: true
  shared_resources: true
  shared_resources: "test"
  shared_cutouts: true

foresight: perfect

@@ -18,7 +18,7 @@ scenario:
  clusters:
  - 5
  sector_opts:
  - 8760H-T-H-B-I-A-dist1
  - 8760h-T-H-B-I-A-dist1
  planning_horizons:
  - 2030
  - 2040
@@ -57,6 +57,8 @@ Upcoming Release

* Add the option to customise map projection in plotting config.

* The order of buses (bus0, bus1, ...) for DAC components has changed to meet the convention of the other components. Therefore, `bus0` refers to the electricity bus (input), `bus1` to the heat bus (input), `bus2` to the CO2 atmosphere bus (input), and `bus3` to the CO2 storage bus (output).

* The rule ``plot_network`` has been split into separate rules for plotting
  electricity, hydrogen and gas networks.

@@ -64,6 +66,12 @@ Upcoming Release

* The ``highs`` solver was added to the default environment file.

* Various minor bugfixes to the perfect foresight workflow, though perfect foresight must still be considered experimental.

* It is now possible to determine the directory for shared resources by setting `shared_resources` to a string.

* A ``test.sh`` script was added to the repository to run the tests locally.

* Default settings for recycling rates and primary product shares of high-value
  chemicals have been set in accordance with the values used in `Neumann et al.
  (2023) <https://doi.org/10.1016/j.joule.2023.06.016>`_ linearly interpolated

@@ -78,6 +86,7 @@ Upcoming Release

  workflows with foresight "myopic" and still needs to be added foresight option
  "perfect".


PyPSA-Eur 0.9.0 (5th January 2024)
==================================
@@ -401,18 +401,22 @@ rule add_electricity:
            if str(fn).startswith("data/")
        },
        base_network=RESOURCES + "networks/base.nc",
        line_rating=RESOURCES + "networks/line_rating.nc"
        if config["lines"]["dynamic_line_rating"]["activate"]
        else RESOURCES + "networks/base.nc",
        line_rating=(
            RESOURCES + "networks/line_rating.nc"
            if config["lines"]["dynamic_line_rating"]["activate"]
            else RESOURCES + "networks/base.nc"
        ),
        tech_costs=COSTS,
        regions=RESOURCES + "regions_onshore.geojson",
        powerplants=RESOURCES + "powerplants.csv",
        hydro_capacities=ancient("data/bundle/hydro_capacities.csv"),
        geth_hydro_capacities="data/geth2015_hydro_capacities.csv",
        unit_commitment="data/unit_commitment.csv",
        fuel_price=RESOURCES + "monthly_fuel_price.csv"
        if config["conventional"]["dynamic_fuel_price"]
        else [],
        fuel_price=(
            RESOURCES + "monthly_fuel_price.csv"
            if config["conventional"]["dynamic_fuel_price"]
            else []
        ),
        load=RESOURCES + "load.csv",
        nuts3_shapes=RESOURCES + "nuts3_shapes.geojson",
        ua_md_gdp="data/GDP_PPP_30arcsec_v3_mapped_default.csv",
@@ -730,6 +730,40 @@ rule build_district_heat_share:
        "../scripts/build_district_heat_share.py"


rule build_existing_heating_distribution:
    params:
        baseyear=config["scenario"]["planning_horizons"][0],
        sector=config["sector"],
        existing_capacities=config["existing_capacities"],
    input:
        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
        clustered_pop_energy_layout=RESOURCES
        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        district_heat_share=RESOURCES
        + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
    output:
        existing_heating_distribution=RESOURCES
        + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
    wildcard_constraints:
        planning_horizons=config["scenario"]["planning_horizons"][0],  # only applies to baseyear
    threads: 1
    resources:
        mem_mb=2000,
    log:
        LOGS
        + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
            + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/build_existing_heating_distribution.py"


rule prepare_sector_network:
    params:
        co2_budget=config["co2_budget"],

@@ -763,15 +797,19 @@ rule prepare_sector_network:
        dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
        co2_totals_name=RESOURCES + "co2_totals.csv",
        co2="data/bundle-sector/eea/UNFCCC_v23.csv",
        biomass_potentials=RESOURCES
        + "biomass_potentials_s{simpl}_{clusters}_"
        + "{}.csv".format(config["biomass"]["year"])
        if config["foresight"] == "overnight"
        else RESOURCES
        + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv",
        costs="data/costs_{}.csv".format(config["costs"]["year"])
        if config["foresight"] == "overnight"
        else "data/costs_{planning_horizons}.csv",
        biomass_potentials=(
            RESOURCES
            + "biomass_potentials_s{simpl}_{clusters}_"
            + "{}.csv".format(config["biomass"]["year"])
            if config["foresight"] == "overnight"
            else RESOURCES
            + "biomass_potentials_s{simpl}_{clusters}_{planning_horizons}.csv"
        ),
        costs=(
            "data/costs_{}.csv".format(config["costs"]["year"])
            if config["foresight"] == "overnight"
            else "data/costs_{planning_horizons}.csv"
        ),
        profile_offwind_ac=RESOURCES + "profile_offwind-ac.nc",
        profile_offwind_dc=RESOURCES + "profile_offwind-dc.nc",
        h2_cavern=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv",

@@ -797,18 +835,21 @@ rule prepare_sector_network:
        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
        cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc",
        cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc",
        solar_thermal_total=RESOURCES
        + "solar_thermal_total_elec_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
        solar_thermal_urban=RESOURCES
        + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
        solar_thermal_rural=RESOURCES
        + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
        solar_thermal_total=(
            RESOURCES + "solar_thermal_total_elec_s{simpl}_{clusters}.nc"
            if config["sector"]["solar_thermal"]
            else []
        ),
        solar_thermal_urban=(
            RESOURCES + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc"
            if config["sector"]["solar_thermal"]
            else []
        ),
        solar_thermal_rural=(
            RESOURCES + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
            if config["sector"]["solar_thermal"]
            else []
        ),
    output:
        RESULTS
        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
@@ -29,7 +29,7 @@ rule prepare_elec_networks:
    input:
        expand(
            RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
            **config["scenario"]
            **config["scenario"],
        ),

@@ -38,7 +38,7 @@ rule prepare_sector_networks:
        expand(
            RESULTS
            + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
            **config["scenario"]
            **config["scenario"],
        ),

@@ -46,7 +46,7 @@ rule solve_elec_networks:
    input:
        expand(
            RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
            **config["scenario"]
            **config["scenario"],
        ),

@@ -55,7 +55,7 @@ rule solve_sector_networks:
        expand(
            RESULTS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
            **config["scenario"]
            **config["scenario"],
        ),

@@ -64,7 +64,7 @@ rule solve_sector_networks_perfect:
        expand(
            RESULTS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_brownfield_all_years.nc",
            **config["scenario"]
            **config["scenario"],
        ),

@@ -73,11 +73,11 @@ rule validate_elec_networks:
        expand(
            RESULTS
            + "figures/.statistics_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
            **config["scenario"]
            **config["scenario"],
        ),
        expand(
            RESULTS
            + "figures/.validation_{kind}_plots_elec_s{simpl}_{clusters}_ec_l{ll}_{opts}",
            **config["scenario"],
            kind=["production", "prices", "cross_border"]
            kind=["production", "prices", "cross_border"],
        ),
@@ -54,6 +54,7 @@ if config["foresight"] != "perfect":

rule plot_hydrogen_network:
    params:
        plotting=config["plotting"],
        foresight=config["foresight"],
    input:
        network=RESULTS
        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",

@@ -152,38 +153,44 @@ rule make_summary:
    input:
        expand(
            RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
            **config["scenario"]
            **config["scenario"],
        ),
        networks=expand(
            RESULTS
            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
            **config["scenario"]
            **config["scenario"],
        ),
        costs=(
            "data/costs_{}.csv".format(config["costs"]["year"])
            if config["foresight"] == "overnight"
            else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0])
        ),
        costs="data/costs_{}.csv".format(config["costs"]["year"])
        if config["foresight"] == "overnight"
        else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
        ac_plot=expand(
            RESULTS + "maps/power-network-s{simpl}-{clusters}.pdf",
            **config["scenario"]
            **config["scenario"],
        ),
        costs_plot=expand(
            RESULTS
            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
            **config["scenario"]
            **config["scenario"],
        ),
        h2_plot=expand(
            RESULTS
            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
            if config["sector"]["H2_network"]
            else [],
            **config["scenario"]
            (
                RESULTS
                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-h2_network_{planning_horizons}.pdf"
                if config["sector"]["H2_network"]
                else []
            ),
            **config["scenario"],
        ),
        ch4_plot=expand(
            RESULTS
            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
            if config["sector"]["gas_network"]
            else [],
            **config["scenario"]
            (
                RESULTS
                + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-ch4_network_{planning_horizons}.pdf"
                if config["sector"]["gas_network"]
                else []
            ),
            **config["scenario"],
        ),
    output:
        nodal_costs=RESULTS + "csvs/nodal_costs.csv",
@@ -191,9 +191,11 @@ if config["enable"]["retrieve"]:
    input:
        HTTP.remote(
            "data.open-power-system-data.org/time_series/{version}/time_series_60min_singleindex.csv".format(
                version="2019-06-05"
                if config["snapshots"]["end"] < "2019"
                else "2020-10-06"
                version=(
                    "2019-06-05"
                    if config["snapshots"]["end"] < "2019"
                    else "2020-10-06"
                )
            ),
            keep_local=True,
            static=True,
@@ -3,40 +3,6 @@
# SPDX-License-Identifier: MIT


rule build_existing_heating_distribution:
    params:
        baseyear=config["scenario"]["planning_horizons"][0],
        sector=config["sector"],
        existing_capacities=config["existing_capacities"],
    input:
        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
        clustered_pop_energy_layout=RESOURCES
        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
        district_heat_share=RESOURCES
        + "district_heat_share_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
    output:
        existing_heating_distribution=RESOURCES
        + "existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
    wildcard_constraints:
        planning_horizons=config["scenario"]["planning_horizons"][0],  # only applies to baseyear
    threads: 1
    resources:
        mem_mb=2000,
    log:
        LOGS
        + "build_existing_heating_distribution_elec_s{simpl}_{clusters}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
            + "build_existing_heating_distribution/elec_s{simpl}_{clusters}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/build_existing_heating_distribution.py"


rule add_existing_baseyear:
    params:
        baseyear=config["scenario"]["planning_horizons"][0],

@@ -45,38 +45,6 @@ rule add_existing_baseyear:
        "../scripts/add_existing_baseyear.py"


rule add_brownfield:
    params:
        H2_retrofit=config["sector"]["H2_retrofit"],
        H2_retrofit_capacity_per_CH4=config["sector"]["H2_retrofit_capacity_per_CH4"],
        threshold_capacity=config["existing_capacities"]["threshold_capacity"],
    input:
        network=RESULTS
        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        network_p=solved_previous_horizon,  # solved network at previous time step
        costs="data/costs_{planning_horizons}.csv",
        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
    output:
        RESULTS
        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    threads: 4
    resources:
        mem_mb=10000,
    log:
        LOGS
        + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
            + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
    script:
        "../scripts/add_brownfield.py"


rule prepare_perfect_foresight:
    input:
        **{

@@ -192,6 +160,3 @@ rule make_summary_perfect:
        "../envs/environment.yaml"
    script:
        "../scripts/make_summary_perfect.py"


ruleorder: add_existing_baseyear > add_brownfield
@@ -178,6 +178,15 @@ def sanitize_carriers(n, config):
    n.carriers["color"] = n.carriers.color.where(n.carriers.color != "", colors)


def sanitize_locations(n):
    n.buses["x"] = n.buses.x.where(n.buses.x != 0, n.buses.location.map(n.buses.x))
    n.buses["y"] = n.buses.y.where(n.buses.y != 0, n.buses.location.map(n.buses.y))
    n.buses["country"] = n.buses.country.where(
        n.buses.country.ne("") & n.buses.country.notnull(),
        n.buses.location.map(n.buses.country),
    )


def add_co2_emissions(n, costs, carriers):
    """
    Add CO2 emissions to the network's carriers attribute.

@@ -288,7 +297,7 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.

    ua_md_gdp = pd.read_csv(ua_md_gdp, dtype={"name": "str"}).set_index("name")

    logger.info(f"Load data scaled with scalling factor {scaling}.")
    logger.info(f"Load data scaled by factor {scaling}.")
    opsd_load *= scaling

    nuts3 = gpd.read_file(nuts3_shapes).set_index("index")

@@ -327,7 +336,9 @@ def attach_load(n, regions, load, nuts3_shapes, ua_md_gdp, countries, scaling=1.
        axis=1,
    )

    n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)
    n.madd(
        "Load", substation_lv_i, bus=substation_lv_i, p_set=load
    )  # carrier="electricity"


def update_transmission_costs(n, costs, length_factor=1.0):

@@ -508,8 +519,8 @@ def attach_conventional_generators(
                snakemake.input[f"conventional_{carrier}_{attr}"], index_col=0
            ).iloc[:, 0]
            bus_values = n.buses.country.map(values)
            n.generators[attr].update(
                n.generators.loc[idx].bus.map(bus_values).dropna()
            n.generators.update(
                {attr: n.generators.loc[idx].bus.map(bus_values).dropna()}
            )
        else:
            # Single value affecting all generators of technology k indiscriminantely of country

@@ -753,8 +764,8 @@ def attach_OPSD_renewables(n: pypsa.Network, tech_map: Dict[str, List[str]]) ->
        caps = caps.groupby(["bus"]).Capacity.sum()
        caps = caps / gens_per_bus.reindex(caps.index, fill_value=1)

        n.generators.p_nom.update(gens.bus.map(caps).dropna())
        n.generators.p_nom_min.update(gens.bus.map(caps).dropna())
        n.generators.update({"p_nom": gens.bus.map(caps).dropna()})
        n.generators.update({"p_nom_min": gens.bus.map(caps).dropna()})


def estimate_renewable_capacities(
@@ -48,7 +48,7 @@ def add_build_year_to_new_assets(n, baseyear):
            "series"
        ) & n.component_attrs[c.name].status.str.contains("Input")
        for attr in n.component_attrs[c.name].index[selection]:
            c.pnl[attr].rename(columns=rename, inplace=True)
            c.pnl[attr] = c.pnl[attr].rename(columns=rename)


def add_existing_renewables(df_agg):
@@ -56,7 +56,7 @@ import numpy as np
import pandas as pd
import pypsa
from _helpers import configure_logging
from add_electricity import load_costs, sanitize_carriers
from add_electricity import load_costs, sanitize_carriers, sanitize_locations

idx = pd.IndexSlice

@@ -100,10 +100,9 @@ def attach_stores(n, costs, extendable_carriers):
    n.madd("Carrier", carriers)

    buses_i = n.buses.index
    bus_sub_dict = {k: n.buses[k].values for k in ["x", "y", "country"]}

    if "H2" in carriers:
        h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", **bus_sub_dict)
        h2_buses_i = n.madd("Bus", buses_i + " H2", carrier="H2", location=buses_i)

        n.madd(
            "Store",

@@ -143,7 +142,7 @@ def attach_stores(n, costs, extendable_carriers):

    if "battery" in carriers:
        b_buses_i = n.madd(
            "Bus", buses_i + " battery", carrier="battery", **bus_sub_dict
            "Bus", buses_i + " battery", carrier="battery", location=buses_i
        )

        n.madd(

@@ -246,6 +245,7 @@ if __name__ == "__main__":
    attach_hydrogen_pipelines(n, costs, extendable_carriers)

    sanitize_carriers(n, snakemake.config)
    sanitize_locations(n)

    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))
    n.export_to_netcdf(snakemake.output[0])
@@ -138,7 +138,9 @@ def _load_buses_from_eg(eg_buses, europe_shape, config_elec):
    )

    buses["carrier"] = buses.pop("dc").map({True: "DC", False: "AC"})
    buses["under_construction"] = buses["under_construction"].fillna(False).astype(bool)
    buses["under_construction"] = buses.under_construction.where(
        lambda s: s.notnull(), False
    ).astype(bool)

    # remove all buses outside of all countries including exclusive economic zones (offshore)
    europe_shape = gpd.read_file(europe_shape).loc[0, "geometry"]

@@ -525,9 +527,9 @@ def _set_countries_and_substations(n, config, country_shapes, offshore_shapes):
    gb = buses.loc[substation_b].groupby(
        ["x", "y"], as_index=False, group_keys=False, sort=False
    )
    bus_map_low = gb.apply(prefer_voltage, "min")
    bus_map_low = gb.apply(prefer_voltage, "min", include_groups=False)
    lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
    bus_map_high = gb.apply(prefer_voltage, "max")
    bus_map_high = gb.apply(prefer_voltage, "max", include_groups=False)
    hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)

    onshore_b = pd.Series(False, buses.index)
@@ -132,14 +132,14 @@ def disaggregate_nuts0(bio):
    pop = build_nuts_population_data()

    # get population in nuts2
    pop_nuts2 = pop.loc[pop.index.str.len() == 4]
    pop_nuts2 = pop.loc[pop.index.str.len() == 4].copy()
    by_country = pop_nuts2.total.groupby(pop_nuts2.ct).sum()
    pop_nuts2.loc[:, "fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)
    pop_nuts2["fraction"] = pop_nuts2.total / pop_nuts2.ct.map(by_country)

    # distribute nuts0 data to nuts2 by population
    bio_nodal = bio.loc[pop_nuts2.ct]
    bio_nodal.index = pop_nuts2.index
    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0)
    bio_nodal = bio_nodal.mul(pop_nuts2.fraction, axis=0).astype(float)

    # update inplace
    bio.update(bio_nodal)
@@ -114,12 +114,10 @@ def prepare_dataset(
    df["p_nom_diameter"] = df.diameter_mm.apply(diameter_to_capacity)
    ratio = df.p_nom / df.p_nom_diameter
    not_nordstream = df.max_pressure_bar < 220
    df.p_nom.update(
        df.p_nom_diameter.where(
            (df.p_nom <= 500)
            | ((ratio > correction_threshold_p_nom) & not_nordstream)
            | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
        )
    )
    df["p_nom"] = df.p_nom_diameter.where(
        (df.p_nom <= 500)
        | ((ratio > correction_threshold_p_nom) & not_nordstream)
        | ((ratio < 1 / correction_threshold_p_nom) & not_nordstream)
    )

    # lines which have way too discrepant line lengths

@@ -130,12 +128,10 @@ def prepare_dataset(
        axis=1,
    )
    ratio = df.eval("length / length_haversine")
    df["length"].update(
        df.length_haversine.where(
            (df["length"] < 20)
            | (ratio > correction_threshold_length)
            | (ratio < 1 / correction_threshold_length)
        )
    )
    df["length"] = df.length_haversine.where(
        (df["length"] < 20)
        | (ratio > correction_threshold_length)
        | (ratio < 1 / correction_threshold_length)
    )

    return df
@@ -98,7 +98,7 @@ def calculate_line_rating(n, cutout):
    -------
    xarray DataArray object with maximal power.
    """
    relevant_lines = n.lines[~n.lines["underground"]]
    relevant_lines = n.lines[~n.lines["underground"]].copy()
    buses = relevant_lines[["bus0", "bus1"]].values
    x = n.buses.x
    y = n.buses.y
@@ -83,7 +83,8 @@ if __name__ == "__main__":

        # correct for imprecision of Iinv*I
        pop_ct = nuts3.loc[nuts3.country == ct, "pop"].sum()
        pop_cells_ct *= pop_ct / pop_cells_ct.sum()
        if pop_cells_ct.sum() != 0:
            pop_cells_ct *= pop_ct / pop_cells_ct.sum()

        # The first low density grid cells to reach rural fraction are rural
        asc_density_i = density_cells_ct.sort_values().index
@@ -297,8 +297,8 @@ def prepare_building_stock_data():
        errors="ignore",
    )

    u_values.subsector.replace(rename_sectors, inplace=True)
    u_values.btype.replace(rename_sectors, inplace=True)
    u_values["subsector"] = u_values.subsector.replace(rename_sectors)
    u_values["btype"] = u_values.btype.replace(rename_sectors)

    # for missing weighting of surfaces of building types assume MFH
    u_values["assumed_subsector"] = u_values.subsector

@@ -306,8 +306,8 @@ def prepare_building_stock_data():
        ~u_values.subsector.isin(rename_sectors.values()), "assumed_subsector"
    ] = "MFH"

    u_values.country_code.replace({"UK": "GB"}, inplace=True)
    u_values.bage.replace({"Berfore 1945": "Before 1945"}, inplace=True)
    u_values["country_code"] = u_values.country_code.replace({"UK": "GB"})
    u_values["bage"] = u_values.bage.replace({"Berfore 1945": "Before 1945"})
    u_values = u_values[~u_values.bage.isna()]

    u_values.set_index(["country_code", "subsector", "bage", "type"], inplace=True)
@@ -488,7 +488,9 @@ if __name__ == "__main__":
                gens.efficiency, bins=[0, low, high, 1], labels=labels
            ).astype(str)
            carriers += [f"{c} {label} efficiency" for label in labels]
            n.generators.carrier.update(gens.carrier + " " + suffix + " efficiency")
            n.generators.update(
                {"carrier": gens.carrier + " " + suffix + " efficiency"}
            )
        aggregate_carriers = carriers

    if n_clusters == len(n.buses):
@@ -507,7 +507,7 @@ def calculate_weighted_prices(n, label, weighted_prices):
        if carrier in ["H2", "gas"]:
            load = pd.DataFrame(index=n.snapshots, columns=buses, data=0.0)
        else:
            load = n.loads_t.p_set[buses]
            load = n.loads_t.p_set[buses.intersection(n.loads.index)]

        for tech in value:
            names = n.links.index[n.links.index.to_series().str[-len(tech) :] == tech]

@@ -560,7 +560,10 @@ def calculate_market_values(n, label, market_values):
        )
        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
        if total_dispatch := dispatch.sum().sum():
            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
        else:
            market_values.at[tech, label] = np.nan

    ## Now do market value of links ##

@@ -583,7 +586,10 @@ def calculate_market_values(n, label, market_values):

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
        if total_dispatch := dispatch.sum().sum():
            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
        else:
            market_values.at[tech, label] = np.nan

    return market_values
@@ -265,7 +265,7 @@ def calculate_energy(n, label, energy):
                totals[no_bus] = float(
                    n.component_attrs[c.name].loc["p" + port, "default"]
                )
            c_energies -= totals.groupby(c.df.carrier, axis=1).sum()
            c_energies -= totals.T.groupby(c.df.carrier).sum().T

        c_energies = pd.concat([c_energies.T], keys=[c.list_name])

@@ -376,9 +376,8 @@ def calculate_supply_energy(n, label, supply_energy):
                .groupby(level=0)
                .sum()
                .multiply(c.df.loc[items, "sign"])
                .groupby(c.df.loc[items, "carrier"], axis=1)
                .T.groupby(c.df.loc[items, "carrier"])
                .sum()
                .T
            )
            s = pd.concat([s], keys=[c.list_name])
            s = pd.concat([s], keys=[i])

@@ -525,9 +524,12 @@ def calculate_weighted_prices(n, label, weighted_prices):
        # stores[stores > 0.] = 0.
        # load += -stores

        weighted_prices.loc[carrier, label] = (
            load * n.buses_t.marginal_price[buses]
        ).sum().sum() / load.sum().sum()
        if total_load := load.sum().sum():
            weighted_prices.loc[carrier, label] = (
                load * n.buses_t.marginal_price[buses]
            ).sum().sum() / total_load
        else:
            weighted_prices.loc[carrier, label] = np.nan

        if carrier[:5] == "space":
            print(load * n.buses_t.marginal_price[buses])

@@ -562,7 +564,10 @@ def calculate_market_values(n, label, market_values):

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
        if total_dispatch := dispatch.sum().sum():
            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
        else:
            market_values.at[tech, label] = np.nan

    ## Now do market value of links ##

@@ -585,7 +590,10 @@ def calculate_market_values(n, label, market_values):

        revenue = dispatch * n.buses_t.marginal_price[buses]

        market_values.at[tech, label] = revenue.sum().sum() / dispatch.sum().sum()
        if total_dispatch := dispatch.sum().sum():
            market_values.at[tech, label] = revenue.sum().sum() / total_dispatch
        else:
            market_values.at[tech, label] = np.nan

    return market_values
@@ -36,7 +36,9 @@ def group_pipes(df, drop_direction=False):
        lambda x: f"H2 pipeline {x.bus0.replace(' H2', '')} -> {x.bus1.replace(' H2', '')}",
        axis=1,
    )
    return df.groupby(level=0).agg({"p_nom_opt": sum, "bus0": "first", "bus1": "first"})
    return df.groupby(level=0).agg(
        {"p_nom_opt": "sum", "bus0": "first", "bus1": "first"}
    )


def plot_h2_map(n, regions):
@@ -98,7 +98,7 @@ def plot_map(

    logger.debug(f"{comp}, {costs}")

    costs = costs.groupby(costs.columns, axis=1).sum()
    costs = costs.T.groupby(costs.columns).sum().T

    costs.drop(list(costs.columns[(costs == 0.0).all()]), axis=1, inplace=True)
@@ -269,8 +269,8 @@ def set_line_nom_max(
    hvdc = n.links.index[n.links.carrier == "DC"]
    n.links.loc[hvdc, "p_nom_max"] = n.links.loc[hvdc, "p_nom"] + p_nom_max_ext

    n.lines.s_nom_max.clip(upper=s_nom_max_set, inplace=True)
    n.links.p_nom_max.clip(upper=p_nom_max_set, inplace=True)
    n.lines["s_nom_max"] = n.lines.s_nom_max.clip(upper=s_nom_max_set)
    n.links["p_nom_max"] = n.links.p_nom_max.clip(upper=p_nom_max_set)


if __name__ == "__main__":
@@ -162,15 +162,17 @@ def concat_networks(years):
        add_build_year_to_new_assets(network, year)

        # static ----------------------------------
        # (1) add buses and carriers
        for component in network.iterate_components(["Bus", "Carrier"]):
            df_year = component.df
            # get missing assets
            missing = get_missing(df_year, n, component.list_name)
            import_components_from_dataframe(n, missing, component.name)
        # (2) add generators, links, stores and loads
        for component in network.iterate_components(
            ["Generator", "Link", "Store", "Load", "Line", "StorageUnit"]
            [
                "Bus",
                "Carrier",
                "Generator",
                "Link",
                "Store",
                "Load",
                "Line",
                "StorageUnit",
            ]
        ):
            df_year = component.df.copy()
            missing = get_missing(df_year, n, component.list_name)

@@ -199,8 +201,13 @@ def concat_networks(years):
                    pnl[k].loc[pnl_year.index, pnl_year.columns] = pnl_year

                else:
                    # this is to avoid adding multiple times assets with
                    # infinite lifetime as ror
                    # For components that aren't new, we just extend
                    # time-varying data from the previous investment
                    # period.
                    if i > 0:
                        pnl[k].loc[(year,)] = pnl[k].loc[(years[i - 1],)].values

                    # Now, add time-varying data for new components.
                    cols = pnl_year.columns.difference(pnl[k].columns)
                    pnl[k] = pd.concat([pnl[k], pnl_year[cols]], axis=1)

@@ -214,7 +221,7 @@ def concat_networks(years):
    # set investment periods
    n.investment_periods = n.snapshots.levels[0]
    # weighting of the investment period -> assuming last period same weighting as the period before
    time_w = n.investment_periods.to_series().diff().shift(-1).fillna(method="ffill")
    time_w = n.investment_periods.to_series().diff().shift(-1).ffill()
    n.investment_period_weightings["years"] = time_w
    # set objective weightings
    objective_w = get_investment_weighting(
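As a hedged illustration (not project code) of the investment-period weighting rebuilt above: replacing fillna(method="ffill") with ffill() leaves the behaviour unchanged, and the last period is assumed to span as many years as the one before it.

import pandas as pd

periods = pd.Index([2030, 2040, 2050], name="period")
# year difference to the next period, shifted so each period carries its own span,
# with the final period copying the span of the preceding one
years = periods.to_series().diff().shift(-1).ffill()
print(years.tolist())  # [10.0, 10.0, 10.0]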
@@ -19,7 +19,7 @@ import pandas as pd
import pypsa
import xarray as xr
from _helpers import update_config_with_sector_opts
from add_electricity import calculate_annuity, sanitize_carriers
from add_electricity import calculate_annuity, sanitize_carriers, sanitize_locations
from build_energy_totals import build_co2_totals, build_eea_co2, build_eurostat_co2
from networkx.algorithms import complement
from networkx.algorithms.connectivity.edge_augmentation import k_edge_augmentation

@@ -551,6 +551,17 @@ def patch_electricity_network(n):
    n.loads_t.p_set.rename(lambda x: x.strip(), axis=1, inplace=True)


def add_eu_bus(n, x=-5.5, y=46):
    """
    Add EU bus to the network.

    This cosmetic bus serves as a reference point for the location of
    the EU buses in the plots and summaries.
    """
    n.add("Bus", "EU", location="EU", x=x, y=y, carrier="none")
    n.add("Carrier", "none")


def add_co2_tracking(n, costs, options):
    # minus sign because opposite to how fossil fuels used:
    # CH4 burning puts CH4 down, atmosphere up

@@ -715,27 +726,27 @@ def add_dac(n, costs):
    heat_buses = n.buses.index[n.buses.carrier.isin(heat_carriers)]
    locations = n.buses.location[heat_buses]

    efficiency2 = -(
    electricity_input = (
        costs.at["direct air capture", "electricity-input"]
        + costs.at["direct air capture", "compression-electricity-input"]
    )
    efficiency3 = -(
    )  # MWh_el / tCO2
    heat_input = (
        costs.at["direct air capture", "heat-input"]
        - costs.at["direct air capture", "compression-heat-output"]
    )
    )  # MWh_th / tCO2

    n.madd(
        "Link",
        heat_buses.str.replace(" heat", " DAC"),
        bus0="co2 atmosphere",
        bus1=spatial.co2.df.loc[locations, "nodes"].values,
        bus2=locations.values,
        bus3=heat_buses,
        bus0=locations.values,
        bus1=heat_buses,
        bus2="co2 atmosphere",
        bus3=spatial.co2.df.loc[locations, "nodes"].values,
        carrier="DAC",
        capital_cost=costs.at["direct air capture", "fixed"],
        efficiency=1.0,
        efficiency2=efficiency2,
        efficiency3=efficiency3,
        capital_cost=costs.at["direct air capture", "fixed"] / electricity_input,
        efficiency=-heat_input / electricity_input,
        efficiency2=-1 / electricity_input,
        efficiency3=1 / electricity_input,
        p_nom_extendable=True,
        lifetime=costs.at["direct air capture", "lifetime"],
    )

@@ -1010,6 +1021,7 @@ def insert_electricity_distribution_grid(n, costs):
        "Store",
        nodes + " home battery",
        bus=nodes + " home battery",
        location=nodes,
        e_cyclic=True,
        e_nom_extendable=True,
        carrier="home battery",

@@ -3599,6 +3611,8 @@ if __name__ == "__main__":
    for carrier in conventional:
        add_carrier_buses(n, carrier)

    add_eu_bus(n)

    add_co2_tracking(n, costs, options)

    add_generation(n, costs)

@@ -3738,5 +3752,6 @@ if __name__ == "__main__":
    n.meta = dict(snakemake.config, **dict(wildcards=dict(snakemake.wildcards)))

    sanitize_carriers(n, snakemake.config)
    sanitize_locations(n)

    n.export_to_netcdf(snakemake.output[0])
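A small sketch of the DAC rescaling above, with hypothetical figures that are not taken from the cost data: the link is now dimensioned by its electricity input at bus0, so the remaining coefficients and the capital cost are expressed per MWh of electricity rather than per tonne of CO2 captured.

electricity_input = 0.4  # MWh_el per tCO2, hypothetical figure
heat_input = 1.6         # MWh_th per tCO2, hypothetical figure
co2_captured_per_mwh_el = 1 / electricity_input         # 2.5 tCO2 per MWh_el
heat_drawn_per_mwh_el = heat_input / electricity_input  # 4.0 MWh_th per MWh_el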
@@ -418,7 +418,7 @@ def add_CCL_constraints(n, config):
    Example
    -------
    scenario:
        opts: [Co2L-CCL-24H]
        opts: [Co2L-CCL-24h]
    electricity:
        agg_p_nom_limits: data/agg_p_nom_minmax.csv
    """

@@ -463,7 +463,7 @@ def add_EQ_constraints(n, o, scaling=1e-1):
    Example
    -------
    scenario:
        opts: [Co2L-EQ0.7-24H]
        opts: [Co2L-EQ0.7-24h]

    Require each country or node to on average produce a minimal share
    of its total electricity consumption itself. Example: EQ0.7c demands each country

@@ -527,7 +527,7 @@ def add_BAU_constraints(n, config):
    Example
    -------
    scenario:
        opts: [Co2L-BAU-24H]
        opts: [Co2L-BAU-24h]
    electricity:
        BAU_mincapacities:
            solar: 0

@@ -564,7 +564,7 @@ def add_SAFE_constraints(n, config):
    config.yaml requires to specify opts:

    scenario:
        opts: [Co2L-SAFE-24H]
        opts: [Co2L-SAFE-24h]
    electricity:
        SAFE_reservemargin: 0.1
    Which sets a reserve margin of 10% above the peak demand.
test.sh (new executable file)
@@ -0,0 +1,8 @@
# SPDX-FileCopyrightText: : 2021-2024 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: CC0-1.0

snakemake -call solve_elec_networks --configfile config/test/config.electricity.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.overnight.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.myopic.yaml --rerun-triggers=mtime && \
snakemake -call all --configfile config/test/config.perfect.yaml --rerun-triggers=mtime