From b7b7407756401917a851b2f5dac21843b82ea5f7 Mon Sep 17 00:00:00 2001 From: martavp Date: Sat, 26 Dec 2020 17:47:32 +0100 Subject: [PATCH 1/4] Adapt nomenclature from "YearCommissioned" to "DateIn" This was breaking add_existing_baseyear.py and it is now fixed. Column name for commissioning year in powerplantmatching has changed. Now "DateIn" is used as column name, also when renewable capacities per country are added to the power plants dataframe --- scripts/add_existing_baseyear.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/add_existing_baseyear.py b/scripts/add_existing_baseyear.py index 852c5bb0..09b47da6 100644 --- a/scripts/add_existing_baseyear.py +++ b/scripts/add_existing_baseyear.py @@ -125,7 +125,7 @@ def add_existing_renewables(df_agg): if capacity > 0.: df_agg.at[name,"Fueltype"] = tech df_agg.at[name,"Capacity"] = capacity - df_agg.at[name,"YearCommissioned"] = year + df_agg.at[name,"DateIn"] = year df_agg.at[name,"cluster_bus"] = node def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, baseyear): @@ -182,7 +182,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas add_existing_renewables(df_agg) df_agg["grouping_year"] = np.take(grouping_years, - np.digitize(df_agg.YearCommissioned, + np.digitize(df_agg.DateIn, grouping_years, right=True)) @@ -249,7 +249,7 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years grouping_years : intervals to group existing capacities - linear decomissioning of heating capacities from 2020 to 2045 is + linear decommissioning of heating capacities from 2020 to 2045 is currently assumed heating capacities split between residential and services proportional @@ -408,18 +408,18 @@ if __name__ == "__main__": if 'snakemake' not in globals(): from vresutils.snakemake import MockSnakemake snakemake = MockSnakemake( - wildcards=dict(network='elec', simpl='', clusters='39', lv='1.0', - 
sector_opts='Co2L0-168H-T-H-B-I-solar3-dist1', - co2_budget_name='b30b3', + wildcards=dict(network='elec', simpl='', clusters='45', lv='1.0', + sector_opts='Co2L0-3H-T-H-B-I-solar3-dist1', planning_horizons='2020'), - input=dict(network='pypsa-eur-sec/results/test/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{co2_budget_name}_{planning_horizons}.nc', + input=dict(network='pypsa-eur-sec/results/version-2/prenetworks/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc', powerplants='pypsa-eur/resources/powerplants.csv', busmap_s='pypsa-eur/resources/busmap_{network}_s{simpl}.csv', busmap='pypsa-eur/resources/busmap_{network}_s{simpl}_{clusters}.csv', - costs='pypsa-eur-sec/data/costs/costs_{planning_horizons}.csv', + costs='technology_data/outputs/costs_{planning_horizons}.csv', cop_air_total="pypsa-eur-sec/resources/cop_air_total_{network}_s{simpl}_{clusters}.nc", - cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc"), - output=['pypsa-eur-sec/results/test/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'], + cop_soil_total="pypsa-eur-sec/resources/cop_soil_total_{network}_s{simpl}_{clusters}.nc", + clustered_pop_layout="pypsa-eur-sec/resources/pop_layout_{network}_s{simpl}_{clusters}.csv",), + output=['pypsa-eur-sec/results/version-2/prenetworks_brownfield/{network}_s{simpl}_{clusters}_lv{lv}__{sector_opts}_{planning_horizons}.nc'], ) import yaml with open('config.yaml', encoding='utf8') as f: From c623b82b3967f3d841125a47d3f3b024febbac7f Mon Sep 17 00:00:00 2001 From: martavp Date: Mon, 28 Dec 2020 15:39:05 +0100 Subject: [PATCH 2/4] Make explicit solver_dir=tmpdir Running the rule solve_network in the university cluster, I was getting a "No space left on device" error. 
Making solver_dir=tmpdir by default avoids this error and makes it easier to identify any problem with temp files --- scripts/solve_network.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/solve_network.py b/scripts/solve_network.py index 795c3327..85251caa 100644 --- a/scripts/solve_network.py +++ b/scripts/solve_network.py @@ -272,6 +272,7 @@ def solve_network(n, config=None, solver_log=None, opts=None): solver_name=solver_name, solver_logfile=solver_log, solver_options=solver_options, + solver_dir=tmpdir, extra_functionality=extra_functionality, formulation=solve_opts['formulation']) #extra_postprocessing=extra_postprocessing From 6a7b1d545009fb55bc9f53d5f26de1fd9718899d Mon Sep 17 00:00:00 2001 From: martavp Date: Wed, 30 Dec 2020 12:14:08 +0100 Subject: [PATCH 3/4] Fix unicode error due to dash before sawdust A quick fix to https://github.com/PyPSA/pypsa-eur-sec/issues/79 --- config.default.yaml | 2 +- scripts/build_biomass_potentials.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/config.default.yaml b/config.default.yaml index d43d0e29..7907859a 100644 --- a/config.default.yaml +++ b/config.default.yaml @@ -69,7 +69,7 @@ biomass: year: 2030 scenario: "Med" classes: - solid biomass: ['Primary agricultural residues', 'Forestry energy residue', 'Secondary forestry residues', 'Secondary Forestry residues – sawdust', 'Forestry residues from landscape care biomass', 'Municipal waste'] + solid biomass: ['Primary agricultural residues', 'Forestry energy residue', 'Secondary forestry residues', 'Secondary Forestry residues sawdust', 'Forestry residues from landscape care biomass', 'Municipal waste'] not included: ['Bioethanol sugar beet biomass', 'Rapeseeds for biodiesel', 'sunflower and soya for Biodiesel', 'Starchy crops biomass', 'Grassy crops biomass', 'Willow biomass', 'Poplar biomass potential', 'Roundwood fuelwood', 'Roundwood Chips & Pellets'] biogas: ['Manure biomass potential', 'Sludge biomass'] diff --git 
a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index c959680f..44fd04b5 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -1,3 +1,4 @@ +# coding: utf-8 import pandas as pd @@ -57,7 +58,12 @@ if __name__ == "__main__": snakemake.input['jrc_potentials'] = "data/biomass/JRC Biomass Potentials.xlsx" snakemake.output = Dict() snakemake.output['biomass_potentials'] = 'data/biomass_potentials.csv' + snakemake.output['biomass_potentials_all']='resources/biomass_potentials_all.csv' with open('config.yaml', encoding='utf8') as f: snakemake.config = yaml.safe_load(f) - + + if 'Secondary Forestry residues sawdust' in snakemake.config['biomass']['classes']['solid biomass']: + snakemake.config['biomass']['classes']['solid biomass'].remove('Secondary Forestry residues sawdust') + snakemake.config['biomass']['classes']['solid biomass'].append('Secondary Forestry residues – sawdust') + build_biomass_potentials() From 918d803c0d9b56f7f67898c93b7b7a29a9a88401 Mon Sep 17 00:00:00 2001 From: martavp Date: Tue, 12 Jan 2021 11:57:22 +0100 Subject: [PATCH 4/4] Add commented line regarding hack for unicode error in snakemake --- scripts/build_biomass_potentials.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/build_biomass_potentials.py b/scripts/build_biomass_potentials.py index 44fd04b5..a918eefb 100644 --- a/scripts/build_biomass_potentials.py +++ b/scripts/build_biomass_potentials.py @@ -62,6 +62,7 @@ if __name__ == "__main__": with open('config.yaml', encoding='utf8') as f: snakemake.config = yaml.safe_load(f) + # This is a hack, to be replaced once snakemake is unicode-conform if 'Secondary Forestry residues sawdust' in snakemake.config['biomass']['classes']['solid biomass']: snakemake.config['biomass']['classes']['solid biomass'].remove('Secondary Forestry residues sawdust') snakemake.config['biomass']['classes']['solid biomass'].append('Secondary Forestry residues – sawdust')