Merge branch 'multiyear-sec' into multiyear
commit 6acd5da4d4
@@ -700,6 +700,21 @@ solving:
  mem: 30000 #memory in MB; 20 GB enough for 50+B+I+H2; 100 GB for 181+B+I+H2

+operations:
+  rolling_horizon:
+    window: 10 # days
+    overlap: 8 # days
+  bidding_prices: # EUR/MW
+    H2 Electrolysis: -10
+    H2 Fuel Cell: 200
+    urban central water tanks charger: -10
+    urban central water tanks discharger: 10
+    hydro: 70
+    solid biomass: 150
+    biogas: 100
+  co2_price: 500 # EUR/t
+  co2_sequestation_limit: 200 # Mt/a
+
 plotting:
   map:
     boundaries: [-11, 30, 34, 71]
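The new `operations` block drives a rolling-horizon dispatch of the already-built system. A minimal sketch of how a 10-day window with an 8-day overlap advances through a year of snapshots is shown below; the 3-hourly resolution and keeping only the non-overlapping tail of each window are assumptions for illustration, not taken from the branch itself.

```python
# Illustration only: how window=10 and overlap=8 slice a year of snapshots.
import pandas as pd

window = 10  # days, config: operations.rolling_horizon.window
overlap = 8  # days, config: operations.rolling_horizon.overlap
step = window - overlap  # the horizon advances by 2 days per solve

snapshots = pd.date_range("2013-01-01", "2013-12-31 23:00", freq="3H")
per_day = 8  # 3-hourly resolution assumed

for i in list(range(0, len(snapshots), step * per_day))[:3]:  # first three windows
    sns = snapshots[i : i + window * per_day]
    print(sns[0], "->", sns[-1], f"({len(sns)} snapshots)")
```

The `bidding_prices` entries then act as exogenous marginal prices (EUR/MW) for the listed technologies during this operational dispatch, with `co2_price` and `co2_sequestation_limit` constraining emissions.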
data/era5-annual-HDD-per-country.csv (new file)
@@ -0,0 +1,34 @@
name,1951,1952,1953,1954,1955,1956,1957,1958,1959,1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021
AL,56,58,65,67,55,70,58,59,61,54,54,62,62,62,68,59,60,57,57,58,61,57,64,60,61,61,54,63,57,65,63,58,61,57,57,58,61,62,56,53,67,62,62,53,61,61,60,60,56,58,57,53,61,58,65,64,55,55,54,53,58,58,50,46,54,50,54,47,49,50,54
AT,374,416,379,422,419,449,387,393,378,389,367,437,425,406,424,376,385,400,409,413,394,407,408,379,384,400,380,414,405,428,397,384,377,411,420,406,409,378,369,372,415,373,383,334,380,416,371,373,368,343,371,337,370,370,384,362,333,340,351,388,337,353,358,302,324,336,347,315,320,322,358
BE,95,105,94,102,109,113,91,97,88,89,84,111,114,100,100,91,94,101,102,101,96,102,101,90,97,97,92,103,108,105,99,94,96,99,112,107,108,85,84,83,101,92,94,82,89,110,90,89,84,79,89,78,89,90,87,86,76,87,87,107,75,90,99,69,81,88,82,81,80,71,89
BG,283,294,334,367,295,363,306,283,320,268,277,303,324,313,323,259,297,292,320,286,300,306,323,303,296,314,287,310,293,326,301,312,295,306,326,313,333,320,276,272,332,303,318,266,307,335,325,303,273,275,284,272,322,277,298,296,259,266,266,273,313,295,258,250,261,265,281,261,232,250,277
BA,144,164,167,183,158,194,157,157,152,143,141,173,180,176,176,155,162,157,165,165,163,154,170,156,153,168,145,170,153,176,163,154,157,159,167,160,166,158,146,139,174,152,162,143,157,171,156,158,147,132,145,130,156,146,164,148,134,132,139,147,144,147,133,113,137,134,144,128,125,127,141
CH,214,225,210,226,221,240,211,215,203,214,200,234,231,214,229,213,217,219,225,227,217,221,224,213,216,220,208,219,220,229,216,208,208,227,224,217,217,200,202,200,217,204,208,183,206,217,195,205,203,192,200,189,199,204,209,193,187,194,193,213,179,196,204,178,181,189,191,174,184,176,197
CZ,297,344,307,351,350,377,313,322,301,314,294,358,361,345,351,297,296,320,342,336,319,326,322,282,296,315,299,330,328,347,316,308,298,323,350,326,346,297,280,283,333,299,316,279,312,364,319,296,288,265,309,285,308,302,311,300,269,275,289,338,281,298,306,242,261,283,287,262,255,259,302
DE,1254,1428,1237,1416,1484,1567,1282,1342,1229,1266,1183,1486,1528,1390,1399,1269,1211,1341,1447,1429,1300,1388,1356,1184,1258,1339,1221,1377,1433,1433,1348,1288,1273,1384,1492,1425,1491,1201,1156,1135,1367,1241,1312,1155,1295,1548,1288,1220,1172,1085,1250,1168,1269,1236,1239,1198,1072,1159,1202,1459,1112,1232,1305,992,1095,1188,1148,1092,1070,1019,1230
DK,160,176,152,175,186,190,163,178,154,169,154,184,194,176,182,181,153,170,182,186,159,166,159,147,147,172,161,171,190,177,175,164,155,162,191,181,190,151,137,132,160,147,166,154,162,191,162,154,147,134,161,148,158,151,150,144,134,137,150,193,144,159,160,125,138,145,139,142,134,125,150
ES,962,911,927,948,785,1092,934,848,821,890,742,952,981,967,949,878,890,839,933,899,971,961,943,931,928,928,797,906,887,911,793,787,807,920,860,891,781,783,684,767,911,852,889,724,627,759,605,735,796,750,757,677,746,813,856,714,776,785,736,856,673,810,830,649,684,722,688,768,708,672,728
EE,297,310,283,297,321,336,278,310,281,305,253,298,323,292,306,324,283,310,333,317,285,285,289,256,245,327,302,324,299,313,289,278,260,271,328,294,332,281,232,240,257,257,285,288,265,306,281,278,262,235,273,271,278,270,270,260,250,230,268,315,253,286,259,254,225,261,255,254,235,209,270
FI,3285,3442,3085,3179,3880,3819,3296,3626,3180,3386,3065,3513,3467,3329,3526,3873,3284,3693,3640,3494,3572,3178,3458,2963,3074,3691,3490,3743,3419,3607,3572,3331,3296,3192,3894,3516,3793,3402,2888,3075,3209,3163,3319,3341,3213,3343,3302,3468,3259,2915,3303,3307,3231,3159,2998,3101,3018,3009,3219,3604,2961,3351,2985,2983,2795,3030,3127,3061,3116,2726,3260
FR,1430,1480,1422,1489,1396,1633,1338,1378,1240,1332,1196,1584,1630,1499,1471,1331,1408,1412,1473,1469,1475,1453,1545,1356,1462,1445,1312,1454,1502,1545,1396,1315,1424,1484,1599,1518,1520,1269,1237,1227,1501,1375,1389,1168,1280,1454,1222,1337,1263,1190,1299,1132,1294,1346,1376,1263,1199,1301,1277,1522,1060,1300,1408,1034,1143,1266,1236,1150,1165,1043,1279
GB,906,934,809,901,925,935,813,890,787,849,825,1000,1046,912,970,917,839,874,894,869,805,880,862,842,838,851,863,873,972,880,896,823,824,838,923,944,903,801,764,746,859,824,857,793,794,886,738,752,745,771,816,726,751,743,748,734,706,790,777,930,707,832,841,683,759,769,720,764,750,725,764
GR,166,162,210,211,160,215,184,169,196,164,172,182,184,192,199,162,185,176,171,164,184,185,195,186,188,190,160,186,172,191,184,192,190,179,174,180,194,193,176,165,206,197,195,161,179,191,192,180,167,177,174,164,191,172,184,189,161,158,155,143,188,179,148,136,165,150,164,139,146,148,156
HR,116,140,140,155,137,166,131,132,122,119,114,149,158,152,144,124,133,129,143,138,137,131,144,124,124,139,121,140,130,149,133,132,130,133,148,141,144,128,118,114,143,123,138,115,126,145,132,129,124,106,120,109,135,127,140,123,109,110,116,130,123,123,118,92,114,116,119,111,102,105,119
HU,236,291,282,321,289,333,272,273,246,248,235,288,321,309,298,244,272,268,293,283,276,264,283,244,255,281,267,287,271,311,272,282,265,274,313,294,305,274,243,238,299,262,288,240,271,310,291,272,267,232,269,243,300,267,289,265,229,230,243,275,267,259,250,198,235,251,255,234,211,228,259
IE,216,215,191,208,213,211,190,204,184,205,195,234,250,208,232,210,216,213,222,214,190,226,204,213,198,211,210,205,239,212,209,204,201,211,229,241,217,200,192,192,209,209,213,201,195,219,180,186,191,204,206,189,192,193,187,188,173,202,204,241,192,205,210,186,205,199,184,200,191,194,189
IT,652,683,660,700,615,778,640,653,616,631,587,715,726,674,710,665,655,656,682,672,678,639,695,659,655,670,604,681,675,729,684,645,657,695,688,671,676,630,616,610,712,626,650,555,623,639,578,618,619,563,585,546,629,612,675,597,548,568,590,637,562,602,587,481,541,528,570,529,540,520,577
LT,344,371,342,368,368,405,318,353,329,346,303,355,386,362,371,354,322,365,401,372,330,345,339,312,289,388,348,370,370,382,342,326,301,326,393,352,397,335,275,272,313,311,336,329,325,377,335,325,306,274,323,313,328,324,325,316,299,277,316,366,305,332,314,298,273,308,296,302,267,252,327
LU,8,9,8,9,10,10,8,9,8,8,7,10,10,9,9,8,8,9,9,9,9,9,9,8,9,9,8,9,10,10,9,9,9,9,10,10,10,8,8,8,9,8,9,8,8,10,8,8,8,7,8,7,8,8,8,8,7,8,8,9,7,8,9,6,7,8,8,7,7,7,8
LV,383,402,372,394,407,439,351,391,361,384,326,386,420,385,402,401,360,401,434,407,361,372,371,337,316,424,385,411,392,411,372,357,330,354,425,379,428,362,300,301,334,337,366,366,348,402,365,358,334,302,353,346,357,352,354,337,324,300,346,402,330,365,342,329,297,337,330,328,297,272,351
MK,64,66,74,79,66,79,67,66,71,63,64,72,73,70,77,66,69,66,67,67,70,67,75,69,70,69,62,72,66,73,71,69,70,67,69,67,70,71,65,63,76,68,69,59,69,70,71,68,64,65,65,62,69,64,70,68,60,59,61,59,67,66,56,53,61,58,62,54,55,57,63
ME,40,43,45,48,41,50,42,42,42,39,39,44,45,45,48,43,43,42,42,43,43,41,44,43,42,45,39,45,42,46,45,41,43,42,42,41,44,44,42,39,47,43,43,38,43,44,42,43,39,39,40,37,42,41,45,43,38,38,38,38,39,40,36,32,37,36,39,34,35,35,39
NL,113,126,110,121,130,137,108,118,107,107,104,132,141,121,120,114,105,119,124,122,111,120,116,104,111,116,107,120,133,122,118,112,110,118,134,127,130,99,99,94,119,106,113,102,109,138,111,102,97,93,106,98,109,103,100,100,87,102,103,131,93,108,117,82,95,104,96,98,93,85,105
NO,3339,3541,3020,3307,3639,3731,3315,3547,3122,3353,3400,3807,3641,3496,3783,3951,3259,3533,3425,3431,3339,3128,3312,3028,3094,3405,3369,3436,3457,3380,3502,3181,3141,3117,3541,3359,3449,3200,2890,2848,3057,3032,3196,3207,3170,3301,3060,3203,3062,2878,3166,3028,2983,2943,2874,2828,2918,2954,3018,3442,2796,3157,2946,2754,2793,2859,2963,2930,2981,2711,3035
PL,1249,1438,1294,1478,1446,1586,1279,1349,1269,1305,1215,1435,1527,1450,1467,1280,1193,1321,1504,1431,1293,1347,1314,1200,1184,1419,1270,1393,1431,1491,1336,1265,1178,1304,1494,1377,1507,1255,1077,1078,1312,1252,1310,1197,1302,1512,1326,1249,1171,1052,1268,1192,1296,1230,1262,1244,1117,1092,1220,1426,1177,1274,1251,1068,1063,1163,1158,1120,1019,1017,1252
PT,107,103,99,109,80,131,106,93,93,97,78,106,104,115,103,98,103,97,112,110,115,116,106,109,109,109,99,139,107,102,89,90,95,104,99,108,85,89,75,87,104,96,104,85,66,88,62,82,96,90,90,78,87,97,103,90,91,91,84,96,80,101,100,81,79,87,79,96,81,72,78
RO,736,811,883,948,822,968,810,789,826,731,744,826,890,867,873,706,791,776,858,792,787,798,838,782,752,855,772,836,780,880,801,811,763,807,917,815,882,827,710,700,846,801,848,701,806,869,842,805,740,710,760,721,844,754,799,780,684,695,695,757,793,781,709,662,681,721,720,690,624,648,740
RS,222,252,273,300,249,313,248,249,254,231,226,270,283,271,274,233,253,243,267,254,251,246,272,242,243,265,233,265,241,273,251,255,244,253,275,256,268,254,231,220,275,243,263,223,250,270,259,250,237,217,234,217,262,238,265,247,217,214,222,229,251,244,214,192,222,223,232,211,197,212,234
SK,178,210,195,217,208,228,192,193,185,185,178,208,213,211,219,179,187,192,203,200,192,188,195,178,178,197,188,204,193,215,191,190,180,193,212,198,207,187,172,173,202,186,191,171,186,204,193,184,177,162,184,174,190,184,191,182,163,161,170,188,175,179,174,142,161,170,176,157,152,158,180
SI,61,72,67,73,69,78,65,66,62,61,59,75,75,73,73,62,65,67,71,69,67,68,71,62,61,69,62,71,68,74,67,65,65,69,73,71,71,63,59,58,70,61,63,55,61,71,63,62,62,54,59,55,65,63,67,61,54,57,57,65,58,59,59,46,55,57,58,53,52,53,61
SE,3891,4219,3560,3919,4426,4488,3950,4223,3662,3988,3814,4451,4260,4021,4358,4613,3929,4280,4255,4254,4043,3806,3975,3634,3625,4238,4132,4314,4246,4287,4301,3913,3840,3819,4588,4139,4376,3931,3476,3446,3785,3695,3893,3991,3916,4073,3757,3950,3781,3446,3898,3778,3755,3769,3632,3561,3606,3590,3806,4397,3474,3935,3675,3452,3421,3635,3693,3705,3689,3247,3807
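The new CSV holds ERA5-derived annual heating degree days per country for 1951-2021 (one row per country code, one column per year). A quick way to inspect it with pandas and see how strongly a single weather year deviates from the long-run mean; the snippet only illustrates the data layout and is not taken from the scripts that consume the file:

```python
import pandas as pd

# Annual heating degree days per country; columns are the years 1951-2021.
hdd = pd.read_csv("data/era5-annual-HDD-per-country.csv", index_col="name")
hdd.columns = hdd.columns.astype(int)

# Ratio of one weather year to the climatological mean; values above 1
# indicate a colder-than-average heating season in that country.
ratio = hdd[2010] / hdd.mean(axis=1)
print(ratio.sort_values(ascending=False).head())
```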
@@ -9,15 +9,15 @@ rule build_population_layouts:
        urban_percent="data/urban_percent.csv",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        pop_layout_total=RESOURCES + "pop_layout_total.nc",
-        pop_layout_urban=RESOURCES + "pop_layout_urban.nc",
-        pop_layout_rural=RESOURCES + "pop_layout_rural.nc",
+        pop_layout_total=RESOURCES + "pop_layout_total{weather_year}.nc",
+        pop_layout_urban=RESOURCES + "pop_layout_urban{weather_year}.nc",
+        pop_layout_rural=RESOURCES + "pop_layout_rural{weather_year}.nc",
    log:
-        LOGS + "build_population_layouts.log",
+        LOGS + "build_population_layouts{weather_year}.log",
    resources:
        mem_mb=20000,
    benchmark:
-        BENCHMARKS + "build_population_layouts"
+        BENCHMARKS + "build_population_layouts{weather_year}"
    threads: 8
    conda:
        "../envs/environment.yaml"
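This renaming pattern repeats through the rest of the workflow: a `{weather_year}` wildcard is spliced into resource, log and benchmark names directly after the fixed prefix. A small sketch of how such a pattern resolves, assuming Snakemake is installed; the wildcard values below are hypothetical, in the workflow they come from `config["scenario"]`:

```python
from snakemake.io import expand

RESOURCES = "resources/"
paths = expand(
    RESOURCES + "pop_layout_total{weather_year}.nc",
    weather_year=["", "1987", "2013"],  # "" reproduces the old single-year name
)
print(paths)
# ['resources/pop_layout_total.nc',
#  'resources/pop_layout_total1987.nc',
#  'resources/pop_layout_total2013.nc']
```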
@@ -27,19 +27,19 @@ rule build_population_layouts:


rule build_clustered_population_layouts:
    input:
-        pop_layout_total=RESOURCES + "pop_layout_total.nc",
-        pop_layout_urban=RESOURCES + "pop_layout_urban.nc",
-        pop_layout_rural=RESOURCES + "pop_layout_rural.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        pop_layout_total=RESOURCES + "pop_layout_total{weather_year}.nc",
+        pop_layout_urban=RESOURCES + "pop_layout_urban{weather_year}.nc",
+        pop_layout_rural=RESOURCES + "pop_layout_rural{weather_year}.nc",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
    log:
-        LOGS + "build_clustered_population_layouts_{simpl}_{clusters}.log",
+        LOGS + "build_clustered_population_layouts{weather_year}_{simpl}_{clusters}.log",
    resources:
        mem_mb=10000,
    benchmark:
-        BENCHMARKS + "build_clustered_population_layouts/s{simpl}_{clusters}"
+        BENCHMARKS + "build_clustered_population_layouts/{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -48,19 +48,19 @@ rule build_clustered_population_layouts:


rule build_simplified_population_layouts:
    input:
-        pop_layout_total=RESOURCES + "pop_layout_total.nc",
-        pop_layout_urban=RESOURCES + "pop_layout_urban.nc",
-        pop_layout_rural=RESOURCES + "pop_layout_rural.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}.geojson",
+        pop_layout_total=RESOURCES + "pop_layout_total{weather_year}.nc",
+        pop_layout_urban=RESOURCES + "pop_layout_urban{weather_year}.nc",
+        pop_layout_rural=RESOURCES + "pop_layout_rural{weather_year}.nc",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}.geojson",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}.csv",
    resources:
        mem_mb=10000,
    log:
-        LOGS + "build_simplified_population_layouts_{simpl}",
+        LOGS + "build_simplified_population_layouts{weather_year}_{simpl}",
    benchmark:
-        BENCHMARKS + "build_simplified_population_layouts/s{simpl}"
+        BENCHMARKS + "build_simplified_population_layouts/{weather_year}_s{simpl}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -92,18 +92,18 @@ if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]:
        entry="data/gas_network/scigrid-gas/data/IGGIELGN_BorderPoints.geojson",
        production="data/gas_network/scigrid-gas/data/IGGIELGN_Productions.geojson",
        regions_onshore=RESOURCES
-        + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        regions_offshore=RESOURCES
-        + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_offshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
    output:
        gas_input_nodes=RESOURCES
-        + "gas_input_locations_s{simpl}_{clusters}.geojson",
+        + "gas_input_locations{weather_year}_s{simpl}_{clusters}.geojson",
        gas_input_nodes_simplified=RESOURCES
-        + "gas_input_locations_s{simpl}_{clusters}_simplified.csv",
+        + "gas_input_locations{weather_year}_s{simpl}_{clusters}_simplified.csv",
    resources:
        mem_mb=2000,
    log:
-        LOGS + "build_gas_input_locations_s{simpl}_{clusters}.log",
+        LOGS + "build_gas_input_locations{weather_year}_s{simpl}_{clusters}.log",
    conda:
        "../envs/environment.yaml"
    script:
@@ -113,15 +113,15 @@ if config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]:
    input:
        cleaned_gas_network=RESOURCES + "gas_network.csv",
        regions_onshore=RESOURCES
-        + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        regions_offshore=RESOURCES
-        + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_offshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
    output:
-        clustered_gas_network=RESOURCES + "gas_network_elec_s{simpl}_{clusters}.csv",
+        clustered_gas_network=RESOURCES + "gas_network_elec{weather_year}_s{simpl}_{clusters}.csv",
    resources:
        mem_mb=4000,
    log:
-        LOGS + "cluster_gas_network_s{simpl}_{clusters}.log",
+        LOGS + "cluster_gas_network{weather_year}_s{simpl}_{clusters}.log",
    conda:
        "../envs/environment.yaml"
    script:
@@ -141,18 +141,18 @@ if not (config["sector"]["gas_network"] or config["sector"]["H2_retrofit"]):


rule build_heat_demands:
    input:
-        pop_layout=RESOURCES + "pop_layout_{scope}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        heat_demand=RESOURCES + "heat_demand_{scope}_elec_s{simpl}_{clusters}.nc",
+        heat_demand=RESOURCES + "heat_demand_{scope}_elec{weather_year}_s{simpl}_{clusters}.nc",
    resources:
        mem_mb=20000,
    threads: 8
    log:
-        LOGS + "build_heat_demands_{scope}_{simpl}_{clusters}.loc",
+        LOGS + "build_heat_demands_{scope}_{weather_year}_{simpl}_{clusters}.loc",
    benchmark:
-        BENCHMARKS + "build_heat_demands/{scope}_s{simpl}_{clusters}"
+        BENCHMARKS + "build_heat_demands/{scope}_{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -161,19 +161,19 @@ rule build_heat_demands:


rule build_temperature_profiles:
    input:
-        pop_layout=RESOURCES + "pop_layout_{scope}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        temp_soil=RESOURCES + "temp_soil_{scope}_elec_s{simpl}_{clusters}.nc",
-        temp_air=RESOURCES + "temp_air_{scope}_elec_s{simpl}_{clusters}.nc",
+        temp_soil=RESOURCES + "temp_soil_{scope}_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air=RESOURCES + "temp_air_{scope}_elec{weather_year}_s{simpl}_{clusters}.nc",
    resources:
        mem_mb=20000,
    threads: 8
    log:
-        LOGS + "build_temperature_profiles_{scope}_{simpl}_{clusters}.log",
+        LOGS + "build_temperature_profiles_{scope}_{weather_year}_{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_temperature_profiles/{scope}_s{simpl}_{clusters}"
+        BENCHMARKS + "build_temperature_profiles/{scope}_{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -182,25 +182,25 @@ rule build_temperature_profiles:


rule build_cop_profiles:
    input:
-        temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc",
-        temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc",
-        temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc",
-        temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
-        temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc",
-        temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc",
+        temp_soil_total=RESOURCES + "temp_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_soil_rural=RESOURCES + "temp_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_soil_urban=RESOURCES + "temp_soil_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_total=RESOURCES + "temp_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_rural=RESOURCES + "temp_air_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_urban=RESOURCES + "temp_air_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
    output:
-        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
-        cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc",
-        cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc",
-        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
-        cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc",
-        cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc",
+        cop_soil_total=RESOURCES + "cop_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_soil_rural=RESOURCES + "cop_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_soil_urban=RESOURCES + "cop_soil_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_total=RESOURCES + "cop_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_rural=RESOURCES + "cop_air_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_urban=RESOURCES + "cop_air_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
    resources:
        mem_mb=20000,
    log:
-        LOGS + "build_cop_profiles_s{simpl}_{clusters}.log",
+        LOGS + "build_cop_profiles{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_cop_profiles/s{simpl}_{clusters}"
+        BENCHMARKS + "build_cop_profiles/{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -209,18 +209,18 @@ rule build_cop_profiles:


rule build_solar_thermal_profiles:
    input:
-        pop_layout=RESOURCES + "pop_layout_{scope}.nc",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        pop_layout=RESOURCES + "pop_layout{weather_year}_{scope}.nc",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        cutout="cutouts/" + CDIR + config["atlite"]["default_cutout"] + ".nc",
    output:
-        solar_thermal=RESOURCES + "solar_thermal_{scope}_elec_s{simpl}_{clusters}.nc",
+        solar_thermal=RESOURCES + "solar_thermal_{scope}_elec{weather_year}_s{simpl}_{clusters}.nc",
    resources:
        mem_mb=20000,
    threads: 16
    log:
-        LOGS + "build_solar_thermal_profiles_{scope}_s{simpl}_{clusters}.log",
+        LOGS + "build_solar_thermal_profiles_{scope}_{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_solar_thermal_profiles/{scope}_s{simpl}_{clusters}"
+        BENCHMARKS + "build_solar_thermal_profiles/{scope}_{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -234,7 +234,7 @@ rule build_energy_totals:
        swiss="data/switzerland-sfoe/switzerland-new_format.csv",
        idees="data/jrc-idees-2015",
        district_heat_share="data/district_heat_share.csv",
-        eurostat=input_eurostat,
+        eurostat=directory("data/eurostat-energy_balances-june_2021_edition"),
    output:
        energy_name=RESOURCES + "energy_totals.csv",
        co2_name=RESOURCES + "co2_totals.csv",
@@ -252,6 +252,24 @@ rule build_energy_totals:
        "../scripts/build_energy_totals.py"


+rule build_heat_totals:
+    input:
+        hdd="data/era5-annual-HDD-per-country.csv",
+        energy_totals=RESOURCES + "energy_totals.csv",
+    output:
+        heat_totals=RESOURCES + "heat_totals.csv"
+    threads: 1
+    resources: mem_mb=2000
+    log:
+        LOGS + "build_heat_totals.log",
+    benchmark:
+        BENCHMARKS + "build_heat_totals",
+    conda:
+        "../envs/environment.yaml"
+    script:
+        "../scripts/build_heat_totals.py"
+
+
rule build_biomass_potentials:
    input:
        enspreso_biomass=HTTP.remote(
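The new `build_heat_totals` rule combines the annual HDD series with the Eurostat/JRC-based energy totals. One plausible reading of what `scripts/build_heat_totals.py` does — scaling each country's reference heating demand by that year's HDD relative to the climatological mean, so that heat demand totals exist for every weather year — is sketched below; the column selection and scaling rule are assumptions, the actual method lives in the script.

```python
import pandas as pd

# Hypothetical sketch of extrapolating heating totals to all weather years.
hdd = pd.read_csv("data/era5-annual-HDD-per-country.csv", index_col="name")
hdd.columns = hdd.columns.astype(int)
energy_totals = pd.read_csv("resources/energy_totals.csv", index_col=0)

heat_columns = [c for c in energy_totals.columns if "heat" in c]  # assumption
scaling = hdd.div(hdd.mean(axis=1), axis=0)  # per-country HDD anomaly

heat_totals = {
    year: energy_totals[heat_columns].mul(scaling[year], axis=0)
    for year in hdd.columns
}
pd.concat(heat_totals, names=["weather_year"]).to_csv("resources/heat_totals.csv")
```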
@@ -259,22 +277,22 @@ rule build_biomass_potentials:
            keep_local=True,
        ),
        nuts2="data/nuts/NUTS_RG_10M_2013_4326_LEVL_2.geojson",  # https://gisco-services.ec.europa.eu/distribution/v2/nuts/download/#nuts21
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        nuts3_population=ancient("data/bundle/nama_10r_3popgdp.tsv.gz"),
        swiss_cantons=ancient("data/bundle/ch_cantons.csv"),
        swiss_population=ancient("data/bundle/je-e-21.03.02.xls"),
        country_shapes=RESOURCES + "country_shapes.geojson",
    output:
        biomass_potentials_all=RESOURCES
-        + "biomass_potentials_all_s{simpl}_{clusters}.csv",
-        biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
+        + "biomass_potentials_all{weather_year}_s{simpl}_{clusters}.csv",
+        biomass_potentials=RESOURCES + "biomass_potentials{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=1000,
    log:
-        LOGS + "build_biomass_potentials_s{simpl}_{clusters}.log",
+        LOGS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_biomass_potentials_s{simpl}_{clusters}"
+        BENCHMARKS + "build_biomass_potentials{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -321,19 +339,19 @@ if config["sector"]["regional_co2_sequestration_potential"]["enable"]:
            keep_local=True,
        ),
        regions_onshore=RESOURCES
-        + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        regions_offshore=RESOURCES
-        + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
+        + "regions_offshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
    output:
        sequestration_potential=RESOURCES
-        + "co2_sequestration_potential_elec_s{simpl}_{clusters}.csv",
+        + "co2_sequestration_potential_elec{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=4000,
    log:
-        LOGS + "build_sequestration_potentials_s{simpl}_{clusters}.log",
+        LOGS + "build_sequestration_potentials{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_sequestration_potentials_s{simpl}_{clusters}"
+        BENCHMARKS + "build_sequestration_potentials{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -350,17 +368,17 @@ if not config["sector"]["regional_co2_sequestration_potential"]["enable"]:

rule build_salt_cavern_potentials:
    input:
        salt_caverns="data/h2_salt_caverns_GWh_per_sqkm.geojson",
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
-        regions_offshore=RESOURCES + "regions_offshore_elec_s{simpl}_{clusters}.geojson",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
+        regions_offshore=RESOURCES + "regions_offshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
    output:
-        h2_cavern_potential=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv",
+        h2_cavern_potential=RESOURCES + "salt_cavern_potentials{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=2000,
    log:
-        LOGS + "build_salt_cavern_potentials_s{simpl}_{clusters}.log",
+        LOGS + "build_salt_cavern_potentials{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_salt_cavern_potentials_s{simpl}_{clusters}"
+        BENCHMARKS + "build_salt_cavern_potentials{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -451,19 +469,19 @@ rule build_industrial_production_per_country_tomorrow:


rule build_industrial_distribution_key:
    input:
-        regions_onshore=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        regions_onshore=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
        hotmaps_industrial_database="data/Industrial_Database.csv",
    output:
        industrial_distribution_key=RESOURCES
-        + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
+        + "industrial_distribution_key_elec{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=1000,
    log:
-        LOGS + "build_industrial_distribution_key_s{simpl}_{clusters}.log",
+        LOGS + "build_industrial_distribution_key{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_industrial_distribution_key/s{simpl}_{clusters}"
+        BENCHMARKS + "build_industrial_distribution_key/{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -473,22 +491,22 @@ rule build_industrial_distribution_key:

rule build_industrial_production_per_node:
    input:
        industrial_distribution_key=RESOURCES
-        + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
+        + "industrial_distribution_key_elec{weather_year}_s{simpl}_{clusters}.csv",
        industrial_production_per_country_tomorrow=RESOURCES
        + "industrial_production_per_country_tomorrow_{planning_horizons}.csv",
    output:
        industrial_production_per_node=RESOURCES
-        + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
+        + "industrial_production_elec{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
    threads: 1
    resources:
        mem_mb=1000,
    log:
        LOGS
-        + "build_industrial_production_per_node_s{simpl}_{clusters}_{planning_horizons}.log",
+        + "build_industrial_production_per_node{weather_year}_s{simpl}_{clusters}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
-            + "build_industrial_production_per_node/s{simpl}_{clusters}_{planning_horizons}"
+            + "build_industrial_production_per_node/{weather_year}_s{simpl}_{clusters}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
@@ -500,22 +518,22 @@ rule build_industrial_energy_demand_per_node:
    input:
        industry_sector_ratios=RESOURCES + "industry_sector_ratios.csv",
        industrial_production_per_node=RESOURCES
-        + "industrial_production_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
+        + "industrial_production_elec{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
        industrial_energy_demand_per_node_today=RESOURCES
-        + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv",
+        + "industrial_energy_demand_today_elec{weather_year}_s{simpl}_{clusters}.csv",
    output:
        industrial_energy_demand_per_node=RESOURCES
-        + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
+        + "industrial_energy_demand_elec{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
    threads: 1
    resources:
        mem_mb=1000,
    log:
        LOGS
-        + "build_industrial_energy_demand_per_node_s{simpl}_{clusters}_{planning_horizons}.log",
+        + "build_industrial_energy_demand_per_node{weather_year}_s{simpl}_{clusters}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
-            + "build_industrial_energy_demand_per_node/s{simpl}_{clusters}_{planning_horizons}"
+            + "build_industrial_energy_demand_per_node/{weather_year}_s{simpl}_{clusters}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
@@ -548,19 +566,19 @@ rule build_industrial_energy_demand_per_country_today:

rule build_industrial_energy_demand_per_node_today:
    input:
        industrial_distribution_key=RESOURCES
-        + "industrial_distribution_key_elec_s{simpl}_{clusters}.csv",
+        + "industrial_distribution_key_elec{weather_year}_s{simpl}_{clusters}.csv",
        industrial_energy_demand_per_country_today=RESOURCES
        + "industrial_energy_demand_per_country_today.csv",
    output:
        industrial_energy_demand_per_node_today=RESOURCES
-        + "industrial_energy_demand_today_elec_s{simpl}_{clusters}.csv",
+        + "industrial_energy_demand_today_elec{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=1000,
    log:
-        LOGS + "build_industrial_energy_demand_per_node_today_s{simpl}_{clusters}.log",
+        LOGS + "build_industrial_energy_demand_per_node_today{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_industrial_energy_demand_per_node_today/s{simpl}_{clusters}"
+        BENCHMARKS + "build_industrial_energy_demand_per_node_today/{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -573,23 +591,23 @@ if config["sector"]["retrofitting"]["retro_endogen"]:
    input:
        building_stock="data/retro/data_building_stock.csv",
        data_tabula="data/retro/tabula-calculator-calcsetbuilding.csv",
-        air_temperature=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
+        air_temperature=RESOURCES + "temp_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
        u_values_PL="data/retro/u_values_poland.csv",
        tax_w="data/retro/electricity_taxes_eu.csv",
        construction_index="data/retro/comparative_level_investment.csv",
        floor_area_missing="data/retro/floor_area_missing.csv",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
        cost_germany="data/retro/retro_cost_germany.csv",
        window_assumptions="data/retro/window_assumptions.csv",
    output:
-        retro_cost=RESOURCES + "retro_cost_elec_s{simpl}_{clusters}.csv",
-        floor_area=RESOURCES + "floor_area_elec_s{simpl}_{clusters}.csv",
+        retro_cost=RESOURCES + "retro_cost_elec{weather_year}_s{simpl}_{clusters}.csv",
+        floor_area=RESOURCES + "floor_area_elec{weather_year}_s{simpl}_{clusters}.csv",
    resources:
        mem_mb=1000,
    log:
-        LOGS + "build_retro_cost_s{simpl}_{clusters}.log",
+        LOGS + "build_retro_cost{weather_year}_s{simpl}_{clusters}.log",
    benchmark:
-        BENCHMARKS + "build_retro_cost/s{simpl}_{clusters}"
+        BENCHMARKS + "build_retro_cost/{weather_year}_s{simpl}_{clusters}"
    conda:
        "../envs/environment.yaml"
    script:
@@ -605,15 +623,15 @@ if not config["sector"]["retrofitting"]["retro_endogen"]:


rule build_population_weighted_energy_totals:
    input:
-        energy_totals=RESOURCES + "energy_totals.csv",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        energy_totals=RESOURCES + "{kind}_totals.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
    output:
-        RESOURCES + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
+        RESOURCES + "pop_weighted_{kind}_totals{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=2000,
    log:
-        LOGS + "build_population_weighted_energy_totals_s{simpl}_{clusters}.log",
+        LOGS + "build_population_weighted_{kind}_totals{weather_year}_s{simpl}_{clusters}.log",
    conda:
        "../envs/environment.yaml"
    script:
@@ -624,15 +642,15 @@ rule build_shipping_demand:
    input:
        ports="data/attributed_ports.json",
        scope=RESOURCES + "europe_shape.geojson",
-        regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        regions=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
        demand=RESOURCES + "energy_totals.csv",
    output:
-        RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv",
+        RESOURCES + "shipping_demand{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=2000,
    log:
-        LOGS + "build_shipping_demand_s{simpl}_{clusters}.log",
+        LOGS + "build_shipping_demand{weather_year}_s{simpl}_{clusters}.log",
    conda:
        "../envs/environment.yaml"
    script:
@@ -641,23 +659,23 @@ rule build_shipping_demand:


rule build_transport_demand:
    input:
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
        pop_weighted_energy_totals=RESOURCES
-        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
+        + "pop_weighted_energy_totals{weather_year}_s{simpl}_{clusters}.csv",
        transport_data=RESOURCES + "transport_data.csv",
        traffic_data_KFZ="data/emobility/KFZ__count",
        traffic_data_Pkw="data/emobility/Pkw__count",
-        temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
+        temp_air_total=RESOURCES + "temp_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
    output:
-        transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv",
-        transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv",
-        avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv",
-        dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
+        transport_demand=RESOURCES + "transport_demand{weather_year}_s{simpl}_{clusters}.csv",
+        transport_data=RESOURCES + "transport_data{weather_year}_s{simpl}_{clusters}.csv",
+        avail_profile=RESOURCES + "avail_profile{weather_year}_s{simpl}_{clusters}.csv",
+        dsm_profile=RESOURCES + "dsm_profile{weather_year}_s{simpl}_{clusters}.csv",
    threads: 1
    resources:
        mem_mb=2000,
    log:
-        LOGS + "build_transport_demand_s{simpl}_{clusters}.log",
+        LOGS + "build_transport_demand{weather_year}_s{simpl}_{clusters}.log",
    conda:
        "../envs/environment.yaml"
    script:
@@ -673,72 +691,72 @@ rule prepare_sector_network:
        **gas_infrastructure,
        **build_sequestration_potentials_output,
        overrides="data/override_component_attrs",
-        network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
-        energy_totals_name=RESOURCES + "energy_totals.csv",
-        eurostat=input_eurostat,
+        network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
        pop_weighted_energy_totals=RESOURCES
-        + "pop_weighted_energy_totals_s{simpl}_{clusters}.csv",
-        shipping_demand=RESOURCES + "shipping_demand_s{simpl}_{clusters}.csv",
-        transport_demand=RESOURCES + "transport_demand_s{simpl}_{clusters}.csv",
-        transport_data=RESOURCES + "transport_data_s{simpl}_{clusters}.csv",
-        avail_profile=RESOURCES + "avail_profile_s{simpl}_{clusters}.csv",
-        dsm_profile=RESOURCES + "dsm_profile_s{simpl}_{clusters}.csv",
+        + "pop_weighted_energy_totals{weather_year}_s{simpl}_{clusters}.csv",
+        pop_weighted_heat_totals=RESOURCES
+        + "pop_weighted_heat_totals{weather_year}_s{simpl}_{clusters}.csv",
+        shipping_demand=RESOURCES + "shipping_demand{weather_year}_s{simpl}_{clusters}.csv",
+        transport_demand=RESOURCES + "transport_demand{weather_year}_s{simpl}_{clusters}.csv",
+        transport_data=RESOURCES + "transport_data{weather_year}_s{simpl}_{clusters}.csv",
+        avail_profile=RESOURCES + "avail_profile{weather_year}_s{simpl}_{clusters}.csv",
+        dsm_profile=RESOURCES + "dsm_profile{weather_year}_s{simpl}_{clusters}.csv",
        co2_totals_name=RESOURCES + "co2_totals.csv",
        co2="data/eea/UNFCCC_v23.csv",
-        biomass_potentials=RESOURCES + "biomass_potentials_s{simpl}_{clusters}.csv",
+        biomass_potentials=RESOURCES + "biomass_potentials{weather_year}_s{simpl}_{clusters}.csv",
        heat_profile="data/heat_load_profile_BDEW.csv",
        costs="data/costs_{}.csv".format(config["costs"]["year"])
        if config["foresight"] == "overnight"
        else "data/costs_{planning_horizons}.csv",
-        profile_offwind_ac=RESOURCES + "profile_offwind-ac.nc",
-        profile_offwind_dc=RESOURCES + "profile_offwind-dc.nc",
-        h2_cavern=RESOURCES + "salt_cavern_potentials_s{simpl}_{clusters}.csv",
-        busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
-        busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
-        simplified_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}.csv",
+        profile_offwind_ac=RESOURCES + "profile{weather_year}_offwind-ac.nc",
+        profile_offwind_dc=RESOURCES + "profile{weather_year}_offwind-dc.nc",
+        h2_cavern=RESOURCES + "salt_cavern_potentials{weather_year}_s{simpl}_{clusters}.csv",
+        busmap_s=RESOURCES + "busmap_elec{weather_year}_s{simpl}.csv",
+        busmap=RESOURCES + "busmap_elec{weather_year}_s{simpl}_{clusters}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
+        simplified_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}.csv",
        industrial_demand=RESOURCES
-        + "industrial_energy_demand_elec_s{simpl}_{clusters}_{planning_horizons}.csv",
-        heat_demand_urban=RESOURCES + "heat_demand_urban_elec_s{simpl}_{clusters}.nc",
-        heat_demand_rural=RESOURCES + "heat_demand_rural_elec_s{simpl}_{clusters}.nc",
-        heat_demand_total=RESOURCES + "heat_demand_total_elec_s{simpl}_{clusters}.nc",
-        temp_soil_total=RESOURCES + "temp_soil_total_elec_s{simpl}_{clusters}.nc",
-        temp_soil_rural=RESOURCES + "temp_soil_rural_elec_s{simpl}_{clusters}.nc",
-        temp_soil_urban=RESOURCES + "temp_soil_urban_elec_s{simpl}_{clusters}.nc",
-        temp_air_total=RESOURCES + "temp_air_total_elec_s{simpl}_{clusters}.nc",
-        temp_air_rural=RESOURCES + "temp_air_rural_elec_s{simpl}_{clusters}.nc",
-        temp_air_urban=RESOURCES + "temp_air_urban_elec_s{simpl}_{clusters}.nc",
-        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
-        cop_soil_rural=RESOURCES + "cop_soil_rural_elec_s{simpl}_{clusters}.nc",
-        cop_soil_urban=RESOURCES + "cop_soil_urban_elec_s{simpl}_{clusters}.nc",
-        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
-        cop_air_rural=RESOURCES + "cop_air_rural_elec_s{simpl}_{clusters}.nc",
-        cop_air_urban=RESOURCES + "cop_air_urban_elec_s{simpl}_{clusters}.nc",
+        + "industrial_energy_demand_elec{weather_year}_s{simpl}_{clusters}_{planning_horizons}.csv",
+        heat_demand_urban=RESOURCES + "heat_demand_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        heat_demand_rural=RESOURCES + "heat_demand_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        heat_demand_total=RESOURCES + "heat_demand_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_soil_total=RESOURCES + "temp_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_soil_rural=RESOURCES + "temp_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_soil_urban=RESOURCES + "temp_soil_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_total=RESOURCES + "temp_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_rural=RESOURCES + "temp_air_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        temp_air_urban=RESOURCES + "temp_air_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_soil_total=RESOURCES + "cop_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_soil_rural=RESOURCES + "cop_soil_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_soil_urban=RESOURCES + "cop_soil_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_total=RESOURCES + "cop_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_rural=RESOURCES + "cop_air_rural_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_urban=RESOURCES + "cop_air_urban_elec{weather_year}_s{simpl}_{clusters}.nc",
        solar_thermal_total=RESOURCES
-        + "solar_thermal_total_elec_s{simpl}_{clusters}.nc"
+        + "solar_thermal_total_elec{weather_year}_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
        solar_thermal_urban=RESOURCES
-        + "solar_thermal_urban_elec_s{simpl}_{clusters}.nc"
+        + "solar_thermal_urban_elec{weather_year}_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
        solar_thermal_rural=RESOURCES
-        + "solar_thermal_rural_elec_s{simpl}_{clusters}.nc"
+        + "solar_thermal_rural_elec{weather_year}_s{simpl}_{clusters}.nc"
        if config["sector"]["solar_thermal"]
        else [],
    output:
        RESULTS
-        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    threads: 1
    resources:
        mem_mb=2000,
    log:
        LOGS
-        + "prepare_sector_network_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+        + "prepare_sector_network_elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
-            + "prepare_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            + "prepare_sector_network/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
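One detail worth noting in `prepare_sector_network` is the conditional-input idiom: when `config["sector"]["solar_thermal"]` is disabled, the corresponding inputs collapse to an empty list and Snakemake simply has nothing to resolve for them. The same idiom reduced to plain Python, with a toy config dictionary standing in for the real one:

```python
config = {"sector": {"solar_thermal": False}}  # toy config for illustration

solar_thermal_input = (
    "resources/solar_thermal_total_elec2013_s_37.nc"  # hypothetical resolved path
    if config["sector"]["solar_thermal"]
    else []
)
print(solar_thermal_input)  # [] -> Snakemake treats the input as absent
```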
@@ -23,19 +23,13 @@ def memory(w):
    return int(factor * (10000 + 195 * int(w.clusters)))


-def input_eurostat(w):
-    # 2016 includes BA, 2017 does not
-    report_year = config["energy"]["eurostat_report_year"]
-    return f"data/eurostat-energy_balances-june_{report_year}_edition"
-
-
def solved_previous_horizon(wildcards):
    planning_horizons = config["scenario"]["planning_horizons"]
    i = planning_horizons.index(int(wildcards.planning_horizons))
    planning_horizon_p = str(planning_horizons[i - 1])
    return (
        RESULTS
-        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
+        + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_"
        + planning_horizon_p
        + ".nc"
    )
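Two helpers shown above are worth unpacking: `memory(w)` scales the requested solver memory with the number of clusters, and `solved_previous_horizon` points each myopic step at the post-network of the previous planning horizon. A worked example of both; the `factor`, cluster count and horizon list are hypothetical values chosen only for arithmetic:

```python
# Hypothetical inputs; the real values come from wildcards and config.
factor = 3.0   # e.g. after sector options have multiplied the base factor
clusters = 37
print(int(factor * (10000 + 195 * clusters)))  # -> 51645 (MB requested)

planning_horizons = [2030, 2040, 2050]
current = 2040
previous = str(planning_horizons[planning_horizons.index(current) - 1])
print(previous)  # -> "2030": the solved 2030 post-network feeds add_brownfield
```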
@@ -12,20 +12,20 @@ rule plot_network:
    input:
        overrides="data/override_component_attrs",
        network=RESULTS
-        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
-        regions=RESOURCES + "regions_onshore_elec_s{simpl}_{clusters}.geojson",
+        + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        regions=RESOURCES + "regions_onshore_elec{weather_year}_s{simpl}_{clusters}.geojson",
    output:
        map=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+        + "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
        today=RESULTS
-        + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
+        + "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}-today.pdf",
    threads: 2
    resources:
        mem_mb=10000,
    benchmark:
        (
            BENCHMARKS
-            + "plot_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            + "plot_network/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
@@ -72,7 +72,7 @@ rule make_summary:
        overrides="data/override_component_attrs",
        networks=expand(
            RESULTS
-            + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+            + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
            **config["scenario"]
        ),
        costs="data/costs_{}.csv".format(config["costs"]["year"])
@@ -80,7 +80,7 @@ rule make_summary:
        else "data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
        plots=expand(
            RESULTS
-            + "maps/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
+            + "maps/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}-costs-all_{planning_horizons}.pdf",
            **config["scenario"]
        ),
    output:
@@ -103,8 +103,7 @@ if config["enable"].get("retrieve_sector_databundle", True):
        "data/emobility/KFZ__count",
        "data/emobility/Pkw__count",
        "data/h2_salt_caverns_GWh_per_sqkm.geojson",
-        directory("data/eurostat-energy_balances-june_2016_edition"),
-        directory("data/eurostat-energy_balances-may_2018_edition"),
+        directory("data/eurostat-energy_balances-june_2021_edition"),
        directory("data/jrc-idees-2015"),
    ]
@@ -5,19 +5,19 @@


rule solve_network:
    input:
-        network=RESOURCES + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+        network=RESOURCES + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
    output:
-        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+        network=RESULTS + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
    log:
        solver=normpath(
-            LOGS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
+            LOGS + "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_solver.log"
        ),
        python=LOGS
-        + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
+        + "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_python.log",
        memory=LOGS
-        + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
+        + "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_memory.log",
    benchmark:
-        BENCHMARKS + "solve_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
+        BENCHMARKS + "solve_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
    threads: 4
    resources:
        mem_mb=memory,
@@ -31,22 +31,22 @@ rule solve_network:


rule solve_operations_network:
    input:
-        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
+        network=RESULTS + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}.nc",
    output:
-        network=RESULTS + "networks/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
+        network=RESULTS + "networks/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op.nc",
    log:
        solver=normpath(
            LOGS
-            + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
+            + "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_solver.log"
        ),
        python=LOGS
-        + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
+        + "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_python.log",
        memory=LOGS
-        + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
+        + "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}_op_memory.log",
    benchmark:
        (
            BENCHMARKS
-            + "solve_operations_network/elec_s{simpl}_{clusters}_ec_l{ll}_{opts}"
+            + "solve_operations_network/elec{weather_year}_s{simpl}_{clusters}_ec_l{ll}_{opts}"
        )
    threads: 4
    resources:
@@ -7,21 +7,21 @@ rule add_existing_baseyear:
    input:
        overrides="data/override_component_attrs",
        network=RESULTS
-        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        powerplants=RESOURCES + "powerplants.csv",
-        busmap_s=RESOURCES + "busmap_elec_s{simpl}.csv",
-        busmap=RESOURCES + "busmap_elec_s{simpl}_{clusters}.csv",
-        clustered_pop_layout=RESOURCES + "pop_layout_elec_s{simpl}_{clusters}.csv",
+        busmap_s=RESOURCES + "busmap_elec{weather_year}_s{simpl}.csv",
+        busmap=RESOURCES + "busmap_elec{weather_year}_s{simpl}_{clusters}.csv",
+        clustered_pop_layout=RESOURCES + "pop_layout_elec{weather_year}_s{simpl}_{clusters}.csv",
        costs="data/costs_{}.csv".format(config["scenario"]["planning_horizons"][0]),
-        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
-        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
+        cop_soil_total=RESOURCES + "cop_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_total=RESOURCES + "cop_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
        existing_heating="data/existing_infrastructure/existing_heating_raw.csv",
        existing_solar="data/existing_infrastructure/solar_capacity_IRENA.csv",
        existing_onwind="data/existing_infrastructure/onwind_capacity_IRENA.csv",
        existing_offwind="data/existing_infrastructure/offwind_capacity_IRENA.csv",
    output:
        RESULTS
-        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    wildcard_constraints:
        planning_horizons=config["scenario"]["planning_horizons"][0], #only applies to baseyear
    threads: 1
@@ -29,11 +29,11 @@ rule add_existing_baseyear:
        mem_mb=2000,
    log:
        LOGS
-        + "add_existing_baseyear_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+        + "add_existing_baseyear_elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
-            + "add_existing_baseyear/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            + "add_existing_baseyear/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
@@ -45,24 +45,24 @@ rule add_brownfield:
    input:
        overrides="data/override_component_attrs",
        network=RESULTS
-        + "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        network_p=solved_previous_horizon, #solved network at previous time step
        costs="data/costs_{planning_horizons}.csv",
-        cop_soil_total=RESOURCES + "cop_soil_total_elec_s{simpl}_{clusters}.nc",
-        cop_air_total=RESOURCES + "cop_air_total_elec_s{simpl}_{clusters}.nc",
+        cop_soil_total=RESOURCES + "cop_soil_total_elec{weather_year}_s{simpl}_{clusters}.nc",
+        cop_air_total=RESOURCES + "cop_air_total_elec{weather_year}_s{simpl}_{clusters}.nc",
    output:
        RESULTS
-        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    threads: 4
    resources:
        mem_mb=10000,
    log:
        LOGS
-        + "add_brownfield_elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
+        + "add_brownfield_elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.log",
    benchmark:
        (
            BENCHMARKS
-            + "add_brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            + "add_brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
@@ -77,28 +77,28 @@ rule solve_sector_network_myopic:
    input:
        overrides="data/override_component_attrs",
        network=RESULTS
-        + "prenetworks-brownfield/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "prenetworks-brownfield/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
        costs="data/costs_{planning_horizons}.csv",
        config=RESULTS + "config/config.yaml",
    output:
        RESULTS
-        + "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
+        + "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
    shadow:
        "shallow"
    log:
        solver=LOGS
-        + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
+        + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
        python=LOGS
-        + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
+        + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
        memory=LOGS
-        + "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
+        + "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
    threads: 4
    resources:
        mem_mb=config["solving"]["mem"],
    benchmark:
        (
            BENCHMARKS
-            + "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
+            + "solve_sector_network/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
        )
    conda:
        "../envs/environment.yaml"
|
@ -7,22 +7,22 @@ rule solve_sector_network:
|
||||
input:
|
||||
overrides="data/override_component_attrs",
|
||||
network=RESULTS
|
||||
+ "prenetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
+ "prenetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
costs="data/costs_{}.csv".format(config["costs"]["year"]),
|
||||
config=RESULTS + "config/config.yaml",
|
||||
#env=RDIR + 'config/environment.yaml',
|
||||
output:
|
||||
RESULTS
|
||||
+ "postnetworks/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
+ "postnetworks/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
shadow:
|
||||
"shallow"
|
||||
log:
|
||||
solver=LOGS
|
||||
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
|
||||
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_solver.log",
|
||||
python=LOGS
|
||||
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
|
||||
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_python.log",
|
||||
memory=LOGS
|
||||
+ "elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
|
||||
+ "elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}_memory.log",
|
||||
threads: config["solving"]["solver"].get("threads", 4)
|
||||
resources:
|
||||
mem_mb=config["solving"]["mem"],
|
||||
@ -30,9 +30,42 @@ rule solve_sector_network:
|
||||
(
|
||||
RESULTS
|
||||
+ BENCHMARKS
|
||||
+ "solve_sector_network/elec_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
|
||||
+ "solve_sector_network/elec{weather_year}_s{simpl}_{clusters}_l{ll}_{opts}_{sector_opts}_{planning_horizons}"
|
||||
)
|
||||
conda:
|
||||
"../envs/environment.yaml"
|
||||
script:
|
||||
"../scripts/solve_network.py"
|
||||
|
||||
|
||||
rule solve_operations_network_other_year:
|
||||
input:
|
||||
overrides="data/override_component_attrs",
|
||||
pre=RDIR + "/prenetworks/elec{weather_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
post=RDIR + "/postnetworks/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
output: RDIR + "/operations/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}.nc"
|
||||
shadow: "shallow"
|
||||
log:
|
||||
solver=RDIR + "/logs/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}_solver.log",
|
||||
python=RDIR + "/logs/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}_python.log",
|
||||
threads: 4
|
||||
resources: mem_mb=10000
|
||||
benchmark: RDIR + "/benchmarks/solve_operations_network/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}"
|
||||
script: "../scripts/solve_operations_network_other_year.py"
|
||||
|
||||
|
||||
rule solve_operations_network_other_year_myopic:
|
||||
input:
|
||||
overrides="data/override_component_attrs",
|
||||
pre=RDIR + "/prenetworks/elec{weather_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
post=RDIR + "/postnetworks/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc",
|
||||
previous=solved_previous_year
|
||||
output: RDIR + "/operations/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}_myopic.nc"
|
||||
shadow: "shallow"
|
||||
log:
|
||||
solver=RDIR + "/logs/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}_solver.log",
|
||||
python=RDIR + "/logs/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}_python.log",
|
||||
threads: 4
|
||||
resources: mem_mb=10000
|
||||
benchmark: RDIR + "/benchmarks/solve_operations_network_myopic/elec{capacity_year}_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}_{weather_year}"
|
||||
script: "../scripts/solve_operations_network_other_year_myopic.py"
|
||||
|
@ -131,6 +131,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"add_brownfield",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters="37",
|
||||
opts="",
|
||||
|
@ -722,7 +722,7 @@ if __name__ == "__main__":
|
||||
weather_year = snakemake.wildcards.weather_year
|
||||
if weather_year:
|
||||
snapshots = dict(
|
||||
start=weather_year, end=str(int(weather_year) + 1), closed="left"
|
||||
start=weather_year, end=str(int(weather_year) + 1), inclusive="left"
|
||||
)
|
||||
else:
|
||||
snapshots = snakemake.config["snapshots"]
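The snapshot dictionary built above expands to one full calendar year of hourly timestamps; a minimal, self-contained check with a made-up weather year:

import pandas as pd

# Minimal sketch: a weather_year of "2013" becomes one calendar year of hourly snapshots
snapshots = dict(start="2013", end="2014", inclusive="left")
index = pd.date_range(freq="h", **snapshots)
assert index[0] == pd.Timestamp("2013-01-01 00:00")
assert index[-1] == pd.Timestamp("2013-12-31 23:00")
print(len(index))  # 8760 hours (2013 is not a leap year)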
|
||||
|
@ -600,6 +600,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"add_existing_baseyear",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters="45",
|
||||
ll="v1.0",
|
||||
|
@ -23,7 +23,7 @@ if __name__ == "__main__":
|
||||
snapshots = dict(
|
||||
start=weather_year,
|
||||
end=str(int(weather_year)+1),
|
||||
closed="left"
|
||||
inclusive="left"
|
||||
)
|
||||
else:
|
||||
snapshots = snakemake.config['snapshots']
|
||||
|
@ -208,7 +208,7 @@ if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake("build_biomass_potentials", simpl="", clusters="5")
|
||||
snakemake = mock_snakemake("build_biomass_potentials", weather_year="", simpl="", clusters="5")
|
||||
|
||||
config = snakemake.config["biomass"]
|
||||
year = config["year"]
|
||||
|
@ -18,11 +18,15 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_clustered_population_layouts",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
cutout = atlite.Cutout(snakemake.input.cutout)
|
||||
cutout_name = snakemake.input.cutout
|
||||
year = snakemake.wildcards.weather_year
|
||||
if year: cutout_name = cutout_name.format(weather_year=year)
|
||||
cutout = atlite.Cutout(cutout_name)
|
||||
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
|
@ -31,6 +31,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_cop_profiles",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
@ -282,7 +282,7 @@ if __name__ == "__main__":
|
||||
weather_year = snakemake.wildcards.weather_year
|
||||
if weather_year:
|
||||
snapshots = dict(
|
||||
start=weather_year, end=str(int(weather_year) + 1), closed="left"
|
||||
start=weather_year, end=str(int(weather_year) + 1), inclusive="left"
|
||||
)
|
||||
else:
|
||||
snapshots = snakemake.config["snapshots"]
|
||||
|
@ -38,54 +38,7 @@ def reverse(dictionary):
|
||||
"""
|
||||
return {v: k for k, v in dictionary.items()}
|
||||
|
||||
|
||||
eurostat_codes = {
|
||||
"EU28": "EU",
|
||||
"EA19": "EA",
|
||||
"Belgium": "BE",
|
||||
"Bulgaria": "BG",
|
||||
"Czech Republic": "CZ",
|
||||
"Denmark": "DK",
|
||||
"Germany": "DE",
|
||||
"Estonia": "EE",
|
||||
"Ireland": "IE",
|
||||
"Greece": "GR",
|
||||
"Spain": "ES",
|
||||
"France": "FR",
|
||||
"Croatia": "HR",
|
||||
"Italy": "IT",
|
||||
"Cyprus": "CY",
|
||||
"Latvia": "LV",
|
||||
"Lithuania": "LT",
|
||||
"Luxembourg": "LU",
|
||||
"Hungary": "HU",
|
||||
"Malta": "MA",
|
||||
"Netherlands": "NL",
|
||||
"Austria": "AT",
|
||||
"Poland": "PL",
|
||||
"Portugal": "PT",
|
||||
"Romania": "RO",
|
||||
"Slovenia": "SI",
|
||||
"Slovakia": "SK",
|
||||
"Finland": "FI",
|
||||
"Sweden": "SE",
|
||||
"United Kingdom": "GB",
|
||||
"Iceland": "IS",
|
||||
"Norway": "NO",
|
||||
"Montenegro": "ME",
|
||||
"FYR of Macedonia": "MK",
|
||||
"Albania": "AL",
|
||||
"Serbia": "RS",
|
||||
"Turkey": "TU",
|
||||
"Bosnia and Herzegovina": "BA",
|
||||
"Kosovo\n(UNSCR 1244/99)": "KO", # 2017 version
|
||||
# 2016 version
|
||||
"Kosovo\n(under United Nations Security Council Resolution 1244/99)": "KO",
|
||||
"Moldova": "MO",
|
||||
"Ukraine": "UK",
|
||||
"Switzerland": "CH",
|
||||
}
|
||||
|
||||
non_EU = ["NO", "CH", "ME", "MK", "RS", "BA", "AL"]
|
||||
|
||||
idees_rename = {"GR": "EL", "GB": "UK"}
|
||||
|
||||
@ -119,51 +72,72 @@ to_ipcc = {
|
||||
}
|
||||
|
||||
|
||||
def build_eurostat(input_eurostat, countries, report_year, year):
|
||||
"""
|
||||
Return multi-index for all countries' energy data in TWh/a.
|
||||
"""
|
||||
filenames = {
|
||||
2016: f"/{year}-Energy-Balances-June2016edition.xlsx",
|
||||
2017: f"/{year}-ENERGY-BALANCES-June2017edition.xlsx",
|
||||
}
|
||||
def eurostat_per_country(country):
|
||||
|
||||
country_fn = idees_rename.get(country, country)
|
||||
fn = snakemake.input.eurostat + f"/{country_fn}-Energy-balance-sheets-June-2021-edition.xlsb"
|
||||
|
||||
with mute_print():
|
||||
dfs = pd.read_excel(
|
||||
input_eurostat + filenames[report_year],
|
||||
sheet_name=None,
|
||||
skiprows=1,
|
||||
index_col=list(range(4)),
|
||||
)
|
||||
df = pd.read_excel(
|
||||
fn,
|
||||
sheet_name=None,
|
||||
skiprows=4,
|
||||
index_col=list(range(3)),
|
||||
na_values=["+", "-", "=", "Z", ":"],
|
||||
)
|
||||
|
||||
# sorted_index necessary for slicing
|
||||
lookup = eurostat_codes
|
||||
labelled_dfs = {
|
||||
lookup[df.columns[0]]: df
|
||||
for df in dfs.values()
|
||||
if lookup[df.columns[0]] in countries
|
||||
}
|
||||
df = pd.concat(labelled_dfs, sort=True).sort_index()
|
||||
df.pop("Cover")
|
||||
|
||||
# drop non-numeric and country columns
|
||||
non_numeric_cols = df.columns[df.dtypes != float]
|
||||
country_cols = df.columns.intersection(lookup.keys())
|
||||
to_drop = non_numeric_cols.union(country_cols)
|
||||
df.drop(to_drop, axis=1, inplace=True)
|
||||
return pd.concat(df)
|
||||
|
||||
|
||||
def build_eurostat(countries, year=None):
|
||||
"""Return multi-index for all countries' energy data in TWh/a."""
|
||||
|
||||
nprocesses = snakemake.threads
|
||||
tqdm_kwargs = dict(ascii=False, unit=' country', total=len(countries),
|
||||
desc='Build from eurostat database')
|
||||
with mp.Pool(processes=nprocesses) as pool:
|
||||
dfs = list(tqdm(pool.imap(eurostat_per_country, countries), **tqdm_kwargs))
|
||||
|
||||
index_names = ['country', 'year', 'lvl1', 'lvl2', 'lvl3']
|
||||
df = pd.concat(dfs, keys=countries, names=index_names)
|
||||
|
||||
df.dropna(how='all', axis=0, inplace=True)
|
||||
df.dropna(how='all', axis=1, inplace=True)
|
||||
df = df[df.index.get_level_values('lvl1') != 'ktoe']
|
||||
|
||||
i = df.index.to_frame(index=False)
|
||||
i.loc[i.lvl2 == 'Primary production', ['lvl1', 'lvl3']] = 'Main'
|
||||
i.loc[i.lvl2 == 'Gross electricity production', 'lvl1'] = "Gross production"
|
||||
i.ffill(inplace=True)
|
||||
df.index = pd.MultiIndex.from_frame(i)
|
||||
|
||||
df.drop(list(range(1990, 2020)), axis=1, inplace=True)
|
||||
df.drop("Unnamed: 7", axis=1, inplace=True)
|
||||
df.fillna(0., inplace=True)
|
||||
|
||||
# convert ktoe/a to TWh/a
|
||||
df *= 11.63 / 1e3
|
||||
|
||||
df.index = df.index.set_levels(df.index.levels[1].astype(int), level=1)
|
||||
|
||||
if year:
|
||||
df = df.xs(year, level='year')
|
||||
|
||||
return df
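A small, self-contained check of the ktoe-to-TWh conversion factor applied above (1 ktoe = 11.63 GWh), using made-up fuel quantities:

import pandas as pd

# Toy data in ktoe/a; the factor 11.63 / 1e3 converts ktoe to TWh
ktoe = pd.Series({"Natural gas": 1000.0, "Oil and petroleum products": 500.0})
twh = ktoe * 11.63 / 1e3
print(twh)  # Natural gas 11.63 TWh/a, Oil and petroleum products 5.815 TWh/a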
|
||||
|
||||
|
||||
def build_swiss(year):
|
||||
"""
|
||||
Return a pd.Series of Swiss energy data in TWh/a.
|
||||
"""
|
||||
def build_swiss(year=None):
|
||||
"""Return a pd.DataFrame of Swiss energy data in TWh/a"""
|
||||
|
||||
fn = snakemake.input.swiss
|
||||
|
||||
df = pd.read_csv(fn, index_col=[0, 1]).loc["CH", str(year)]
|
||||
df = pd.read_csv(fn, index_col=[0,1]).stack().unstack('item')
|
||||
df.index.names = ["country", "year"]
|
||||
df.index = df.index.set_levels(df.index.levels[1].astype(int), level=1)
|
||||
|
||||
if year:
|
||||
df = df.xs(year, level='year')
|
||||
|
||||
# convert PJ/a to TWh/a
|
||||
df /= 3.6
|
||||
@ -171,86 +145,87 @@ def build_swiss(year):
|
||||
return df
|
||||
|
||||
|
||||
def idees_per_country(ct, year, base_dir):
|
||||
def idees_per_country(country, base_dir):
|
||||
|
||||
ct_totals = {}
|
||||
|
||||
ct_idees = idees_rename.get(ct, ct)
|
||||
ct_idees = idees_rename.get(country, country)
|
||||
fn_residential = f"{base_dir}/JRC-IDEES-2015_Residential_{ct_idees}.xlsx"
|
||||
fn_tertiary = f"{base_dir}/JRC-IDEES-2015_Tertiary_{ct_idees}.xlsx"
|
||||
fn_transport = f"{base_dir}/JRC-IDEES-2015_Transport_{ct_idees}.xlsx"
|
||||
|
||||
# residential
|
||||
|
||||
df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)[year]
|
||||
df = pd.read_excel(fn_residential, "RES_hh_fec", index_col=0)
|
||||
|
||||
ct_totals["total residential space"] = df["Space heating"]
|
||||
ct_totals["total residential space"] = df.loc["Space heating"]
|
||||
|
||||
rows = ["Advanced electric heating", "Conventional electric heating"]
|
||||
ct_totals["electricity residential space"] = df[rows].sum()
|
||||
ct_totals["electricity residential space"] = df.loc[rows].sum()
|
||||
|
||||
ct_totals["total residential water"] = df.at["Water heating"]
|
||||
ct_totals["total residential water"] = df.loc["Water heating"]
|
||||
|
||||
assert df.index[23] == "Electricity"
|
||||
ct_totals["electricity residential water"] = df[23]
|
||||
ct_totals["electricity residential water"] = df.iloc[23]
|
||||
|
||||
ct_totals["total residential cooking"] = df["Cooking"]
|
||||
ct_totals["total residential cooking"] = df.loc["Cooking"]
|
||||
|
||||
assert df.index[30] == "Electricity"
|
||||
ct_totals["electricity residential cooking"] = df[30]
|
||||
ct_totals["electricity residential cooking"] = df.iloc[30]
|
||||
|
||||
df = pd.read_excel(fn_residential, "RES_summary", index_col=0)[year]
|
||||
df = pd.read_excel(fn_residential, "RES_summary", index_col=0)
|
||||
|
||||
row = "Energy consumption by fuel - Eurostat structure (ktoe)"
|
||||
ct_totals["total residential"] = df[row]
|
||||
ct_totals["total residential"] = df.loc[row]
|
||||
|
||||
assert df.index[47] == "Electricity"
|
||||
ct_totals["electricity residential"] = df[47]
|
||||
ct_totals["electricity residential"] = df.iloc[47]
|
||||
|
||||
assert df.index[46] == "Derived heat"
|
||||
ct_totals["derived heat residential"] = df[46]
|
||||
ct_totals["derived heat residential"] = df.iloc[46]
|
||||
|
||||
assert df.index[50] == "Thermal uses"
|
||||
ct_totals["thermal uses residential"] = df[50]
|
||||
assert df.index[50] == 'Thermal uses'
|
||||
ct_totals["thermal uses residential"] = df.iloc[50]
|
||||
|
||||
# services
|
||||
|
||||
df = pd.read_excel(fn_tertiary, "SER_hh_fec", index_col=0)[year]
|
||||
df = pd.read_excel(fn_tertiary, "SER_hh_fec", index_col=0)
|
||||
|
||||
ct_totals["total services space"] = df["Space heating"]
|
||||
ct_totals["total services space"] = df.loc["Space heating"]
|
||||
|
||||
rows = ["Advanced electric heating", "Conventional electric heating"]
|
||||
ct_totals["electricity services space"] = df[rows].sum()
|
||||
ct_totals["electricity services space"] = df.loc[rows].sum()
|
||||
|
||||
ct_totals["total services water"] = df["Hot water"]
|
||||
ct_totals["total services water"] = df.loc["Hot water"]
|
||||
|
||||
assert df.index[24] == "Electricity"
|
||||
ct_totals["electricity services water"] = df[24]
|
||||
ct_totals["electricity services water"] = df.iloc[24]
|
||||
|
||||
ct_totals["total services cooking"] = df["Catering"]
|
||||
ct_totals["total services cooking"] = df.loc["Catering"]
|
||||
|
||||
assert df.index[31] == "Electricity"
|
||||
ct_totals["electricity services cooking"] = df[31]
|
||||
ct_totals["electricity services cooking"] = df.iloc[31]
|
||||
|
||||
df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)[year]
|
||||
df = pd.read_excel(fn_tertiary, "SER_summary", index_col=0)
|
||||
|
||||
row = "Energy consumption by fuel - Eurostat structure (ktoe)"
|
||||
ct_totals["total services"] = df[row]
|
||||
ct_totals["total services"] = df.loc[row]
|
||||
|
||||
assert df.index[50] == "Electricity"
|
||||
ct_totals["electricity services"] = df[50]
|
||||
ct_totals["electricity services"] = df.iloc[50]
|
||||
|
||||
assert df.index[49] == "Derived heat"
|
||||
ct_totals["derived heat services"] = df[49]
|
||||
ct_totals["derived heat services"] = df.iloc[49]
|
||||
|
||||
assert df.index[53] == "Thermal uses"
|
||||
ct_totals["thermal uses services"] = df[53]
|
||||
assert df.index[53] == 'Thermal uses'
|
||||
ct_totals["thermal uses services"] = df.iloc[53]
|
||||
|
||||
# agriculture, forestry and fishing
|
||||
|
||||
start = "Detailed split of energy consumption (ktoe)"
|
||||
end = "Market shares of energy uses (%)"
|
||||
|
||||
df = pd.read_excel(fn_tertiary, "AGR_fec", index_col=0).loc[start:end, year]
|
||||
df = pd.read_excel(fn_tertiary, "AGR_fec", index_col=0).loc[start:end]
|
||||
|
||||
rows = [
|
||||
"Lighting",
|
||||
@ -258,95 +233,95 @@ def idees_per_country(ct, year, base_dir):
|
||||
"Specific electricity uses",
|
||||
"Pumping devices (electric)",
|
||||
]
|
||||
ct_totals["total agriculture electricity"] = df[rows].sum()
|
||||
ct_totals["total agriculture electricity"] = df.loc[rows].sum()
|
||||
|
||||
rows = ["Specific heat uses", "Low enthalpy heat"]
|
||||
ct_totals["total agriculture heat"] = df[rows].sum()
|
||||
ct_totals["total agriculture heat"] = df.loc[rows].sum()
|
||||
|
||||
rows = [
|
||||
"Motor drives",
|
||||
"Farming machine drives (diesel oil incl. biofuels)",
|
||||
"Pumping devices (diesel oil incl. biofuels)",
|
||||
]
|
||||
ct_totals["total agriculture machinery"] = df[rows].sum()
|
||||
ct_totals["total agriculture machinery"] = df.loc[rows].sum()
|
||||
|
||||
row = "Agriculture, forestry and fishing"
|
||||
ct_totals["total agriculture"] = df[row]
|
||||
ct_totals["total agriculture"] = df.loc[row]
|
||||
|
||||
# transport
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrRoad_ene", index_col=0)[year]
|
||||
df = pd.read_excel(fn_transport, "TrRoad_ene", index_col=0)
|
||||
|
||||
ct_totals["total road"] = df["by fuel (EUROSTAT DATA)"]
|
||||
ct_totals["total road"] = df.loc["by fuel (EUROSTAT DATA)"]
|
||||
|
||||
ct_totals["electricity road"] = df["Electricity"]
|
||||
ct_totals["electricity road"] = df.loc["Electricity"]
|
||||
|
||||
ct_totals["total two-wheel"] = df["Powered 2-wheelers (Gasoline)"]
|
||||
ct_totals["total two-wheel"] = df.loc["Powered 2-wheelers (Gasoline)"]
|
||||
|
||||
assert df.index[19] == "Passenger cars"
|
||||
ct_totals["total passenger cars"] = df[19]
|
||||
ct_totals["total passenger cars"] = df.iloc[19]
|
||||
|
||||
assert df.index[30] == "Battery electric vehicles"
|
||||
ct_totals["electricity passenger cars"] = df[30]
|
||||
ct_totals["electricity passenger cars"] = df.iloc[30]
|
||||
|
||||
assert df.index[31] == "Motor coaches, buses and trolley buses"
|
||||
ct_totals["total other road passenger"] = df[31]
|
||||
ct_totals["total other road passenger"] = df.iloc[31]
|
||||
|
||||
assert df.index[39] == "Battery electric vehicles"
|
||||
ct_totals["electricity other road passenger"] = df[39]
|
||||
ct_totals["electricity other road passenger"] = df.iloc[39]
|
||||
|
||||
assert df.index[41] == "Light duty vehicles"
|
||||
ct_totals["total light duty road freight"] = df[41]
|
||||
ct_totals["total light duty road freight"] = df.iloc[41]
|
||||
|
||||
assert df.index[49] == "Battery electric vehicles"
|
||||
ct_totals["electricity light duty road freight"] = df[49]
|
||||
ct_totals["electricity light duty road freight"] = df.iloc[49]
|
||||
|
||||
row = "Heavy duty vehicles (Diesel oil incl. biofuels)"
|
||||
ct_totals["total heavy duty road freight"] = df[row]
|
||||
ct_totals["total heavy duty road freight"] = df.loc[row]
|
||||
|
||||
assert df.index[61] == "Passenger cars"
|
||||
ct_totals["passenger car efficiency"] = df[61]
|
||||
ct_totals["passenger car efficiency"] = df.iloc[61]
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0)[year]
|
||||
df = pd.read_excel(fn_transport, "TrRail_ene", index_col=0)
|
||||
|
||||
ct_totals["total rail"] = df["by fuel (EUROSTAT DATA)"]
|
||||
ct_totals["total rail"] = df.loc["by fuel (EUROSTAT DATA)"]
|
||||
|
||||
ct_totals["electricity rail"] = df["Electricity"]
|
||||
ct_totals["electricity rail"] = df.loc["Electricity"]
|
||||
|
||||
assert df.index[15] == "Passenger transport"
|
||||
ct_totals["total rail passenger"] = df[15]
|
||||
ct_totals["total rail passenger"] = df.iloc[15]
|
||||
|
||||
assert df.index[16] == "Metro and tram, urban light rail"
|
||||
assert df.index[19] == "Electric"
|
||||
assert df.index[20] == "High speed passenger trains"
|
||||
ct_totals["electricity rail passenger"] = df[[16, 19, 20]].sum()
|
||||
ct_totals["electricity rail passenger"] = df.iloc[[16, 19, 20]].sum()
|
||||
|
||||
assert df.index[21] == "Freight transport"
|
||||
ct_totals["total rail freight"] = df[21]
|
||||
ct_totals["total rail freight"] = df.iloc[21]
|
||||
|
||||
assert df.index[23] == "Electric"
|
||||
ct_totals["electricity rail freight"] = df[23]
|
||||
ct_totals["electricity rail freight"] = df.iloc[23]
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0)[year]
|
||||
df = pd.read_excel(fn_transport, "TrAvia_ene", index_col=0)
|
||||
|
||||
assert df.index[6] == "Passenger transport"
|
||||
ct_totals["total aviation passenger"] = df[6]
|
||||
ct_totals["total aviation passenger"] = df.iloc[6]
|
||||
|
||||
assert df.index[10] == "Freight transport"
|
||||
ct_totals["total aviation freight"] = df[10]
|
||||
ct_totals["total aviation freight"] = df.iloc[10]
|
||||
|
||||
assert df.index[7] == "Domestic"
|
||||
ct_totals["total domestic aviation passenger"] = df[7]
|
||||
ct_totals["total domestic aviation passenger"] = df.iloc[7]
|
||||
|
||||
assert df.index[8] == "International - Intra-EU"
|
||||
assert df.index[9] == "International - Extra-EU"
|
||||
ct_totals["total international aviation passenger"] = df[[8, 9]].sum()
|
||||
ct_totals["total international aviation passenger"] = df.iloc[[8,9]].sum()
|
||||
|
||||
assert df.index[11] == "Domestic and International - Intra-EU"
|
||||
ct_totals["total domestic aviation freight"] = df[11]
|
||||
ct_totals["total domestic aviation freight"] = df.iloc[11]
|
||||
|
||||
assert df.index[12] == "International - Extra-EU"
|
||||
ct_totals["total international aviation freight"] = df[12]
|
||||
ct_totals["total international aviation freight"] = df.iloc[12]
|
||||
|
||||
ct_totals["total domestic aviation"] = (
|
||||
ct_totals["total domestic aviation freight"]
|
||||
@ -358,67 +333,91 @@ def idees_per_country(ct, year, base_dir):
|
||||
+ ct_totals["total international aviation passenger"]
|
||||
)
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)[year]
|
||||
df = pd.read_excel(fn_transport, "TrNavi_ene", index_col=0)
|
||||
|
||||
# coastal and inland
|
||||
ct_totals["total domestic navigation"] = df["by fuel (EUROSTAT DATA)"]
|
||||
ct_totals["total domestic navigation"] = df.loc["by fuel (EUROSTAT DATA)"]
|
||||
|
||||
df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0)[year]
|
||||
df = pd.read_excel(fn_transport, "TrRoad_act", index_col=0)
|
||||
|
||||
assert df.index[85] == "Passenger cars"
|
||||
ct_totals["passenger cars"] = df[85]
|
||||
ct_totals["passenger cars"] = df.iloc[85]
|
||||
|
||||
return pd.Series(ct_totals, name=ct)
|
||||
return pd.DataFrame(ct_totals)
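The switch from year-column selection (`df[year]`) to row selection (`df.loc`/`df.iloc`) keeps all year columns, and positional access with asserts is retained because the IDEES sheets repeat row labels. A toy illustration with made-up numbers:

import pandas as pd

# Repeated row labels make .loc ambiguous, so position plus an assert is used instead
df = pd.DataFrame(
    {2013: [10.0, 2.0, 3.0], 2014: [11.0, 2.5, 3.5]},
    index=["Space heating", "Electricity", "Electricity"],
)
assert df.index[1] == "Electricity"
print(df.iloc[1])              # one unambiguous row, across all years
# df.loc["Electricity"] would return both "Electricity" rows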
|
||||
|
||||
|
||||
def build_idees(countries, year):
|
||||
def build_idees(countries, year=None):
|
||||
|
||||
nprocesses = snakemake.threads
|
||||
disable_progress = snakemake.config["run"].get("disable_progressbar", False)
|
||||
|
||||
|
||||
func = partial(idees_per_country, year=year, base_dir=snakemake.input.idees)
|
||||
tqdm_kwargs = dict(
|
||||
ascii=False,
|
||||
unit=" country",
|
||||
unit="" country",
|
||||
total=len(countries),
|
||||
desc="Build from IDEES database",
|
||||
disable=disable_progress,
|
||||
disable=disable_progress
|
||||
)
|
||||
|
||||
with mute_print():
|
||||
with mp.Pool(processes=nprocesses) as pool:
|
||||
totals_list = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
|
||||
dfs = list(tqdm(pool.imap(func, countries), **tqdm_kwargs))
|
||||
|
||||
totals = pd.concat(totals_list, axis=1)
|
||||
df = pd.concat(dfs, keys=countries, names=['country', 'year'])
|
||||
|
||||
# convert ktoe to TWh
|
||||
exclude = totals.index.str.fullmatch("passenger cars")
|
||||
totals.loc[~exclude] *= 11.63 / 1e3
|
||||
exclude = df.columns.str.fullmatch("passenger cars")
|
||||
df.loc[:,~exclude] *= 11.63 / 1e3
|
||||
|
||||
# convert TWh/100km to kWh/km
|
||||
totals.loc["passenger car efficiency"] *= 10
|
||||
df["passenger car efficiency"] *= 10
|
||||
|
||||
# district heating share
|
||||
district_heat = totals.loc[
|
||||
["derived heat residential", "derived heat services"]
|
||||
].sum()
|
||||
total_heat = totals.loc[["thermal uses residential", "thermal uses services"]].sum()
|
||||
totals.loc["district heat share"] = district_heat.div(total_heat)
|
||||
subset = ["derived heat residential", "derived heat services"]
|
||||
district_heat = df[subset].sum(axis=1)
|
||||
subset = ["thermal uses residential", "thermal uses services"]
|
||||
total_heat = df[subset].sum(axis=1)
|
||||
df["district heat share"] = district_heat.div(total_heat)
|
||||
|
||||
return totals.T
|
||||
if year:
|
||||
df = df.xs(int(year), level='year')
|
||||
|
||||
df.columns.name = 'item'
|
||||
|
||||
return df
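The district heat share derived above is the ratio of derived heat to all thermal uses per country and year; a sketch with made-up values:

import pandas as pd

# Toy illustration of the district heat share ratio
df = pd.DataFrame({
    "derived heat residential": [10.0],
    "derived heat services": [5.0],
    "thermal uses residential": [40.0],
    "thermal uses services": [20.0],
})
district_heat = df[["derived heat residential", "derived heat services"]].sum(axis=1)
total_heat = df[["thermal uses residential", "thermal uses services"]].sum(axis=1)
print(district_heat.div(total_heat))  # 15 / 60 = 0.25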
|
||||
|
||||
|
||||
def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
eurostat_fuels = {"electricity": "Electricity", "total": "Total all products"}
|
||||
|
||||
eurostat_fuels = dict(
|
||||
electricity="Electricity",
|
||||
total="Total"
|
||||
)
|
||||
|
||||
eurostat_sectors = dict(
|
||||
residential="Households",
|
||||
services="Commercial & public services",
|
||||
road="Road",
|
||||
rail="Rail"
|
||||
)
|
||||
|
||||
to_drop = ["passenger cars", "passenger car efficiency"]
|
||||
df = idees.reindex(countries).drop(to_drop, axis=1)
|
||||
|
||||
new_index = pd.MultiIndex.from_product(
|
||||
[countries, eurostat.index.levels[1]],
|
||||
names=["country", "year"]
|
||||
)
|
||||
|
||||
df = idees.reindex(new_index).drop(to_drop, axis=1)
|
||||
|
||||
eurostat_countries = eurostat.index.levels[0]
|
||||
in_eurostat = df.index.intersection(eurostat_countries)
|
||||
in_eurostat = df.index.levels[0].intersection(eurostat_countries)
|
||||
|
||||
# add international navigation
|
||||
|
||||
slicer = idx[in_eurostat, :, "Bunkers", :]
|
||||
fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum()
|
||||
slicer = idx[in_eurostat, :, :, "International maritime bunkers", :]
|
||||
fill_values = eurostat.loc[slicer, "Total"].groupby(level=[0,1]).sum()
|
||||
df.loc[in_eurostat, "total international navigation"] = fill_values
|
||||
|
||||
# add swiss energy data
|
||||
@ -428,20 +427,20 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
# get values for missing countries based on Eurostat EnergyBalances
|
||||
# divide cooking/space/water according to averages in EU28
|
||||
|
||||
missing = df.index[df["total residential"].isna()]
|
||||
to_fill = missing.intersection(eurostat_countries)
|
||||
to_fill = df.index[df["total residential"].isna() & df.index.get_level_values('country').isin(eurostat_countries)]
|
||||
uses = ["space", "cooking", "water"]
|
||||
|
||||
c = to_fill.get_level_values('country')
|
||||
y = to_fill.get_level_values('year')
|
||||
|
||||
for sector in ["residential", "services", "road", "rail"]:
|
||||
eurostat_sector = sector.capitalize()
|
||||
|
||||
# fuel use
|
||||
|
||||
for fuel in ["electricity", "total"]:
|
||||
slicer = idx[to_fill, :, :, eurostat_sector]
|
||||
fill_values = (
|
||||
eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=0).sum()
|
||||
)
|
||||
|
||||
slicer = idx[c, y, :, :, eurostat_sectors[sector]]
|
||||
fill_values = eurostat.loc[slicer, eurostat_fuels[fuel]].groupby(level=[0,1]).sum()
|
||||
df.loc[to_fill, f"{fuel} {sector}"] = fill_values
|
||||
|
||||
for sector in ["residential", "services"]:
|
||||
@ -502,30 +501,30 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
no_norway[f"total {sector}"] - no_norway[f"electricity {sector}"]
|
||||
)
|
||||
fraction = nonelectric_use.div(nonelectric).mean()
|
||||
df.loc["NO", f"total {sector} {use}"] = total_heating * fraction
|
||||
df.loc["NO", f"total {sector} {use}"] = (total_heating * fraction).values
|
||||
df.loc["NO", f"electricity {sector} {use}"] = (
|
||||
total_heating * fraction * elec_fraction
|
||||
)
|
||||
).values
|
||||
|
||||
# Missing aviation
|
||||
|
||||
slicer = idx[to_fill, :, :, "Domestic aviation"]
|
||||
fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum()
|
||||
slicer = idx[c, y, :, :, "Domestic aviation"]
|
||||
fill_values = eurostat.loc[slicer, "Total"].groupby(level=[0,1]).sum()
|
||||
df.loc[to_fill, "total domestic aviation"] = fill_values
|
||||
|
||||
slicer = idx[to_fill, :, :, "International aviation"]
|
||||
fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum()
|
||||
slicer = idx[c, y, :, "International aviation", :]
|
||||
fill_values = eurostat.loc[slicer, "Total"].groupby(level=[0,1]).sum()
|
||||
df.loc[to_fill, "total international aviation"] = fill_values
|
||||
|
||||
# missing domestic navigation
|
||||
|
||||
slicer = idx[to_fill, :, :, "Domestic Navigation"]
|
||||
fill_values = eurostat.loc[slicer, "Total all products"].groupby(level=0).sum()
|
||||
slicer = idx[c, y, :, :, "Domestic navigation"]
|
||||
fill_values = eurostat.loc[slicer, "Total"].groupby(level=[0,1]).sum()
|
||||
df.loc[to_fill, "total domestic navigation"] = fill_values
|
||||
|
||||
# split road traffic for non-IDEES
|
||||
missing = df.index[df["total passenger cars"].isna()]
|
||||
for fuel in ["total", "electricity"]:
|
||||
for fuel in ["electricity", "total"]:
|
||||
selection = [
|
||||
f"{fuel} passenger cars",
|
||||
f"{fuel} other road passenger",
|
||||
@ -571,20 +570,20 @@ def build_energy_totals(countries, eurostat, swiss, idees):
|
||||
)
|
||||
|
||||
if "BA" in df.index:
|
||||
# fill missing data for BA (services and road energy data)
|
||||
# proportional to RS with ratio of total residential demand
|
||||
missing = df.loc["BA"] == 0.0
|
||||
ratio = df.at["BA", "total residential"] / df.at["RS", "total residential"]
|
||||
df.loc["BA", missing] = ratio * df.loc["RS", missing]
|
||||
# fill missing data for BA proportional to RS
|
||||
ratio = (df.loc["BA"].loc[2014:2020] / df.loc["RS"].loc[2014:2020]).mean()
|
||||
df.loc["BA"] = (ratio * df.loc["RS"]).values
|
||||
|
||||
# Missing district heating share
|
||||
dh_share = pd.read_csv(
|
||||
snakemake.input.district_heat_share, index_col=0, usecols=[0, 1]
|
||||
)
|
||||
dh_share = pd.read_csv(snakemake.input.district_heat_share,
|
||||
index_col=0, usecols=[0, 1])
|
||||
|
||||
dh_share = pd.concat({y: dh_share for y in range(1990, 2021)}, names=["year", "country"]).swaplevel()
|
||||
dh_share = dh_share.div(100).reindex(df.index)
|
||||
|
||||
# make conservative assumption and take minimum from both data sets
|
||||
df["district heat share"] = pd.concat(
|
||||
[df["district heat share"], dh_share.reindex(index=df.index) / 100], axis=1
|
||||
).min(axis=1)
|
||||
item = "district heat share"
|
||||
df[item] = pd.concat([dh_share, df[item]], axis=1).min(axis=1)
|
||||
|
||||
return df
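The conservative treatment of the district heat share above takes the element-wise minimum of the IDEES-derived share and the external data set; a sketch with made-up values:

import pandas as pd

# Toy illustration: keep the smaller of the two district heat share estimates per country
idees_share = pd.Series([0.30, 0.10], index=["DK", "DE"])
external_share = pd.Series([0.25, 0.12], index=["DK", "DE"])
print(pd.concat([external_share, idees_share], axis=1).min(axis=1))  # DK 0.25, DE 0.10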
|
||||
|
||||
@ -599,8 +598,6 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
|
||||
index_col = ["Country_code", "Pollutant_name", "Year", "Sector_name"]
|
||||
df = df.set_index(index_col).sort_index()
|
||||
|
||||
emissions_scope = emissions_scope
|
||||
|
||||
cts = ["CH", "EUA", "NO"] + eu28_eea
|
||||
|
||||
slicer = idx[cts, emissions_scope, year, to_ipcc.values()]
|
||||
@ -643,22 +640,26 @@ def build_eea_co2(input_co2, year=1990, emissions_scope="CO2"):
|
||||
return emissions / 1e3
|
||||
|
||||
|
||||
def build_eurostat_co2(input_eurostat, countries, report_year, year=1990):
|
||||
eurostat = build_eurostat(input_eurostat, countries, report_year, year)
|
||||
def build_eurostat_co2(countries, eurostat=None, year=1990):
|
||||
|
||||
specific_emissions = pd.Series(index=eurostat.columns, dtype=float)
|
||||
if eurostat is None:
|
||||
df = build_eurostat(countries, year)
|
||||
else:
|
||||
df = eurostat.xs(year, level='year')
|
||||
|
||||
specific_emissions = pd.Series(index=df.columns, dtype=float)
|
||||
|
||||
# emissions in tCO2_equiv per MWh_th
|
||||
specific_emissions["Solid fuels"] = 0.36 # Approximates coal
|
||||
specific_emissions["Oil (total)"] = 0.285 # Average of distillate and residue
|
||||
specific_emissions["Gas"] = 0.2 # For natural gas
|
||||
specific_emissions["Solid fossil fuels"] = 0.36 # Approximates coal
|
||||
specific_emissions["Oil and petroleum products"] = 0.285 # Average of distillate and residue
|
||||
specific_emissions["Natural gas"] = 0.2 # For natural gas
|
||||
|
||||
# oil values from https://www.eia.gov/tools/faqs/faq.cfm?id=74&t=11
|
||||
# Distillate oil (No. 2) 0.276
|
||||
# Residual oil (No. 6) 0.298
|
||||
# https://www.eia.gov/electricity/annual/html/epa_a_03.html
|
||||
|
||||
return eurostat.multiply(specific_emissions).sum(axis=1)
|
||||
return df.multiply(specific_emissions).sum(axis=1)
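The emission totals above are fuel use multiplied by carrier-specific emission factors and summed; a self-contained check with made-up fuel use (in MWh_th):

import pandas as pd

# Toy fuel use per country in MWh_th, multiplied by the factors used above (tCO2/MWh_th)
fuel_use = pd.DataFrame(
    {"Solid fossil fuels": [100.0], "Oil and petroleum products": [50.0], "Natural gas": [200.0]},
    index=["DE"],
)
factors = pd.Series({"Solid fossil fuels": 0.36, "Oil and petroleum products": 0.285, "Natural gas": 0.2})
print(fuel_use.multiply(factors).sum(axis=1))  # 36 + 14.25 + 40 = 90.25 tCO2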
|
||||
|
||||
|
||||
def build_co2_totals(countries, eea_co2, eurostat_co2):
|
||||
@ -666,25 +667,19 @@ def build_co2_totals(countries, eea_co2, eurostat_co2):
|
||||
|
||||
for ct in pd.Index(countries).intersection(["BA", "RS", "AL", "ME", "MK"]):
|
||||
mappings = {
|
||||
"electricity": (
|
||||
ct,
|
||||
"+",
|
||||
"Conventional Thermal Power Stations",
|
||||
"of which From Coal",
|
||||
),
|
||||
"residential non-elec": (ct, "+", "+", "Residential"),
|
||||
"services non-elec": (ct, "+", "+", "Services"),
|
||||
"road non-elec": (ct, "+", "+", "Road"),
|
||||
"rail non-elec": (ct, "+", "+", "Rail"),
|
||||
"domestic navigation": (ct, "+", "+", "Domestic Navigation"),
|
||||
"international navigation": (ct, "-", "Bunkers"),
|
||||
"domestic aviation": (ct, "+", "+", "Domestic aviation"),
|
||||
"international aviation": (ct, "+", "+", "International aviation"),
|
||||
"electricity": (ct, "Transformation input", "Electricity & heat generation", "Main"),
|
||||
"residential non-elec": (ct, "Final energy consumption", "Other sectors", "Households"),
|
||||
"services non-elec": (ct, "Final energy consumption", "Other sectors", "Commercial & public services"),
|
||||
"road non-elec": (ct, "Final energy consumption", "Transport sector", "Road"),
|
||||
"rail non-elec": (ct, "Final energy consumption", "Transport sector", "Rail"),
|
||||
"domestic navigation": (ct, "Final energy consumption", "Transport sector", "Domestic navigation"),
|
||||
"international navigation": (ct, "Main", "International maritime bunkers"),
|
||||
"domestic aviation": (ct, "Final energy consumption", "Transport sector", "Domestic aviation"),
|
||||
"international aviation": (ct, "Main", "International aviation"),
|
||||
# does not include industrial process emissions or fuel processing/refining
|
||||
"industrial non-elec": (ct, "+", "Industry"),
|
||||
"industrial non-elec": (ct, "Final energy consumption", "Industry sector", "Non-energy use in industry sector"),
|
||||
# does not include non-energy emissions
|
||||
"agriculture": (eurostat_co2.index.get_level_values(0) == ct)
|
||||
& eurostat_co2.index.isin(["Agriculture / Forestry", "Fishing"], level=3),
|
||||
"agriculture": (eurostat_co2.index.get_level_values(0) == ct) & eurostat_co2.index.isin(["Agriculture & forestry", "Fishing"], level=3),
|
||||
}
|
||||
|
||||
for i, mi in mappings.items():
|
||||
@ -738,6 +733,7 @@ if __name__ == "__main__":
|
||||
logging.basicConfig(level=snakemake.config["logging"]["level"])
|
||||
|
||||
config = snakemake.config["energy"]
|
||||
data_year = int(config["energy_totals_year"])
|
||||
|
||||
nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
|
||||
population = nuts3["pop"].groupby(nuts3.country).sum()
|
||||
@ -745,25 +741,20 @@ if __name__ == "__main__":
|
||||
countries = snakemake.config["countries"]
|
||||
idees_countries = pd.Index(countries).intersection(eu28)
|
||||
|
||||
data_year = config["energy_totals_year"]
|
||||
report_year = snakemake.config["energy"]["eurostat_report_year"]
|
||||
input_eurostat = snakemake.input.eurostat
|
||||
eurostat = build_eurostat(input_eurostat, countries, report_year, data_year)
|
||||
swiss = build_swiss(data_year)
|
||||
idees = build_idees(idees_countries, data_year)
|
||||
eurostat = build_eurostat(countries.difference(['CH']))
|
||||
swiss = build_swiss()
|
||||
idees = build_idees(idees_countries)
|
||||
|
||||
energy = build_energy_totals(countries, eurostat, swiss, idees)
|
||||
energy.to_csv(snakemake.output.energy_name)
|
||||
|
||||
base_year_emissions = config["base_emissions_year"]
|
||||
emissions_scope = snakemake.config["energy"]["emissions"]
|
||||
eea_co2 = build_eea_co2(snakemake.input.co2, base_year_emissions, emissions_scope)
|
||||
eurostat_co2 = build_eurostat_co2(
|
||||
input_eurostat, countries, report_year, base_year_emissions
|
||||
)
|
||||
eurostat_co2 = build_eurostat_co2(countries, eurostat, base_year_emissions)
|
||||
|
||||
co2 = build_co2_totals(countries, eea_co2, eurostat_co2)
|
||||
co2.to_csv(snakemake.output.co2_name)
|
||||
|
||||
transport = build_transport_data(countries, population, idees)
|
||||
idees_transport = idees.xs(data_year, level='year')
|
||||
transport = build_transport_data(countries, population, idees_transport)
|
||||
transport.to_csv(snakemake.output.transport_name)
|
||||
|
@ -19,6 +19,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_heat_demands",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
@ -27,8 +28,24 @@ if __name__ == "__main__":
|
||||
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
|
||||
client = Client(cluster, asynchronous=True)
|
||||
|
||||
time = pd.date_range(freq="h", **snakemake.config["snapshots"])
|
||||
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
|
||||
|
||||
cutout_name = snakemake.input.cutout
|
||||
year = snakemake.wildcards.weather_year
|
||||
|
||||
if year:
|
||||
snapshots = dict(start=year, end=str(int(year)+1), inclusive="left")
|
||||
cutout_name = cutout_name.format(weather_year=year)
|
||||
else:
|
||||
snapshots = snakemake.config['snapshots']
|
||||
|
||||
drop_leap_day = snakemake.config["atlite"].get("drop_leap_day", False)
|
||||
time = pd.date_range(freq='h', **snapshots)
|
||||
daily = pd.date_range(freq='D', **snapshots)
|
||||
if drop_leap_day:
|
||||
time = time[~((time.month == 2) & (time.day == 29))]
|
||||
daily = daily[~((daily.month == 2) & (daily.day == 29))]
|
||||
|
||||
cutout = atlite.Cutout(cutout_name).sel(time=time)
|
||||
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
@ -49,6 +66,6 @@ if __name__ == "__main__":
|
||||
index=clustered_regions.index,
|
||||
dask_kwargs=dict(scheduler=client),
|
||||
show_progress=False,
|
||||
)
|
||||
).sel(time=daily)
|
||||
|
||||
heat_demand.to_netcdf(snakemake.output.heat_demand)
|
||||
|
63
scripts/build_heat_totals.py
Normal file
@ -0,0 +1,63 @@
|
||||
"""Approximate heat demand for all weather years."""
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from itertools import product
|
||||
from numpy.polynomial import Polynomial
|
||||
|
||||
idx = pd.IndexSlice
|
||||
|
||||
|
||||
def approximate_heat_demand(energy_totals, hdd):
|
||||
|
||||
if isinstance(hdd, str):
|
||||
hdd = pd.read_csv(hdd, index_col=0).T
|
||||
hdd.index = hdd.index.astype(int)
|
||||
|
||||
demands = {}
|
||||
|
||||
for kind, sector in product(["total", "electricity"], ["services", "residential"]):
|
||||
|
||||
row = idx[:, 2007:2015]
|
||||
col = f"{kind} {sector} space"
|
||||
demand = energy_totals.loc[row, col].unstack(0)
|
||||
|
||||
demand_approx = {}
|
||||
|
||||
for c in countries:
|
||||
|
||||
Y = demand[c].dropna()
|
||||
X = hdd.loc[Y.index, c]
|
||||
|
||||
to_predict = hdd.index.difference(Y.index)
|
||||
X_pred = hdd.loc[to_predict, c]
|
||||
|
||||
p = Polynomial.fit(X, Y, 1)
|
||||
Y_pred = p(X_pred)
|
||||
|
||||
demand_approx[c] = pd.Series(Y_pred, index=to_predict)
|
||||
|
||||
demand_approx = pd.DataFrame(demand_approx)
|
||||
demand_approx = pd.concat([demand, demand_approx]).sort_index()
|
||||
demands[f"{kind} {sector} space"] = demand_approx.groupby(demand_approx.index).sum()
|
||||
|
||||
demands = pd.concat(demands).unstack().T.clip(lower=0)
|
||||
demands.index.names = ["country", "year"]
|
||||
|
||||
return demands
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake('build_heat_totals')
|
||||
|
||||
hdd = pd.read_csv(snakemake.input.hdd, index_col=0).T
|
||||
|
||||
energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=[0,1])
|
||||
|
||||
countries = hdd.columns
|
||||
|
||||
heat_demand = approximate_heat_demand(energy_totals, hdd)
|
||||
|
||||
heat_demand.to_csv(snakemake.output.heat_totals)
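The regression in approximate_heat_demand fits a straight line of space heat demand against annual heating degree days and evaluates it for the missing years; a toy example with made-up numbers:

import pandas as pd
from numpy.polynomial import Polynomial

# Toy regression: demand (TWh) against heating degree days for the known years
hdd = pd.Series({2007: 1200.0, 2008: 1300.0, 2009: 1250.0, 2010: 1450.0})
demand = pd.Series({2007: 24.0, 2008: 26.0, 2009: 25.0})  # 2010 is missing

p = Polynomial.fit(hdd.loc[demand.index], demand, 1)
print(p(hdd.loc[2010]))  # extrapolated demand for 2010, here approximately 29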
|
@ -137,6 +137,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_industrial_distribution_key",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
@ -14,6 +14,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_industrial_energy_demand_per_node",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
planning_horizons=2030,
|
||||
|
@ -72,6 +72,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_industrial_energy_demand_per_node_today",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
@ -20,12 +20,17 @@ import xarray as xr
|
||||
if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake("build_population_layouts")
|
||||
snakemake = mock_snakemake(
|
||||
'build_population_layouts',
|
||||
weather_year="",
|
||||
)
|
||||
|
||||
logging.basicConfig(level=snakemake.config["logging"]["level"])
|
||||
|
||||
cutout = atlite.Cutout(snakemake.input.cutout)
|
||||
cutout_name = snakemake.input.cutout
|
||||
year = snakemake.wildcards.weather_year
|
||||
if year: cutout_name = cutout_name.format(weather_year=year)
|
||||
cutout = atlite.Cutout(cutout_name)
|
||||
|
||||
grid_cells = cutout.grid.geometry
|
||||
|
||||
|
@ -14,16 +14,23 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_population_weighted_energy_totals",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
|
||||
config = snakemake.config["energy"]
|
||||
data_year = int(config["energy_totals_year"])
|
||||
if snakemake.wildcards.weather_year and snakemake.wildcards.kind == 'heat':
|
||||
data_year = int(snakemake.wildcards.weather_year)
|
||||
|
||||
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
|
||||
|
||||
energy_totals = pd.read_csv(snakemake.input.energy_totals, index_col=0)
|
||||
totals = pd.read_csv(snakemake.input.totals, index_col=[0,1])
|
||||
totals = totals.xs(data_year, level='year')
|
||||
|
||||
nodal_energy_totals = energy_totals.loc[pop_layout.ct].fillna(0.0)
|
||||
nodal_energy_totals.index = pop_layout.index
|
||||
nodal_energy_totals = nodal_energy_totals.multiply(pop_layout.fraction, axis=0)
|
||||
nodal_totals = totals.loc[pop_layout.ct].fillna(0.)
|
||||
nodal_totals.index = pop_layout.index
|
||||
nodal_totals = nodal_totals.multiply(pop_layout.fraction, axis=0)
|
||||
|
||||
nodal_energy_totals.to_csv(snakemake.output[0])
|
||||
nodal_totals.to_csv(snakemake.output[0])
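The nodal disaggregation above first repeats the country totals per cluster and then scales them by each cluster's population fraction; a toy example with made-up numbers:

import pandas as pd

# Toy nodal disaggregation by population fraction
totals = pd.DataFrame({"total residential space": [100.0, 80.0]}, index=["DE", "FR"])
pop_layout = pd.DataFrame(
    {"ct": ["DE", "DE", "FR"], "fraction": [0.6, 0.4, 1.0]},
    index=["DE0", "DE1", "FR0"],
)
nodal = totals.loc[pop_layout.ct].fillna(0.0)
nodal.index = pop_layout.index
print(nodal.multiply(pop_layout.fraction, axis=0))  # DE0 60, DE1 40, FR0 80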
|
||||
|
@ -19,6 +19,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_solar_thermal_profiles",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
@ -27,10 +28,22 @@ if __name__ == "__main__":
|
||||
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
|
||||
client = Client(cluster, asynchronous=True)
|
||||
|
||||
config = snakemake.config["solar_thermal"]
|
||||
config = snakemake.config['solar_thermal']
|
||||
|
||||
cutout_name = snakemake.input.cutout
|
||||
year = snakemake.wildcards.weather_year
|
||||
|
||||
time = pd.date_range(freq="h", **snakemake.config["snapshots"])
|
||||
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
|
||||
if year:
|
||||
snapshots = dict(start=year, end=str(int(year)+1), inclusive="left")
|
||||
cutout_name = cutout_name.format(weather_year=year)
|
||||
else:
|
||||
snapshots = snakemake.config['snapshots']
|
||||
|
||||
time = pd.date_range(freq='h', **snapshots)
|
||||
if snakemake.config["atlite"].get("drop_leap_day", False):
|
||||
time = time[~((time.month == 2) & (time.day == 29))]
|
||||
|
||||
cutout = atlite.Cutout(cutout_name).sel(time=time)
|
||||
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
|
@ -19,6 +19,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_temperature_profiles",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
@ -26,9 +27,20 @@ if __name__ == "__main__":
|
||||
nprocesses = int(snakemake.threads)
|
||||
cluster = LocalCluster(n_workers=nprocesses, threads_per_worker=1)
|
||||
client = Client(cluster, asynchronous=True)
|
||||
cutout_name = snakemake.input.cutout
|
||||
year = snakemake.wildcards.weather_year
|
||||
|
||||
time = pd.date_range(freq="h", **snakemake.config["snapshots"])
|
||||
cutout = atlite.Cutout(snakemake.input.cutout).sel(time=time)
|
||||
if year:
|
||||
snapshots = dict(start=year, end=str(int(year)+1), inclusive="left")
|
||||
cutout_name = cutout_name.format(weather_year=year)
|
||||
else:
|
||||
snapshots = snakemake.config['snapshots']
|
||||
|
||||
time = pd.date_range(freq='h', **snapshots)
|
||||
if snakemake.config["atlite"].get("drop_leap_day", False):
|
||||
time = time[~((time.month == 2) & (time.day == 29))]
|
||||
|
||||
cutout = atlite.Cutout(cutout_name).sel(time=time)
|
||||
|
||||
clustered_regions = (
|
||||
gpd.read_file(snakemake.input.regions_onshore)
|
||||
|
@ -163,6 +163,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"build_transport_demand",
|
||||
weather_year='',
|
||||
simpl="",
|
||||
clusters=48,
|
||||
)
|
||||
@ -177,7 +178,12 @@ if __name__ == "__main__":
|
||||
|
||||
options = snakemake.config["sector"]
|
||||
|
||||
snapshots = pd.date_range(freq="h", **snakemake.config["snapshots"], tz="UTC")
|
||||
year = snakemake.wildcards.weather_year
|
||||
snapshots = dict(start=year, end=str(int(year)+1), inclusive="left") if year else snakemake.config['snapshots']
|
||||
snapshots = pd.date_range(freq='h', **snapshots, tz="UTC")
|
||||
if snakemake.config["atlite"].get("drop_leap_day", False):
|
||||
leap_day = (snapshots.month == 2) & (snapshots.day == 29)
|
||||
snapshots = snapshots[~leap_day]
|
||||
|
||||
nyears = len(snapshots) / 8760
|
||||
|
||||
|
@ -108,7 +108,7 @@ if __name__ == "__main__":
|
||||
if "snakemake" not in globals():
|
||||
from _helpers import mock_snakemake
|
||||
|
||||
snakemake = mock_snakemake("cluster_gas_network", simpl="", clusters="37")
|
||||
snakemake = mock_snakemake("cluster_gas_network", weather_year="", simpl="", clusters="37")
|
||||
|
||||
logging.basicConfig(level=snakemake.config["logging"]["level"])
|
||||
|
||||
|
@ -648,7 +648,7 @@ def make_summaries(networks_dict):
|
||||
]
|
||||
|
||||
columns = pd.MultiIndex.from_tuples(
|
||||
networks_dict.keys(), names=["cluster", "ll", "opt", "planning_horizon"]
|
||||
networks_dict.keys(), names=["weather_year", "cluster", "ll", "opt", "planning_horizon"]
|
||||
)
|
||||
|
||||
df = {}
|
||||
@ -685,9 +685,10 @@ if __name__ == "__main__":
|
||||
logging.basicConfig(level=snakemake.config["logging"]["level"])
|
||||
|
||||
networks_dict = {
|
||||
(cluster, ll, opt + sector_opt, planning_horizon): "results/"
|
||||
(weather_year, cluster, ll, opt + sector_opt, planning_horizon): "results/"
|
||||
+ snakemake.params.RDIR
|
||||
+ f"/postnetworks/elec_s{simpl}_{cluster}_l{ll}_{opt}_{sector_opt}_{planning_horizon}.nc"
|
||||
for weather_year in snakemake.config['scenario']['weather_year']
|
||||
for simpl in snakemake.config["scenario"]["simpl"]
|
||||
for cluster in snakemake.config["scenario"]["clusters"]
|
||||
for opt in snakemake.config["scenario"]["opts"]
|
||||
|
@ -922,6 +922,7 @@ if __name__ == "__main__":
|
||||
|
||||
snakemake = mock_snakemake(
|
||||
"plot_network",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
opts="",
|
||||
clusters="5",
|
||||
|
@ -695,7 +695,7 @@ def add_co2limit(n, nyears=1.0, limit=0.0):
|
||||
|
||||
|
||||
# TODO PyPSA-Eur merge issue
|
||||
def average_every_nhours(n, offset):
|
||||
def average_every_nhours(n, offset, drop_leap_day=False):
|
||||
logger.info(f"Resampling the network to {offset}")
|
||||
m = n.copy(with_time=False)
|
||||
|
||||
@ -714,6 +714,10 @@ def average_every_nhours(n, offset):
|
||||
else:
|
||||
pnl[k] = df.resample(offset).mean()
|
||||
|
||||
if drop_leap_day:
|
||||
sns = m.snapshots[~((m.snapshots.month == 2) & (m.snapshots.day == 29))]
|
||||
m.set_snapshots(sns)
|
||||
|
||||
return m
|
||||
|
||||
|
||||
@ -3201,7 +3205,7 @@ def apply_time_segmentation(
|
||||
return n
|
||||
|
||||
|
||||
def set_temporal_aggregation(n, opts, solver_name):
|
||||
def set_temporal_aggregation(n, opts, solver_name, drop_leap_day=False):
|
||||
"""
|
||||
Aggregate network temporally.
|
||||
"""
|
||||
@ -3236,6 +3240,7 @@ if __name__ == "__main__":
|
||||
snakemake = mock_snakemake(
|
||||
"prepare_sector_network",
|
||||
configfiles="test/config.overnight.yaml",
|
||||
weather_year="",
|
||||
simpl="",
|
||||
opts="",
|
||||
clusters="5",
|
||||
@ -3267,9 +3272,9 @@ if __name__ == "__main__":
|
||||
nyears,
|
||||
)
|
||||
|
||||
pop_weighted_energy_totals = (
|
||||
pd.read_csv(snakemake.input.pop_weighted_energy_totals, index_col=0) * nyears
|
||||
)
|
||||
pop_weighted_energy_totals = pd.read_csv(snakemake.input.pop_weighted_energy_totals, index_col=0) * nyears
|
||||
pop_weighted_heat_totals = pd.read_csv(snakemake.input.pop_weighted_heat_totals, index_col=0) * nyears
|
||||
pop_weighted_energy_totals.update(pop_weighted_heat_totals)
|
||||
|
||||
patch_electricity_network(n)
|
||||
|
||||
@ -3344,7 +3349,8 @@ if __name__ == "__main__":
|
||||
add_allam(n, costs)
|
||||
|
||||
solver_name = snakemake.config["solving"]["solver"]["name"]
|
||||
n = set_temporal_aggregation(n, opts, solver_name)
|
||||
drop_leap_day = snakemake.config["atlite"].get("drop_leap_day", False)
|
||||
n = set_temporal_aggregation(n, opts, solver_name, drop_leap_day)
|
||||
|
||||
limit_type = "config"
|
||||
limit = get(snakemake.config["co2_budget"], investment_year)
|
||||
|
126
scripts/solve_operations_network_other_year.py
Normal file
@ -0,0 +1,126 @@
|
||||
"""Solve operations network."""
|
||||
|
||||
|
||||
import pypsa
|
||||
import numpy as np
|
||||
|
||||
from solve_network import solve_network, prepare_network
|
||||
from helper import override_component_attrs
|
||||
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
pypsa.pf.logger.setLevel(logging.WARNING)
|
||||
|
||||
|
||||
def set_parameters_from_optimized(n, n_optim):
|
||||
lines_typed_i = n.lines.index[n.lines.type != '']
|
||||
n.lines.loc[lines_typed_i, 'num_parallel'] = \
|
||||
n_optim.lines['num_parallel'].reindex(lines_typed_i, fill_value=0.)
|
||||
n.lines.loc[lines_typed_i, 's_nom'] = (
|
||||
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
|
||||
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel)
|
||||
|
||||
lines_untyped_i = n.lines.index[n.lines.type == '']
|
||||
for attr in ('s_nom', 'r', 'x'):
|
||||
n.lines.loc[lines_untyped_i, attr] = \
|
||||
n_optim.lines[attr].reindex(lines_untyped_i, fill_value=0.)
|
||||
n.lines['s_nom_extendable'] = False
|
||||
|
||||
links_dc_i = n.links.index[n.links.p_nom_extendable]
|
||||
n.links.loc[links_dc_i, 'p_nom'] = \
|
||||
n_optim.links['p_nom_opt'].reindex(links_dc_i, fill_value=0.)
|
||||
n.links.loc[links_dc_i, 'p_nom_extendable'] = False
|
||||
|
||||
gen_extend_i = n.generators.index[n.generators.p_nom_extendable]
|
||||
n.generators.loc[gen_extend_i, 'p_nom'] = \
|
||||
n_optim.generators['p_nom_opt'].reindex(gen_extend_i, fill_value=0.)
|
||||
n.generators.loc[gen_extend_i, 'p_nom_extendable'] = False
|
||||
|
||||
stor_units_extend_i = n.storage_units.index[n.storage_units.p_nom_extendable]
|
||||
n.storage_units.loc[stor_units_extend_i, 'p_nom'] = \
|
||||
n_optim.storage_units['p_nom_opt'].reindex(stor_units_extend_i, fill_value=0.)
|
||||
n.storage_units.loc[stor_units_extend_i, 'p_nom_extendable'] = False
|
||||
|
||||
stor_extend_i = n.stores.index[n.stores.e_nom_extendable]
|
||||
n.stores.loc[stor_extend_i, 'e_nom'] = \
|
||||
n_optim.stores['e_nom_opt'].reindex(stor_extend_i, fill_value=0.)
|
||||
n.stores.loc[stor_extend_i, 'e_nom_extendable'] = False
|
||||
|
||||
return n
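Each block in set_parameters_from_optimized relies on the same pandas idiom: take the optimised capacity where a component exists in the solved network and fall back to zero otherwise. A minimal illustration with made-up component names:

import pandas as pd

# Minimal sketch of the reindex-with-fallback idiom used above
p_nom_opt = pd.Series({"OCGT DE": 500.0, "OCGT FR": 300.0})
extendable = pd.Index(["OCGT DE", "OCGT FR", "OCGT PL"])
print(p_nom_opt.reindex(extendable, fill_value=0.0))  # OCGT PL falls back to 0.0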
|
||||
|
||||
|
||||
def remove_unused_components(n, threshold=50):
|
||||
logger.info("Remove assets that are barely used to speed things up.")
|
||||
|
||||
for c in n.iterate_components({"Store", "Link", "Generator"}):
|
||||
attr = "e_nom" if c.name == "Store" else "p_nom"
|
||||
to_remove = c.df.loc[c.df[attr] < threshold].index
|
||||
logger.info(f"Removing barely used {c.name}s:\n{to_remove}")
|
||||
n.mremove(c.name, to_remove)
|
||||
|
||||
return n
|
||||
|
||||
|
||||
def add_load_shedding(n, voll=1e4):
|
||||
logger.info("Add load shedding to all buses.")
|
||||
|
||||
if "load" in n.generators.carrier.unique():
|
||||
to_remove = n.generators.query("carrier == 'load'").index
|
||||
logger.info(f"Removing pre-existing load shedding:\n{to_remove}")
|
||||
n.mremove("Generator", to_remove)
|
||||
|
||||
n.madd("Generator", n.buses.index,
|
||||
suffix=" load",
|
||||
bus=n.buses.index,
|
||||
carrier='load',
|
||||
marginal_cost=voll,
|
||||
p_nom=1e6
|
||||
)
|
||||
|
||||
return n
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if 'snakemake' not in globals():
|
||||
from helper import mock_snakemake
|
||||
snakemake = mock_snakemake(
|
||||
'solve_operations_network_other_year',
|
||||
capacity_year=1952,
|
||||
simpl='',
|
||||
opts='',
|
||||
clusters=37,
|
||||
lv=2.0,
|
||||
sector_opts='Co2L0-25H-T-H-B-I-A',
|
||||
planning_horizons=2030,
|
||||
weather_year=2013
|
||||
)
|
||||
|
||||
logging.basicConfig(filename=snakemake.log.python,
|
||||
level=snakemake.config["logging"]["level"])
|
||||
|
||||
tmpdir = snakemake.config['solving'].get('tmpdir')
|
||||
if tmpdir is not None:
|
||||
from pathlib import Path
|
||||
Path(tmpdir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
overrides = override_component_attrs(snakemake.input.overrides)
|
||||
|
||||
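
    # comment added for clarity: input.pre/.post are taken to be the unsolved network
    # and its solved investment counterpart; the optimised capacities of the latter
    # are copied over below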
    n = pypsa.Network(snakemake.input.pre, override_component_attrs=overrides)
    n_post = pypsa.Network(snakemake.input.post, override_component_attrs=overrides)
    n = set_parameters_from_optimized(n, n_post)
    del n_post

    n = remove_unused_components(n)
    n = add_load_shedding(n)

    opts = snakemake.wildcards.sector_opts.split('-')
    solve_opts = snakemake.config['solving']['options']
    solve_opts['skip_iterations'] = True

    n = prepare_network(n, solve_opts)

    n = solve_network(n, config=snakemake.config, opts=opts,
                      solver_dir=tmpdir,
                      solver_logfile=snakemake.log.solver)

    n.export_to_netcdf(snakemake.output[0])
148
scripts/solve_operations_network_other_year_myopic.py
Normal file
148
scripts/solve_operations_network_other_year_myopic.py
Normal file
@ -0,0 +1,148 @@
"""Solve myopic operations network."""


import pypsa
import pandas as pd

from solve_network import solve_network, prepare_network
from solve_operations_network import set_parameters_from_optimized, remove_unused_components, add_load_shedding
from helper import override_component_attrs

import logging
logger = logging.getLogger(__name__)
pypsa.pf.logger.setLevel(logging.WARNING)


def prepare_myopic(n, config, store_soc, storage_unit_soc):
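    """Descriptive docstring added for clarity: adapt the network for a single-year
    myopic dispatch run by seeding storage levels from the previous year, replacing
    the CO2 cap with a CO2 price, capping CO2 sequestration, replenishing fossil
    and biomass stores, and applying the configured bidding prices."""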
    n.stores.e_cyclic = False
    n.storage_units.cyclic_state_of_charge = False

    biomass_stores = n.stores.carrier.isin(["solid biomass", "biogas"])
    biomass_potential = n.stores.loc[biomass_stores, "e_initial"]

    # storage level contiguity across years
    n.stores.e_initial = store_soc
    n.storage_units.state_of_charge_initial = storage_unit_soc

    # replace co2 limit with co2 price
    n.remove("GlobalConstraint", "CO2Limit")
    n.stores.at["co2 atmosphere", "marginal_cost"] = -config["co2_price"]

    # handle co2 sequestration
    assert sum(n.stores.carrier == "co2 stored") == 1, "Myopic operation not implemented for spatially resolved CO2 sequestration."
    n.stores.at["co2 stored", 'e_nom'] = config['co2_sequestration_limit'] * 1e6  # t/a

    # reset co2 emissions
    n.stores.loc[n.stores.carrier == 'co2 stored', "e_initial"] = 0.
    n.stores.at["co2 atmosphere", "e_initial"] = 0.

    # replenish fossil gas and oil with 1000 TWh each
    fossil_stores = n.stores.carrier.isin(["gas", "oil"])
    n.stores.loc[fossil_stores, 'e_initial'] = 1e9
    n.stores.loc[fossil_stores, 'e_nom'] = 10e9

    # replenish annual solid biomass and biogas potentials
    n.stores.loc[biomass_stores, "e_initial"] = biomass_potential

    # set storage bidding prices
    bidding_prices = config["bidding_prices"]
    for c in n.iterate_components({"Store", "Link", "StorageUnit"}):
        c.df.marginal_cost.update(c.df.carrier.map(bidding_prices).dropna())

    # deduct industry solid biomass
    assert sum(n.stores.carrier == "solid biomass") == 1, "Myopic operation not implemented for spatially resolved solid biomass."
    n.stores.at["EU solid biomass", "e_initial"] -= n.loads.at["solid biomass for industry", "p_set"] * 8760
    n.remove("Load", "solid biomass for industry")

    return n


def solve_network_myopic(n, config, opts='', **kwargs):
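    """Descriptive docstring added for clarity: solve dispatch with a rolling horizon.

    The year is split into overlapping windows; after each window only the
    non-overlapping part is kept, and the storage levels at its last kept snapshot
    initialise the next window. For example (hypothetical values), at 3-hourly
    resolution a 7-day window with a 4-day overlap gives window=56, overlap=32
    and kept=24 snapshots per iteration.
    """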
    rolling_horizon = config["operations"]["rolling_horizon"]

    freq = int(pd.infer_freq(n.snapshots)[:-1])
    window = rolling_horizon["window"] * 24 // freq
    overlap = rolling_horizon["overlap"] * 24 // freq
    kept = window - overlap
    length = len(n.snapshots)

    assert kept > 0, f"Overlap ({rolling_horizon['overlap']} days) must be smaller than window ({rolling_horizon['window']} days)."

    for i in range(length // kept):

        snapshots = n.snapshots[i * kept:(i + 1) * kept + overlap]
        logger.info(f"Optimising operations from {snapshots[0]} to {snapshots[-1]}")

        n = solve_network(n, config, opts=opts, snapshots=snapshots, **kwargs)

        last_kept = n.snapshots[(i + 1) * kept - 1]
        logger.info(f"Setting initial SOCs from {last_kept} for next iteration.\n")

        n.stores.e_initial = n.stores_t.e.loc[last_kept]
        n.storage_units.state_of_charge_initial = n.storage_units_t.state_of_charge.loc[last_kept]

    # final segment until end of year
    snapshots = n.snapshots[(i + 1) * kept:]
    n = solve_network(n, config, opts=opts, snapshots=snapshots, **kwargs)

    return n


if __name__ == "__main__":
    if 'snakemake' not in globals():
        from helper import mock_snakemake
        snakemake = mock_snakemake(
            'solve_operations_network_myopic',
            capacity_year=1952,
            simpl='',
            opts='',
            clusters=37,
            lv=2.0,
            sector_opts='Co2L0-25H-T-H-B-I-A',
            planning_horizons=2030,
            weather_year=2013
        )

    logging.basicConfig(filename=snakemake.log.python,
                        level=snakemake.config['logging_level'])

    tmpdir = snakemake.config['solving'].get('tmpdir')
    if tmpdir is not None:
        from pathlib import Path
        Path(tmpdir).mkdir(parents=True, exist_ok=True)

    config = snakemake.config["operations"]
    overrides = override_component_attrs(snakemake.input.overrides)

    n = pypsa.Network(snakemake.input.pre, override_component_attrs=overrides)

    n_post = pypsa.Network(snakemake.input.post, override_component_attrs=overrides)
    n = set_parameters_from_optimized(n, n_post)
    del n_post
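
    # comment added for clarity: input.previous is taken to be the preceding year's
    # solved operations network; its end-of-year storage levels seed this year's
    # initial SOCs in prepare_myopic()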
    n_previous = pypsa.Network(snakemake.input.previous, override_component_attrs=overrides)
    store_soc = n_previous.stores_t.e.iloc[-1]
    storage_unit_soc = n_previous.storage_units_t.state_of_charge.iloc[-1]
    del n_previous

    n = remove_unused_components(n)
    n = add_load_shedding(n)
    n = prepare_myopic(n, config, store_soc, storage_unit_soc)

    opts = snakemake.wildcards.sector_opts.split('-')
    solve_opts = snakemake.config['solving']['options']
    solve_opts['skip_iterations'] = True

    n = prepare_network(n, solve_opts)

    n = solve_network_myopic(
        n,
        config=snakemake.config,
        opts=opts,
        solver_dir=tmpdir,
        solver_logfile=snakemake.log.solver
    )

    n.export_to_netcdf(snakemake.output[0])