Integrate bundled landuse and load data instead of relying on vresutils

Jonas Hörsch 2018-08-03 11:53:14 +02:00
parent 8e4abc3fce
commit 6866ce6b06
5 changed files with 45 additions and 11 deletions

View File

@@ -78,6 +78,9 @@ rule build_bus_regions:
     script: "scripts/build_bus_regions.py"

 rule build_renewable_potentials:
+    input:
+        corine="data/bundle/corine/g250_clc06_V18_5.tif",
+        natura="data/bundle/natura/Natura2000_end2015.shp"
     output: "resources/potentials_{technology}.nc"
     resources: mem_mb=10000
     benchmark: "benchmarks/build_renewable_potentials_{technology}"
@@ -97,6 +100,9 @@ rule build_renewable_profiles:
     script: "scripts/build_renewable_profiles.py"

 rule build_hydro_profile:
+    input:
+        country_shapes='resources/country_shapes.geojson',
+        eia_hydro_generation='data/bundle/EIA_hydro_generation_2000_2014.csv'
     output: 'resources/profile_hydro.nc'
     resources: mem_mb=5000
     script: 'scripts/build_hydro_profile.py'
@@ -107,6 +113,9 @@ rule add_electricity:
         tech_costs='data/costs.csv',
         regions="resources/regions_onshore.geojson",
         powerplants='resources/powerplants.csv',
+        hydro_capacities='data/bundle/hydro_capacities.csv',
+        opsd_load='data/bundle/time_series_60min_singleindex_filtered.csv',
+        nuts3_shapes='resources/nuts3_shapes.geojson',
         **{'profile_' + t: "resources/profile_" + t + ".nc"
            for t in config['renewable']}
     output: "networks/elec.nc"

View File

@@ -13,8 +13,7 @@ import xarray as xr
 import geopandas as gpd

 from vresutils.costdata import annuity
-from vresutils.load import timeseries_shapes as timeseries_load
-from vresutils import hydro as vhydro
+from vresutils.load import timeseries_opsd

 import pypsa
 import powerplantmatching as ppm
@@ -108,9 +107,31 @@ def load_powerplants(n, ppl_fn=None):
 def attach_load(n):
     substation_lv_i = n.buses.index[n.buses['substation_lv']]
     regions = gpd.read_file(snakemake.input.regions).set_index('name').reindex(substation_lv_i)
-    n.madd("Load", substation_lv_i,
-           bus=substation_lv_i,
-           p_set=timeseries_load(regions.geometry, regions.country))
+    opsd_load = timeseries_opsd(snakemake.input.opsd_load)
+
+    nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index('id')
+
+    def normed(x): return x.divide(x.sum())
+
+    def upsample(cntry, group):
+        l = opsd_load[cntry]
+        if len(group) == 1:
+            return pd.DataFrame({group.index[0]: l})
+        else:
+            nuts3_cntry = nuts3.loc[nuts3.country == cntry]
+            transfer = vtransfer.Shapes2Shapes(group, nuts3_cntry.geometry, normed=False).T.tocsr()
+            gdp_n = pd.Series(transfer.dot(nuts3.gdp.fillna(1.).values), index=group.index)
+            pop_n = pd.Series(transfer.dot(nuts3.pop.fillna(1.).values), index=group.index)
+
+            # relative factors 0.6 and 0.4 have been determined from a linear
+            # regression on the country to continent load data (refer to vresutils.load._upsampling_weights)
+            factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
+            return pd.DataFrame(factors.values * l.values[:,np.newaxis],
+                                index=l.index, columns=factors.index)
+
+    load = pd.concat([upsample(cntry, group)
+                      for cntry, group in regions.geometry.groupby(regions.country)], axis=1)
+
+    n.madd("Load", substation_lv_i, bus=substation_lv_i, p_set=load)

 ### Set line costs
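The rewritten attach_load distributes each national OPSD load time series over that country's bus regions: a country with a single region receives the series unchanged, while multi-region countries are split with fixed weights of 0.6 on GDP and 0.4 on population, aggregated from the NUTS3 statistics via vtransfer.Shapes2Shapes. The weighting arithmetic in isolation, as a self-contained sketch with made-up numbers rather than the commit's actual data:

    import numpy as np
    import pandas as pd

    # toy national hourly load (MW) and per-region GDP / population totals
    load_cntry = pd.Series([50., 55., 60.], name='DE')
    gdp_n = pd.Series({'region_a': 300., 'region_b': 100.})     # illustrative
    pop_n = pd.Series({'region_a': 2.0e6, 'region_b': 1.5e6})   # illustrative

    def normed(x): return x.divide(x.sum())

    # 0.6/0.4 GDP/population mix as in the commit; the region columns sum back to the national series
    factors = normed(0.6 * normed(gdp_n) + 0.4 * normed(pop_n))
    load_regions = pd.DataFrame(factors.values * load_cntry.values[:, np.newaxis],
                                index=load_cntry.index, columns=factors.index)
    assert np.allclose(load_regions.sum(axis=1), load_cntry)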
@@ -234,7 +255,7 @@ def attach_hydro(n, costs, ppl):
     hydro_max_hours = c.get('hydro_max_hours')
     if hydro_max_hours is None:
-        hydro_e_country = vhydro.get_hydro_capas()['E_store[TWh]'].clip(lower=0.2)*1e6
+        hydro_e_country = pd.read_csv(snakemake.input.hydro_capacities, index_col=0).clip(lower=0.2)*1e6
         hydro_max_hours_country = hydro_e_country / hydro.p_nom.groupby(country).sum()
         hydro_max_hours = country.loc[hydro.index].map(hydro_max_hours_country)
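Behind these three lines: the bundled CSV supplies a per-country reservoir energy (TWh, clipped to at least 0.2 TWh), which is converted to MWh and divided by the country's total installed hydro power to give an average storage time in hours, then mapped back onto the individual plants. A standalone sketch of that arithmetic with illustrative numbers (the exact column layout of hydro_capacities.csv is not shown in the diff; the old code selected the 'E_store[TWh]' column):

    import pandas as pd

    # illustrative storage energy per country (TWh) and installed power per plant (MW)
    hydro_e_country = pd.Series({'AT': 3.2, 'CH': 8.4}).clip(lower=0.2) * 1e6   # TWh -> MWh
    p_nom = pd.Series({'plant1': 400., 'plant2': 600., 'plant3': 1000.})
    country = pd.Series({'plant1': 'AT', 'plant2': 'AT', 'plant3': 'CH'})

    hydro_max_hours_country = hydro_e_country / p_nom.groupby(country).sum()
    hydro_max_hours = country.map(hydro_max_hours_country)   # storage hours per plant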
@@ -368,6 +389,8 @@ if __name__ == "__main__":
             tech_costs='data/costs/costs.csv',
             regions="resources/regions_onshore.geojson",
             powerplants="resources/powerplants.csv",
+            hydro_capacities='data/hydro_capacities.csv',
+            opsd_load='data/time_series_60min_singleindex_filtered.csv',
             **{'profile_' + t: "resources/profile_" + t + ".nc"
                for t in snakemake.config['renewable']})
     )

View File

@@ -2,7 +2,8 @@
 import atlite
 import pandas as pd
-from vresutils import shapes as vshapes, hydro as vhydro
+import geopandas as gpd
+from vresutils import hydro as vhydro

 import logging
 logger = logging.getLogger(__name__)
 logger.setLevel(level=snakemake.config['logging_level'])
@@ -10,10 +11,10 @@ logger.setLevel(level=snakemake.config['logging_level'])
 cutout = atlite.Cutout(snakemake.config['renewable']['hydro']['cutout'])

 countries = snakemake.config['countries']
-country_shapes = pd.Series(vshapes.countries(countries)).reindex(countries)
+country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('id')['geometry'].reindex(countries)
 country_shapes.index.name = 'countries'
-eia_stats = vhydro.get_eia_annual_hydro_generation().reindex(columns=countries)
+eia_stats = vhydro.get_eia_annual_hydro_generation(snakemake.input.eia_hydro_generation).reindex(columns=countries)

 inflow = cutout.runoff(shapes=country_shapes,
                        smooth=True,
                        lower_threshold_quantile=True,
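The country shapes are now read from the previously built country_shapes.geojson and the EIA statistics from the bundled CSV, so the script no longer needs network access. Unpacking the long one-liner above (a minimal sketch, assuming the GeoJSON carries an 'id' column with country codes, as the set_index('id') implies):

    import geopandas as gpd

    countries = ['AT', 'CH', 'DE']          # normally snakemake.config['countries']
    country_shapes = (gpd.read_file('resources/country_shapes.geojson')
                      .set_index('id')['geometry']
                      .reindex(countries))  # keep only the configured countries, in order
    country_shapes.index.name = 'countries'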

View File

@@ -9,7 +9,9 @@ config = snakemake.config['renewable'][snakemake.wildcards.technology]
 cutout = atlite.Cutout(config['cutout'])

 total_capacity = config['capacity_per_sqm'] * vlanduse._cutout_cell_areas(cutout)
-potentials = xr.DataArray(total_capacity * vlanduse.corine_for_cutout(cutout, **config['corine']),
+potentials = xr.DataArray(total_capacity *
+                          vlanduse.corine_for_cutout(cutout, fn=snakemake.input.corine,
+                                                     natura_fn=snakemake.input.natura, **config['corine']),
                           [cutout.meta.indexes['y'], cutout.meta.indexes['x']])

 if 'height_cutoff' in config:
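For orientation, the surrounding xarray construction simply wraps a 2-D capacity grid in the cutout's y/x coordinate indexes; a toy sketch with a random array standing in for the landuse-filtered capacities returned by vlanduse.corine_for_cutout:

    import numpy as np
    import pandas as pd
    import xarray as xr

    y = pd.Index(np.linspace(35., 70., 4), name='y')   # stand-in cutout grid coordinates
    x = pd.Index(np.linspace(-10., 30., 5), name='x')
    capacity = np.random.rand(len(y), len(x)) * 10.     # MW per grid cell, illustrative

    # same layout as the data written to resources/potentials_{technology}.nc
    potentials = xr.DataArray(capacity, [y, x])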

View File

@@ -6,7 +6,6 @@ import xarray as xr
 import pandas as pd
 import geopandas as gpd
-from vresutils import landuse as vlanduse

 import logging
 logger = logging.getLogger(__name__)
 logging.basicConfig(level=snakemake.config['logging_level'])