Electricity network, Clustering and Solving

Jonas Hoersch 2018-01-29 22:28:33 +01:00
parent a8340cacc0
commit a867e245b3
13 changed files with 1370 additions and 137 deletions

Snakefile

@@ -3,13 +3,13 @@ configfile: "config.yaml"
localrules: all, prepare_links_p_nom, base_network, add_electricity, add_sectors, extract_summaries, plot_network, scenario_comparison
wildcard_constraints:
resarea="[a-zA-Z0-9]+",
cost="[-a-zA-Z0-9]+",
lv="[0-9\.]+",
clusters="[0-9]+",
sectors="[+a-zA-Z0-9]+",
opts="[-+a-zA-Z0-9]+"
rule all:
input: "results/version-{version}/summaries/costs2-summary.csv".format(version=config['version'])
input: "results/summaries/costs2-summary.csv"
rule prepare_links_p_nom:
output: 'data/links_p_nom.csv'
@@ -26,92 +26,102 @@ rule base_network:
eg_transformers='data/entsoegridkit/transformers.csv',
parameter_corrections='data/parameter_corrections.yaml',
links_p_nom='data/links_p_nom.csv'
output: "networks/base_{opts}.h5"
benchmark: "benchmarks/base_network_{opts}"
output: "networks/base.nc"
benchmark: "benchmarks/base_network"
threads: 1
resources: mem_mb=500
script: "scripts/base_network.py"
rule landuse_remove_protected_and_conservation_areas:
rule build_bus_regions:
input:
landuse = "data/Original_UTM35north/sa_lcov_2013-14_gti_utm35n_vs22b.tif",
protected_areas = "data/SAPAD_OR_2017_Q2/",
conservation_areas = "data/SACAD_OR_2017_Q2/"
output: "resources/landuse_without_protected_conservation.tiff"
benchmark: "benchmarks/landuse_remove_protected_and_conservation_areas"
threads: 1
resources: mem_mb=10000
script: "scripts/landuse_remove_protected_and_conservation_areas.py"
rule landuse_map_to_tech_and_supply_region:
input:
landuse = "resources/landuse_without_protected_conservation.tiff",
supply_regions = "data/supply_regions/supply_regions.shp",
resarea = lambda w: config['data']['resarea'][w.resarea]
base_network="networks/base.nc"
output:
raster = "resources/raster_{tech}_percent_{resarea}.tiff",
area = "resources/area_{tech}_{resarea}.csv"
benchmark: "benchmarks/landuse_map_to_tech_and_supply_region/{tech}_{resarea}"
threads: 1
resources: mem_mb=17000
script: "scripts/landuse_map_to_tech_and_supply_region.py"
regions_onshore="resources/regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson"
script: "scripts/build_bus_regions.py"
rule inflow_per_country:
input: EIA_hydro_gen="data/EIA_hydro_generation_2011_2014.csv"
output: "resources/hydro_inflow.nc"
benchmark: "benchmarks/inflow_per_country"
threads: 1
resources: mem_mb=1000
script: "scripts/inflow_per_country.py"
rule build_renewable_potentials:
output: "resources/potentials_{technology}.nc"
script: "scripts/build_renewable_potentials.py"
rule build_renewable_profiles:
input:
base_network="networks/base.nc",
potentials="resources/potentials_{technology}.nc",
regions=lambda wildcards: ("resources/regions_onshore.geojson"
if wildcards.technology in ('onwind', 'solar')
else "resources/regions_offshore.geojson")
output:
profile="resources/profile_{technology}.nc",
script: "scripts/build_renewable_profiles.py"
rule build_hydro_profile:
output: 'resources/profile_hydro.nc'
script: 'scripts/build_hydro_profile.py'
rule add_electricity:
input:
base_network='networks/base_{opts}.h5',
supply_regions='data/supply_regions/supply_regions.shp',
load='data/SystemEnergy2009_13.csv',
wind_pv_profiles='data/Wind_PV_Normalised_Profiles.xlsx',
wind_area='resources/area_wind_{resarea}.csv',
solar_area='resources/area_solar_{resarea}.csv',
existing_generators="data/Existing Power Stations SA.xlsx",
hydro_inflow="resources/hydro_inflow.csv",
tech_costs="data/technology_costs.xlsx"
output: "networks/elec_{cost}_{resarea}_{opts}.h5"
benchmark: "benchmarks/add_electricity/elec_{resarea}_{opts}"
base_network='networks/base.nc',
tech_costs='data/costs/costs.csv',
regions="resources/regions_onshore.geojson",
**{'profile_' + t: "resources/profile_" + t + ".nc"
for t in config['renewable']}
output: "networks/elec.nc"
benchmark: "benchmarks/add_electricity"
threads: 1
resources: mem_mb=1000
script: "scripts/add_electricity.py"
rule cluster_network:
input:
network='networks/{network}.nc',
regions_onshore="resources/regions_onshore.geojson",
regions_offshore="resources/regions_offshore.geojson"
output:
network='networks/{network}_{clusters}.nc',
regions_onshore="resources/regions_onshore_{network}_{clusters}.geojson",
regions_offshore="resources/regions_offshore_{network}_{clusters}.geojson"
benchmark: "benchmarks/cluster_network/{network}_{clusters}"
threads: 1
resources: mem_mb=1000
script: "scripts/cluster_network.py"
rule add_sectors:
input:
network="networks/elec_{cost}_{resarea}_{opts}.h5",
network="networks/elec_{cost}_{resarea}_{opts}.nc",
emobility="data/emobility"
output: "networks/sector_{cost}_{resarea}_{sectors}_{opts}.h5"
output: "networks/sector_{cost}_{resarea}_{sectors}_{opts}.nc"
benchmark: "benchmarks/add_sectors/sector_{resarea}_{sectors}_{opts}"
threads: 1
resources: mem_mb=1000
script: "scripts/add_sectors.py"
rule prepare_network:
input: 'networks/elec_{clusters}.nc'
output: 'networks/elec_{clusters}_lv{lv}_{opts}.nc'
threads: 1
resources: mem_mb=1000
script: "scripts/prepare_network.py"
rule solve_network:
input: network="networks/sector_{cost}_{resarea}_{sectors}_{opts}.h5"
output: "results/version-{version}/networks/{{cost}}_{{resarea}}_{{sectors}}_{{opts}}.h5".format(version=config['version'])
input: "networks/elec_{clusters}_lv{lv}_{opts}.nc"
output: "results/networks/{clusters}_lv{lv}_{opts}.nc"
shadow: "shallow"
log:
gurobi="logs/{cost}_{resarea}_{sectors}_{opts}_gurobi.log",
python="logs/{cost}_{resarea}_{sectors}_{opts}_python.log"
benchmark: "benchmarks/solve_network/{cost}_{resarea}_{sectors}_{opts}"
gurobi="logs/{clusters}_lv{lv}_{opts}_gurobi.log",
python="logs/{clusters}_lv{lv}_{opts}_python.log"
benchmark: "benchmarks/solve_network/{clusters}_lv{lv}_{opts}"
threads: 4
resources: mem_mb=19000 # for electricity only
resources: mem_mb=lambda w: 100000 * int(w.clusters) // 362
script: "scripts/solve_network.py"
rule plot_network:
input:
network='results/version-{version}/networks/{{cost}}_{{resarea}}_{{sectors}}_{{opts}}.h5'.format(version=config['version']),
network='results/networks/{cost}_{resarea}_{sectors}_{opts}.nc',
supply_regions='data/supply_regions/supply_regions.shp',
resarea=lambda w: config['data']['resarea'][w.resarea]
output:
only_map=touch('results/version-{version}/plots/network_{{cost}}_{{resarea}}_{{sectors}}_{{opts}}_{{attr}}'.format(version=config['version'])),
ext=touch('results/version-{version}/plots/network_{{cost}}_{{resarea}}_{{sectors}}_{{opts}}_{{attr}}_ext'.format(version=config['version']))
params: ext=['png', 'pdf']
'results/plots/network_{cost}_{resarea}_{sectors}_{opts}_{attr}.pdf'
script: "scripts/plot_network.py"
# rule plot_costs:
@@ -125,31 +135,30 @@ rule plot_network:
# exts=["pdf", "png"]
# scripts: "scripts/plot_costs.py"
rule scenario_comparison:
input:
expand('results/version-{version}/plots/network_{cost}_{sectors}_{opts}_{attr}_ext',
version=config['version'],
attr=['p_nom'],
**config['scenario'])
output:
html='results/version-{version}/plots/scenario_{{param}}.html'.format(version=config['version'])
params:
tmpl="network_[cost]_[resarea]_[sectors]_[opts]_[attr]_ext",
plot_dir='results/version-{}/plots'.format(config['version'])
script: "scripts/scenario_comparison.py"
# rule scenario_comparison:
# input:
# expand('results/plots/network_{cost}_{sectors}_{opts}_{attr}.pdf',
# version=config['version'],
# attr=['p_nom'],
# **config['scenario'])
# output:
# html='results/plots/scenario_{param}.html'
# params:
# tmpl="network_[cost]_[resarea]_[sectors]_[opts]_[attr]",
# plot_dir='results/plots'
# script: "scripts/scenario_comparison.py"
rule extract_summaries:
input:
expand("results/version-{version}/networks/{cost}_{sectors}_{opts}.h5",
version=config['version'],
**config['scenario'])
output:
**{n: "results/version-{version}/summaries/{}-summary.csv".format(n, version=config['version'])
for n in ['costs', 'costs2', 'e_curtailed', 'e_nom_opt', 'e', 'p_nom_opt']}
params:
scenario_tmpl="[cost]_[resarea]_[sectors]_[opts]",
scenarios=config['scenario']
script: "scripts/extract_summaries.py"
# rule extract_summaries:
# input:
# expand("results/networks/{cost}_{sectors}_{opts}.nc",
# **config['scenario'])
# output:
# **{n: "results/summaries/{}-summary.csv".format(n)
# for n in ['costs', 'costs2', 'e_curtailed', 'e_nom_opt', 'e', 'p_nom_opt']}
# params:
# scenario_tmpl="[cost]_[resarea]_[sectors]_[opts]",
# scenarios=config['scenario']
# script: "scripts/extract_summaries.py"
# Local Variables:

config.yaml

@@ -3,45 +3,76 @@ logging_level: INFO
scenario:
sectors: [E] # ,E+EV,E+BEV,E+BEV+V2G] # [ E+EV, E+BEV, E+BEV+V2G ]
cost: [diw2030]
lv: [1., 1.125, 1.25, 1.5, 2.0, 3.0]
opts: [Co2L, Co2L-T] #, LC-FL, LC-T, Ep-T, Co2L-T]
clusters: [37, 45, 64, 90, 128, 181, 256, 362] # np.r_[37, (2**np.arange(5.5, 9, 0.5)).astype(int)]
opts: [Co2L] #, LC-FL, LC-T, Ep-T, Co2L-T]
countries: ['AL', 'AT', 'BA', 'BE', 'BG', 'CH', 'CZ', 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GR', 'HR', 'HU', 'IE', 'IT', 'LT', 'LU', 'LV', 'ME', 'MK', 'NL', 'NO', 'PL', 'PT', 'RO', 'RS', 'SE', 'SI', 'SK']
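The commented expression next to the clusters list above reproduces exactly those levels, 37 plus the half-powers of two from 2**5.5 to 2**8.5; a quick check (not part of the commit):

import numpy as np
levels = np.r_[37, (2**np.arange(5.5, 9, 0.5)).astype(int)]
assert (levels == [37, 45, 64, 90, 128, 181, 256, 362]).all()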
historical_year: "2012"
snapshots:
# arguments to pd.date_range
start: "2013-01-01"
end: "2014-01-01"
closed: 'left' # end is not inclusive
electricity:
voltages: [220., 300., 380.]
co2limit: xxx
co2limit: 7.75e+7 # 0.05 * 3.1e9*0.5
extendable_carriers:
Generator: [OCGT]
StorageUnit: [Battery, H2] # [CAES]
SAFE_reservemargin: 0.1
StorageUnit: [battery, H2] # [CAES]
max_hours:
Battery: 3
battery: 3
H2: 10
reanalysis:
cutout: europe_2011_2016
renewable:
onwind:
cutout: europe-2012-2016-era5
resource:
method: wind
turbine: Vestas_V112_3MW
# ScholzPhd Tab 4.3.1: 10MW/km^2
capacity_per_sqm: 3
corine:
#The selection of CORINE Land Cover [1] types that are allowed for wind and solar are based on [2] p.42 / p.28
#
#[1] https://www.eea.europa.eu/ds_resolveuid/C9RK15EA06
#
#[2] Scholz, Y. (2012). Renewable energy based electricity supply at low costs: development of the REMix model and application for Europe.
grid_codes: [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 31, 32]
distance: 1000
distance_grid_codes: [1, 2, 3, 4, 5, 6]
natura: true
offwind:
cutout: europe-2012-2016-era5
resource:
method: wind
turbine: NREL_ReferenceTurbine_5MW_offshore
# ScholzPhd Tab 4.3.1: 10MW/km^2
capacity_per_sqm: 3
height_cutoff: 50
corine:
grid_codes: [44, 255]
natura: true
solar:
cutout: europe-2013-2015-sarah
resource:
method: pv
panel: CSi
orientation: latitude_optimal
# ScholzPhd Tab 4.3.1: 170 MW/km^2
capacity_per_sqm: 1.7
corine:
grid_codes: [44, 255]
natura: true
hydro:
cutout: europe-2012-2016-era5
carriers: [ror, PHS, hydro]
PHS_max_hours: 6
landusetype_percent:
wind:
- [[7, 8, 9, 41], 80]
# - [[5, 6], 50]
# - [[11, 12, 14, 15], 10]
solar:
- [[7, 8, 9, 41], 80]
# - [[11, 12, 14, 15], 50]
# - [[46, 47, 51, 56, 64, 68, 72], 10]
capacity_per_sqm:
wind: 5 # half of 10 (IWES)
solar: 16.5 # half of 33 (IWES)
lines:
types:
@@ -61,23 +92,24 @@ transformers:
type: ''
costs:
year: 2030
# From a Lion Hirth paper, also reflects average of Noothout et al 2016
discountrate: 0.07
# [EUR/USD] ECB: https://www.ecb.europa.eu/stats/exchange/eurofxref/html/eurofxref-graph-usd.en.html # noqa: E501
USD2013_to_EUR2013: 0.7532
# Marginal and capital costs can be overwritten
# capital_cost:
# Wind: Bla
marginal_cost: #
PV: 0.01
Wind: 0.015
EUR_to_ZAR: 15.63
solar: 0.01
onwind: 0.015
offwind: 0.015
hydro: 0.
emission_prices: # only used with the option Ep (emission prices)
# Externality costs from Integrated Energy Plan by the ZA DOE
co2: 0.27e+3
sox: 7.6e+3
nox: 4.5e+3
hg: 41484.e-6 # is also part of the excel sheet
particulate: 11.3e+3
co2: 0.
solving:
options:

@@ -1 +1 @@
Subproject commit 7cf4a6117db06a8dee7d8203d6bb427fa0336872
Subproject commit 4b475048c82040d1c4dd307a7729a12a8c1a533f

(new Jupyter notebook)

@@ -0,0 +1,311 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import yaml"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with open('../config.yaml') as f:\n",
" config = yaml.load(f)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from vresutils import shapes as vshapes\n",
"from vresutils import plot as vplot"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pypsa\n",
"import numpy as np\n",
"import xarray as xr\n",
"import pandas as pd\n",
"import geopandas as gpd"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib as mpl\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n = pypsa.Network(\"../networks/base.nc\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Regions and p_nom_max"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Onshore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"regions = gpd.read_file('../resources/regions_onshore.geojson').set_index('id')\n",
"regions.index.name = 'name'\n",
"regions['area'] = regions.to_crs(dict(proj='aea')).area / 1e6"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = xr.open_dataset('../resources/profile_onwind.nc')\n",
"p_nom_max = ds['p_nom_max']"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds['profile'].mean()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds.sel(name=ds['profile'].mean('time') > 0.01)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, ax = plt.subplots(figsize=(12, 8))\n",
"plt.colorbar(vplot.shapes(regions['geometry'], p_nom_max.to_pandas()/regions['area'], ax=ax))\n",
"ax.set_aspect('equal')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds['profile'].mean('time')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, ax = plt.subplots(figsize=(12, 8))\n",
"plt.colorbar(vplot.shapes(regions['geometry'], ds['profile'].mean('time').to_pandas(), ax=ax))\n",
"ax.set_aspect('equal')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Offshore"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"regions = gpd.read_file('../resources/regions_offshore.geojson').set_index('id')\n",
"regions.index.name = 'name'\n",
"regions['area'] = regions.to_crs(dict(proj='aea')).area / 1e6"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ds = xr.open_dataset('../resources/profile_offwind.nc')\n",
"p_nom_max = ds['p_nom_max']\n",
"\n",
"fig, ax = plt.subplots(figsize=(12, 8))\n",
"plt.colorbar(vplot.shapes(regions['geometry'], p_nom_max.to_pandas()/regions['area'], ax=ax))\n",
"ax.set_aspect('equal')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"potentials = xr.open_dataarray('../resources/potentials_onwind.nc')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vsha"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vshapes.country_cover(config['countries']).bounds"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vplot.shapes([vshapes.country_cover(config['countries'])])\n",
"vplot.shapes([vshapes.country_cover(config['countries'], include_eez=False)], facecolors='yellow')\n",
"vplot.shapes(vshapes.countries(config['countries']), facecolors='None')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig, ax = plt.subplots(figsize=(12, 8))\n",
"potentials.plot()\n",
"#vplot.shapes(regions['geometry'], p_nom_max.to_pandas() == 0., facecolors='None', ax=ax)\n",
"\n",
"ax.set_aspect('equal')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import atlite"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"atlite.Cutout(config['renewable']['onwind']['cutout'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
},
"toc": {
"colors": {
"hover_highlight": "#DAA520",
"navigate_num": "#000000",
"navigate_text": "#333333",
"running_highlight": "#FF0000",
"selected_highlight": "#FFD700",
"sidebar_border": "#EEEEEE",
"wrapper_background": "#FFFFFF"
},
"moveMenuLeft": true,
"nav_menu": {
"height": "12px",
"width": "252px"
},
"navigate_menu": true,
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"threshold": 4,
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false,
"widenNotebook": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}

scripts/add_electricity.py (new file)

@@ -0,0 +1,361 @@
# coding: utf-8
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import scipy as sp
import xarray as xr
import geopandas as gpd
from vresutils.costdata import annuity
from vresutils.load import timeseries_shapes as timeseries_load
from vresutils import hydro as vhydro
import pypsa
import powerplantmatching as ppm
def normed(s): return s/s.sum()
def _add_missing_carriers_from_costs(n, costs, carriers):
missing_carriers = pd.Index(carriers).difference(n.carriers.index)
emissions_cols = costs.columns.to_series().loc[lambda s: s.str.endswith('_emissions')].values
n.import_components_from_dataframe(costs.loc[missing_carriers, emissions_cols].fillna(0.), 'Carrier')
def load_costs(Nyears=1.):
c = snakemake.config['costs']
# set all asset costs and other parameters
costs = pd.read_csv(snakemake.input.tech_costs, index_col=list(range(3))).sort_index()
# correct units to MW and EUR
costs.loc[costs.unit.str.contains("/kW"),"value"] *= 1e3
costs.loc[costs.unit.str.contains("USD"),"value"] *= c['USD2013_to_EUR2013']
costs = costs.loc[idx[:,c['year'],:], "value"].unstack(level=2).groupby("technology").sum()
costs = costs.fillna({"CO2 intensity" : 0,
"FOM" : 0,
"VOM" : 0,
"discount rate" : c['discountrate'],
"efficiency" : 1,
"fuel" : 0,
"investment" : 0,
"lifetime" : 25})
costs["capital_cost"] = ((annuity(costs["lifetime"], costs["discount rate"]) + costs["FOM"]/100.) *
costs["investment"] * Nyears)
costs['marginal_cost'] = costs['VOM'] + costs['fuel'] / costs['efficiency']
costs = costs.rename(columns={"CO2 intensity": "co2_emissions"})
def costs_for_storage(store, link1, link2=None, max_hours=1.):
capital_cost = link1['capital_cost'] + max_hours * store['capital_cost']
efficiency = link1['efficiency']**0.5
if link2 is not None:
capital_cost += link2['capital_cost']
efficiency *= link2['efficiency']**0.5
return pd.Series(dict(capital_cost=capital_cost,
efficiency=efficiency,
co2_emissions=0.))
max_hours = snakemake.config['electricity']['max_hours']
costs.loc["battery"] = \
costs_for_storage(costs.loc["battery storage"], costs.loc["battery inverter"],
max_hours=max_hours['battery'])
costs.loc["H2"] = \
costs_for_storage(costs.loc["hydrogen storage"], costs.loc["fuel cell"], costs.loc["electrolysis"],
max_hours=max_hours['H2'])
for attr in ('marginal_cost', 'capital_cost'):
overwrites = c.get(attr)
if overwrites is not None:
overwrites = pd.Series(overwrites)
costs.loc[overwrites.index, attr] = overwrites
return costs
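load_costs annualises the overnight investment with the annuity factor imported from vresutils.costdata; a minimal sketch of that factor, assuming the standard definition (the function itself is not part of this diff):

def annuity(n, r):
    # annual payment per unit of investment over a lifetime of n years
    # at discount rate r
    return r / (1. - 1. / (1. + r)**n)

# with the configured discountrate of 0.07 and the default 25 year lifetime:
# annuity(25, 0.07) ~= 0.0858, so capital_cost ~= (0.0858 + FOM/100) * investment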
def load_powerplants(n):
ppl = ppm.collection.MATCHED_dataset(aggregated_hydros=False, include_unavailables=True,
subsume_uncommon_fueltypes=True)
ppl = ppl.loc[ppl.lon.notnull() & ppl.lat.notnull()]
substation_lv_i = n.buses.index[n.buses['substation_lv']]
kdtree = sp.spatial.cKDTree(n.buses.loc[substation_lv_i, ['x','y']].values)
ppl = ppl.assign(bus=substation_lv_i[kdtree.query(ppl[['lon','lat']].values)[1]])
return ppl
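The cKDTree query in load_powerplants attaches every matched power plant to the nearest low-voltage substation by Euclidean distance in (lon, lat); a toy illustration with made-up coordinates:

import numpy as np
import scipy.spatial

bus_xy = np.array([[10.0, 50.0], [13.4, 52.5]])       # two substations (x, y)
plant_lonlat = np.array([[13.1, 52.3], [9.9, 49.8]])  # two plants (lon, lat)
_, ind = scipy.spatial.cKDTree(bus_xy).query(plant_lonlat)
# ind == [1, 0]: the first plant maps to the second bus and vice versa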
# ## Attach components
# ### Load
def attach_load(n):
substation_lv_i = n.buses.index[n.buses['substation_lv']]
regions = gpd.read_file(snakemake.input.regions).set_index('name').reindex(substation_lv_i)
n.madd("Load", substation_lv_i,
bus=substation_lv_i,
p_set=timeseries_load(regions.geometry, regions.country))
### Set line costs
def update_transmission_costs(n, costs):
c = snakemake.config['lines']
n.lines['capital_cost'] = (n.lines['length'] * c['length_factor'] *
costs.at['HVAC overhead', 'capital_cost'])
dc_b = n.links.carrier == 'DC'
n.links.loc[dc_b, 'capital_cost'] = (n.links.loc[dc_b, 'length'] * c['length_factor'] *
costs.at['HVDC overhead', 'capital_cost'] +
costs.at['HVDC inverter pair', 'capital_cost'])
# ### Generators
def attach_wind_and_solar(n, costs):
for tech in snakemake.config['renewable']:
if tech == 'hydro': continue
n.add("Carrier", name=tech)
with xr.open_dataset(getattr(snakemake.input, 'profile_' + tech)) as ds:
n.madd("Generator", ds.indexes['bus'], ' ' + tech,
bus=ds.indexes['bus'],
carrier=tech,
p_nom_extendable=True,
p_nom_max=ds['p_nom_max'].to_pandas(),
weight=ds['weight'].to_pandas(),
marginal_cost=costs.at[tech, 'marginal_cost'],
capital_cost=costs.at[tech, 'capital_cost'],
efficiency=costs.at[tech, 'efficiency'],
p_max_pu=ds['profile'].transpose('time', 'bus').to_pandas())
# # Generators
def attach_existing_generators(n, costs):
raise NotImplementedError("comes later")
def attach_hydro(n, costs, ppl):
c = snakemake.config['renewable']['hydro']
carriers = c.get('carriers', ['ror', 'PHS', 'hydro'])
_add_missing_carriers_from_costs(n, costs, carriers)
ppl = ppl.loc[ppl['Fueltype'] == 'Hydro']
ppl = ppl.set_index(pd.RangeIndex(len(ppl)).astype(str) + ' hydro', drop=False)
ppl = ppl.rename(columns={'Capacity':'p_nom', 'Technology': 'technology'})
ppl = ppl.loc[ppl.technology.notnull(), ['bus', 'p_nom', 'technology']]
ppl = ppl.assign(
has_inflow=ppl.technology.str.contains('Reservoir|Run-Of-River|Natural Inflow'),
has_store=ppl.technology.str.contains('Reservoir|Pumped Storage'),
has_pump=ppl.technology.str.contains('Pumped Storage')
)
country = ppl.loc[ppl.has_inflow, 'bus'].map(n.buses.country)
# distribute by p_nom in each country
dist_key = ppl.loc[ppl.has_inflow, 'p_nom'].groupby(country).transform(normed)
with xr.open_dataarray(snakemake.input.profile_hydro) as inflow:
inflow_t = (
inflow.sel(countries=country.values)
.rename({'countries': 'name'})
.assign_coords(name=ppl.index[ppl.has_inflow])
.transpose('time', 'name')
.to_pandas()
.multiply(dist_key / ppl.loc[ppl.has_inflow, 'p_nom'], axis=1)
)
if 'ror' in carriers:
ror = ppl.loc[ppl.has_inflow & ~ ppl.has_store]
n.madd("Generator", ror.index,
carrier='ror',
bus=ror['bus'],
efficiency=costs.at['ror', 'efficiency'],
capital_cost=costs.at['ror', 'capital_cost'],
weight=ror['p_nom'],
p_max_pu=(inflow_t.loc[:, ror.index]
.where(lambda df: df<=1., other=1.)))
if 'PHS' in carriers:
phs = ppl.loc[ppl.has_store & ppl.has_pump]
n.madd('StorageUnit', phs.index,
carrier='PHS',
bus=phs['bus'],
capital_cost=costs.at['PHS', 'capital_cost'],
max_hours=c['PHS_max_hours'],
efficiency_store=np.sqrt(costs.at['PHS','efficiency']),
efficiency_dispatch=np.sqrt(costs.at['PHS','efficiency']),
cyclic_state_of_charge=True,
inflow=inflow_t.loc[:, phs.index[phs.has_inflow]])
if 'hydro' in carriers:
hydro = ppl.loc[ppl.has_store & ~ ppl.has_pump & ppl.has_inflow]
hydro_max_hours = c.get('hydro_max_hours')
if hydro_max_hours is None:
hydro_e_country = vhydro.get_hydro_capas()['E_store[TWh]'].clip(lower=0.2)*1e6
hydro_max_hours_country = hydro_e_country / hydro.p_nom.groupby(country).sum()
hydro_max_hours = country.loc[hydro.index].map(hydro_max_hours_country)
n.madd('StorageUnit', hydro.index, carrier='hydro',
bus=hydro['bus'],
max_hours=hydro_max_hours,
capital_cost=(costs.at['hydro', 'capital_cost']
if c.get('hydro_capital_cost') else 0.),
marginal_cost=costs.at['hydro', 'marginal_cost'],
p_max_pu=1., # dispatch
p_min_pu=0., # store
efficiency_dispatch=costs.at['hydro', 'efficiency'],
efficiency_store=0.,
cyclic_state_of_charge=True,
inflow=inflow_t.loc[:, hydro.index])
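attach_hydro splits each country's inflow time series over its plants in proportion to p_nom and then divides by p_nom again, so all plants with inflow in one country see the same per-unit inflow; a toy check with hypothetical numbers:

import pandas as pd

p_nom = pd.Series([100., 300.])               # two plants in one country [MW]
dist_key = p_nom / p_nom.sum()                # [0.25, 0.75]
country_inflow = 1000.                        # country inflow in one hour [MWh]
per_unit = country_inflow * dist_key / p_nom  # 2.5 p.u. for both plants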
def attach_extendable_generators(n, costs, ppl):
elec_opts = snakemake.config['electricity']
carriers = list(elec_opts['extendable_carriers']['Generator'])
assert carriers == ['OCGT'], "Only OCGT plants as extendable generators allowed for now"
_add_missing_carriers_from_costs(n, costs, carriers)
if 'OCGT' in carriers:
ocgt = ppl.loc[ppl.Fueltype == 'Natural Gas'].groupby('bus', as_index=False).first()
n.madd('Generator', ocgt.index,
bus=ocgt['bus'],
carrier='OCGT',
p_nom_extendable=True,
p_nom=0.,
capital_cost=costs.at['OCGT', 'capital_cost'],
marginal_cost=costs.at['OCGT', 'marginal_cost'],
efficiency=costs.at['OCGT', 'efficiency'])
def attach_storage(n, costs):
elec_opts = snakemake.config['electricity']
carriers = elec_opts['extendable_carriers']['StorageUnit']
max_hours = elec_opts['max_hours']
_add_missing_carriers_from_costs(n, costs, carriers)
buses_i = n.buses.index[n.buses.substation_lv]
for carrier in carriers:
n.madd("StorageUnit", buses_i, ' ' + carrier,
bus=buses_i,
carrier=carrier,
p_nom_extendable=True,
capital_cost=costs.at[carrier, 'capital_cost'],
marginal_cost=costs.at[carrier, 'marginal_cost'],
efficiency_store=costs.at[carrier, 'efficiency'],
efficiency_dispatch=costs.at[carrier, 'efficiency'],
max_hours=max_hours[carrier],
cyclic_state_of_charge=True)
## Implementing them separately will come later!
##
# if 'H2' in carriers:
# h2_buses = n.madd("Bus", buses + " H2", carrier="H2")
# n.madd("Link", h2_buses + " Electrolysis",
# bus1=h2_buses,
# bus0=buses,
# p_nom_extendable=True,
# efficiency=costs.at["electrolysis", "efficiency"],
# capital_cost=costs.at["electrolysis", "capital_cost"])
# n.madd("Link", h2_buses + " Fuel Cell",
# bus0=h2_buses,
# bus1=buses,
# p_nom_extendable=True,
# efficiency=costs.at["fuel cell", "efficiency"],
# #NB: fixed cost is per MWel
# capital_cost=costs.at["fuel cell", "capital_cost"] * costs.at["fuel cell", "efficiency"])
# n.madd("Store", h2_buses,
# bus=h2_buses,
# e_nom_extendable=True,
# e_cyclic=True,
# capital_cost=costs.at["hydrogen storage", "capital_cost"])
# if 'battery' in carriers:
# b_buses = n.madd("Bus", buses + " battery", carrier="battery")
# network.madd("Store", b_buses,
# bus=b_buses,
# e_cyclic=True,
# e_nom_extendable=True,
# capital_cost=costs.at['battery storage', 'capital_cost'])
# network.madd("Link", b_buses + " charger",
# bus0=buses,
# bus1=b_buses,
# efficiency=costs.at['battery inverter', 'efficiency']**0.5,
# capital_cost=costs.at['battery inverter', 'capital_cost'],
# p_nom_extendable=True)
# network.madd("Link",
# nodes + " battery discharger",
# bus0=nodes + " battery",
# bus1=nodes,
# efficiency=costs.at['battery inverter','efficiency']**0.5,
# marginal_cost=options['marginal_cost_storage'],
# p_nom_extendable=True)
def add_co2limit(n, Nyears=1.):
n.add("GlobalConstraint", "CO2Limit",
carrier_attribute="co2_emissions", sense="<=",
constant=snakemake.config['electricity']['co2limit'] * Nyears)
def add_emission_prices(n, emission_prices=None, exclude_co2=False):
if emission_prices is None:
emission_prices = snakemake.config['costs']['emission_prices']
if exclude_co2: emission_prices.pop('co2')
ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * n.carriers).sum(axis=1)
n.generators['marginal_cost'] += n.generators.carrier.map(ep)
n.storage_units['marginal_cost'] += n.storage_units.carrier.map(ep)
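add_emission_prices converts the configured prices into a per-carrier marginal-cost adder through the *_emissions columns of n.carriers; a small sketch with hypothetical numbers:

import pandas as pd

emission_prices = {'co2': 30.}                          # EUR/t, hypothetical
carriers = pd.DataFrame({'co2_emissions': [0.36, 0.]},  # t/MWh
                        index=['OCGT', 'onwind'])
ep = (pd.Series(emission_prices).rename(lambda x: x + '_emissions')
      * carriers).sum(axis=1)
# ep: OCGT 10.8 EUR/MWh, onwind 0.0 -- added to marginal_cost by carrier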
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('../config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.wildcards = Dict()
snakemake.input = Dict(base_network='../networks/base.nc',
tech_costs='../data/costs.csv',
regions="../resources/regions_onshore.geojson",
**{'profile_' + t: "../resources/profile_" + t + ".nc"
for t in snakemake.config['renewable']})
snakemake.output = ['../networks/elec.nc']
logger.setLevel(snakemake.config['logging_level'])
n = pypsa.Network(snakemake.input.base_network)
Nyears = n.snapshot_weightings.sum()/8760.
costs = load_costs(Nyears)
ppl = load_powerplants(n)
attach_load(n)
update_transmission_costs(n, costs)
# attach_existing_generators(n, costs)
attach_wind_and_solar(n, costs)
attach_hydro(n, costs, ppl)
attach_extendable_generators(n, costs, ppl)
attach_storage(n, costs)
n.export_to_netcdf(snakemake.output[0])

scripts/base_network.py

@@ -7,12 +7,14 @@ import scipy as sp, scipy.spatial
from scipy.sparse import csgraph
from operator import attrgetter
from six import iteritems
from six.moves import filter
from itertools import count, chain
import shapely, shapely.prepared, shapely.wkt
from shapely.geometry import Point
from vresutils import shapes as vshapes
from vresutils.graph import BreadthFirstLevels
import logging
logger = logging.getLogger(__name__)
@@ -115,6 +117,7 @@ def _set_electrical_parameters_lines(lines):
lines.loc[lines["v_nom"] == v_nom, 'type'] = linetypes[v_nom]
lines['s_max_pu'] = snakemake.config['lines']['s_max_pu']
lines.loc[lines.under_construction.astype(bool), 'num_parallel'] = 0.
return lines
@@ -147,6 +150,18 @@ def _set_electrical_parameters_links(links):
return links
def _set_electrical_parameters_converters(converters):
converters['p_max_pu'] = snakemake.config['links']['s_max_pu']
converters['p_min_pu'] = -1. * snakemake.config['links']['s_max_pu']
converters['p_nom'] = 2000
# Converters are combined with links
converters['under_construction'] = False
converters['underground'] = False
return converters
def _set_electrical_parameters_transformers(transformers):
config = snakemake.config['transformers']
@@ -172,6 +187,72 @@ def _remove_unconnected_components(network):
return network[component == component_sizes.index[0]]
def _set_countries_and_substations(n):
buses = n.buses
def buses_in_shape(shape):
shape = shapely.prepared.prep(shape)
return pd.Series(
np.fromiter((shape.contains(Point(x, y))
for x, y in buses.loc[:,["x", "y"]].values),
dtype=bool, count=len(buses)),
index=buses.index
)
countries = snakemake.config['countries']
country_shapes = vshapes.countries(subset=countries, add_KV_to_RS=True,
tolerance=0.01, minarea=0.1)
offshore_shapes = vshapes.eez(subset=countries, tolerance=0.01)
substation_b = buses['symbol'].str.contains('substation', case=False)
def prefer_voltage(x, which):
index = x.index
if len(index) == 1:
return pd.Series(index, index)
key = (x.index[0]
if x['v_nom'].isnull().all()
else getattr(x['v_nom'], 'idx' + which)())
return pd.Series(key, index)
gb = buses.loc[substation_b].groupby(['x', 'y'], as_index=False,
group_keys=False, sort=False)
bus_map_low = gb.apply(prefer_voltage, 'min')
lv_b = (bus_map_low == bus_map_low.index).reindex(buses.index, fill_value=False)
bus_map_high = gb.apply(prefer_voltage, 'max')
hv_b = (bus_map_high == bus_map_high.index).reindex(buses.index, fill_value=False)
onshore_b = pd.Series(False, buses.index)
offshore_b = pd.Series(False, buses.index)
for country in countries:
onshore_shape = country_shapes[country]
onshore_country_b = buses_in_shape(onshore_shape)
onshore_b |= onshore_country_b
buses.loc[onshore_country_b, 'country'] = country
if country not in offshore_shapes: continue
offshore_country_b = buses_in_shape(offshore_shapes[country])
offshore_b |= offshore_country_b
buses.loc[offshore_country_b, 'country'] = country
buses['substation_lv'] = lv_b & onshore_b
buses['substation_off'] = offshore_b | (hv_b & onshore_b)
# Nearest country in numbers of hops defines country of homeless buses
c_nan_b = buses.country.isnull()
c = n.buses['country']
graph = n.graph()
n.buses.loc[c_nan_b, 'country'] = \
[(next(filter(len, map(lambda x: c.loc[x].dropna(), BreadthFirstLevels(graph, [b]))))
.value_counts().index[0])
for b in buses.index[c_nan_b]]
return buses
def base_network():
buses = _load_buses_from_eg()
@@ -181,16 +262,15 @@ def base_network():
lines = _load_lines_from_eg(buses)
transformers = _load_transformers_from_eg(buses)
# buses, lines, transformers = _split_aclines_with_several_voltages(buses, lines, transformers)
lines = _set_electrical_parameters_lines(lines)
links = _set_electrical_parameters_links(links)
transformers = _set_electrical_parameters_transformers(transformers)
links = _set_electrical_parameters_links(links)
converters = _set_electrical_parameters_converters(converters)
n = pypsa.Network()
n.name = 'PyPSA-Eur'
n.set_snapshots(pd.date_range(snakemake.config['historical_year'], periods=8760, freq='h'))
n.set_snapshots(pd.date_range(freq='h', **snakemake.config['snapshots']))
n.import_components_from_dataframe(buses, "Bus")
n.import_components_from_dataframe(lines, "Line")
@@ -198,13 +278,12 @@ def base_network():
n.import_components_from_dataframe(links, "Link")
n.import_components_from_dataframe(converters, "Link")
if 'T' in snakemake.wildcards.opts.split('-'):
raise NotImplementedError
n = _remove_unconnected_components(n)
_apply_parameter_corrections(n)
_set_countries_and_substations(n)
return n
if __name__ == "__main__":
@@ -223,7 +302,6 @@ if __name__ == "__main__":
links_p_nom='../data/links_p_nom.csv'
)
snakemake.wildcards = Dict(opts='LC')
with open('../config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.output = ['../networks/base_LC.nc']

scripts/build_bus_regions.py (new file)

@@ -0,0 +1,50 @@
import os
from operator import attrgetter
import pandas as pd
import geopandas as gpd
from vresutils import shapes as vshapes
from vresutils.graph import voronoi_partition_pts
import pypsa
countries = snakemake.config['countries']
n = pypsa.Network(snakemake.input.base_network)
country_shapes = vshapes.countries(subset=countries, add_KV_to_RS=True,
tolerance=0.01, minarea=0.1)
offshore_shapes = vshapes.eez(subset=countries, tolerance=0.01)
onshore_regions = []
offshore_regions = []
for country in countries:
c_b = n.buses.country == country
onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape),
'country': country
}, index=onshore_locs.index))
if country not in offshore_shapes: continue
offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape),
'country': country
}, index=offshore_locs.index)
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c)
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
s.reset_index().to_file(fn, driver='GeoJSON')
save_to_geojson(pd.concat(onshore_regions), snakemake.output.regions_onshore)
save_to_geojson(pd.concat(offshore_regions), snakemake.output.regions_offshore)

scripts/build_hydro_profile.py (new file)

@@ -0,0 +1,22 @@
#!/usr/bin/env python
import atlite
import pandas as pd
from vresutils import shapes as vshapes, hydro as vhydro
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level=snakemake.config['logging_level'])
cutout = atlite.Cutout(snakemake.config['renewable']['hydro']['cutout'])
countries = snakemake.config['countries']
country_shapes = pd.Series(vshapes.countries(countries)).reindex(countries)
country_shapes.index.name = 'countries'
eia_stats = vhydro.get_eia_annual_hydro_generation().reindex(columns=countries)
inflow = cutout.runoff(shapes=country_shapes,
smooth=True,
lower_threshold_quantile=True,
normalize_using_yearly=eia_stats)
inflow.to_netcdf(snakemake.output[0])

scripts/build_renewable_potentials.py (new file)

@@ -0,0 +1,18 @@
import atlite
import xarray as xr
import pandas as pd
from vresutils import landuse as vlanduse
config = snakemake.config['renewable'][snakemake.wildcards.technology]
cutout = atlite.Cutout(config['cutout'])
total_capacity = config['capacity_per_sqm'] * vlanduse._cutout_cell_areas(cutout)
potentials = xr.DataArray(total_capacity * vlanduse.corine_for_cutout(cutout, **config['corine']),
[cutout.meta.indexes['y'], cutout.meta.indexes['x']])
if 'height_cutoff' in config:
potentials.values[(cutout.meta['height'] < - config['height_cutoff']).transpose(*potentials.dims)] = 0.
potentials.to_netcdf(snakemake.output[0])

scripts/build_renewable_profiles.py (new file)

@@ -0,0 +1,49 @@
#!/usr/bin/env python
import atlite
import numpy as np
import xarray as xr
import pandas as pd
import geopandas as gpd
from vresutils import landuse as vlanduse
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level=snakemake.config['logging_level'])
config = snakemake.config['renewable'][snakemake.wildcards.technology]
time = pd.date_range(freq='m', **snakemake.config['snapshots'])
params = dict(years=slice(*time.year[[0, -1]]), months=slice(*time.month[[0, -1]]))
regions = gpd.read_file(snakemake.input.regions).set_index('name')
regions.index.name = 'bus'
cutout = atlite.Cutout(config['cutout'], **params)
# Potentials
potentials = xr.open_dataarray(snakemake.input.potentials)
# Indicatormatrix
indicatormatrix = cutout.indicatormatrix(regions.geometry)
resource = config['resource']
func = getattr(cutout, resource.pop('method'))
capacity_factor = func(capacity_factor=True, **resource)
layout = capacity_factor * potentials
profile, capacities = func(matrix=indicatormatrix, index=regions.index,
layout=layout, per_unit=True, return_capacity=True,
**resource)
relativepotentials = (potentials / layout).stack(spatial=('y', 'x')).values
p_nom_max = xr.DataArray([np.nanmin(relativepotentials[row.nonzero()[1]])
if row.getnnz() > 0 else 0
for row in indicatormatrix.tocsr()],
[capacities.coords['bus']]) * capacities
ds = xr.merge([profile.rename('profile'),
capacities.rename('weight'),
p_nom_max.rename('p_nom_max')])
(ds.sel(bus=ds['profile'].mean('time') > config.get('min_p_max_pu', 0.))
.to_netcdf(snakemake.output.profile))
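The p_nom_max construction above assumes capacity in a region is deployed proportionally to layout = capacity_factor * potentials and scales that layout up until the first grid cell saturates its potential; the same arithmetic on toy numbers:

import numpy as np

potential = np.array([100., 50.])          # installable MW per grid cell
layout = np.array([0.3, 0.1]) * potential  # deployment key within one region
capacity = layout.sum()                    # per-unit basis of the profile
scale = np.min(potential / layout)         # first saturating cell limits it
p_nom_max = scale * capacity               # ~116.7 MW here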

scripts/cluster_network.py (new file)

@@ -0,0 +1,201 @@
# coding: utf-8
import pandas as pd
idx = pd.IndexSlice
import os
import numpy as np
import scipy as sp
import xarray as xr
import geopandas as gpd
import shapely, shapely.ops
# needed by cluster_regions and the plotting helpers below
import matplotlib.pyplot as plt
import seaborn as sns
from vresutils import shapes as vshapes
from six.moves import reduce
import pypsa
from pypsa.networkclustering import (busmap_by_stubs, busmap_by_kmeans,
_make_consense, get_clustering_from_busmap)
def normed(x):
return (x/x.sum()).fillna(0.)
def simplify_network_to_380(n):
## All goes to v_nom == 380
n.buses['v_nom'] = 380.
linetype_380, = n.lines.loc[n.lines.v_nom == 380., 'type'].unique()
lines_v_nom_b = n.lines.v_nom != 380.
n.lines.loc[lines_v_nom_b, 'num_parallel'] *= (n.lines.loc[lines_v_nom_b, 'v_nom'] / 380.)**2
n.lines.loc[lines_v_nom_b, 'v_nom'] = 380.
n.lines.loc[lines_v_nom_b, 'type'] = linetype_380
# Replace transformers by lines
trafo_map = pd.Series(n.transformers.bus1.values, index=n.transformers.bus0.values)
trafo_map = trafo_map[~trafo_map.index.duplicated(keep='first')]
several_trafo_b = trafo_map.isin(trafo_map.index)
trafo_map.loc[several_trafo_b] = trafo_map.loc[several_trafo_b].map(trafo_map)
missing_buses_i = n.buses.index.difference(trafo_map.index)
trafo_map = trafo_map.append(pd.Series(missing_buses_i, missing_buses_i))
for c in n.one_port_components|n.branch_components:
df = n.df(c)
for col in df.columns:
if col.startswith('bus'):
df[col] = df[col].map(trafo_map)
n.mremove("Transformer", n.transformers.index)
n.mremove("Bus", n.buses.index.difference(trafo_map))
return n, trafo_map
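The factor (v_nom/380)**2 on num_parallel keeps the per-unit series impedance of a rescaled line unchanged, since x_pu = x_per_length * length / (num_parallel * v_nom**2); a one-line consistency check:

# mapping a 220 kV line onto the 380 kV line type:
n_380 = 1.0 * (220. / 380.)**2                      # ~0.335 parallel circuits
assert abs(n_380 * 380.**2 - 1.0 * 220.**2) < 1e-9  # x_pu denominator unchanged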
def remove_stubs(n):
n.determine_network_topology()
busmap = busmap_by_stubs(n, ['carrier', 'country'])
n.buses.loc[busmap.index, ['x','y']] = n.buses.loc[busmap, ['x','y']].values
clustering = get_clustering_from_busmap(
n, busmap,
bus_strategies=dict(country=_make_consense("Bus", "country")),
line_length_factor=snakemake.config['lines']['length_factor'],
aggregate_generators_weighted=True,
aggregate_one_ports=["Load", "StorageUnit"]
)
return clustering.network, busmap
def weighting_for_country(x):
conv_carriers = {'OCGT', 'PHS', 'hydro'}
gen = (n
.generators.loc[n.generators.carrier.isin(conv_carriers)]
.groupby('bus').p_nom.sum()
.reindex(n.buses.index, fill_value=0.) +
n
.storage_units.loc[n.storage_units.carrier.isin(conv_carriers)]
.groupby('bus').p_nom.sum()
.reindex(n.buses.index, fill_value=0.))
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
b_i = x.index
g = normed(gen.reindex(b_i, fill_value=0))
l = normed(load.reindex(b_i, fill_value=0))
w = g + l
return (w * (100. / w.max())).astype(int)
## Plot weighting for Germany
def plot_weighting(n, country):
n.plot(bus_sizes=(2*weighting_for_country(n.buses.loc[n.buses.country == country])).reindex(n.buses.index, fill_value=1))
p = vshapes.countries()['DE']
plt.xlim(p.bounds[0], p.bounds[2])
plt.ylim(p.bounds[1], p.bounds[3])
# # Determining the number of clusters per country
def distribute_clusters(n_clusters):
load = n.loads_t.p_set.mean().groupby(n.loads.bus).sum()
loadc = load.groupby([n.buses.country, n.buses.sub_network]).sum()
n_cluster_per_country = n_clusters * normed(loadc)
one_cluster_b = n_cluster_per_country < 0.5
n_one_cluster, n_one_cluster_prev = one_cluster_b.sum(), 0
while n_one_cluster > n_one_cluster_prev:
n_clusters_rem = n_clusters - one_cluster_b.sum()
assert n_clusters_rem > 0
n_cluster_per_country[~one_cluster_b] = n_clusters_rem * normed(loadc[~one_cluster_b])
one_cluster_b = n_cluster_per_country < 0.5
n_one_cluster, n_one_cluster_prev = one_cluster_b.sum(), n_one_cluster
n_cluster_per_country[one_cluster_b] = 1.1
n_cluster_per_country[~one_cluster_b] = n_cluster_per_country[~one_cluster_b] + 0.5
return n_cluster_per_country.astype(int)
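distribute_clusters allocates clusters to countries in proportion to mean load, pins every country that would fall below half a cluster to one, and re-shares the remainder; a toy run with hypothetical loads:

import pandas as pd

def normed(x): return (x / x.sum()).fillna(0.)

loadc = pd.Series({'DE': 50., 'FR': 45., 'LU': 2.})
alloc = 10 * normed(loadc)  # DE 5.15, FR 4.64, LU 0.21
# LU < 0.5 is pinned to one cluster; the remaining 9 are re-shared by load
# and rounded via the +0.5/astype(int) trick:
# DE int(9*50/95 + 0.5) = 5, FR int(9*45/95 + 0.5) = 4, LU 1 -- sums to 10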
def distribute_clusters_exactly(n_clusters):
for d in [0, 1, -1, 2, -2]:
n_cluster_per_country = distribute_clusters(n_clusters + d)
if n_cluster_per_country.sum() == n_clusters:
return n_cluster_per_country
else:
return distribute_clusters(n_clusters)
def busmap_for_n_clusters(n_clusters):
n_clusters = distribute_clusters_exactly(n_clusters)
def busmap_for_country(x):
prefix = x.name[0] + x.name[1] + ' '
if len(x) == 1:
return pd.Series(prefix + '0', index=x.index)
weight = weighting_for_country(x)
return prefix + busmap_by_kmeans(n, weight, n_clusters[x.name], buses_i=x.index)
return n.buses.groupby(['country', 'sub_network'], group_keys=False).apply(busmap_for_country)
def plot_busmap_for_n_clusters(n_clusters=50):
busmap = busmap_for_n_clusters(n_clusters)
cs = busmap.unique()
cr = sns.color_palette("hls", len(cs))
n.plot(bus_colors=busmap.map(dict(zip(cs, cr))))
del cs, cr
def clustering_for_n_clusters(n_clusters):
clustering = get_clustering_from_busmap(
n, busmap_for_n_clusters(n_clusters),
bus_strategies=dict(country=_make_consense("Bus", "country")),
aggregate_generators_weighted=True,
aggregate_one_ports=["Load", "StorageUnit"]
)
# set n-1 security margin to 0.5 for 37 clusters and to 0.7 from 200 clusters
# (there was already one of 0.7 in-place)
s_max_pu = np.clip(0.5 + 0.2 * (n_clusters - 37) / (200 - 37), 0.5, 0.7)
clustering.network.lines['s_max_pu'] = s_max_pu
return clustering
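The n-1 security margin therefore interpolates linearly from 0.5 at 37 clusters to 0.7 at 200 clusters and saturates beyond; evaluated at the configured cluster levels:

import numpy as np

for nc in (37, 128, 200, 362):
    print(nc, np.clip(0.5 + 0.2 * (nc - 37) / (200 - 37), 0.5, 0.7))
# 37 -> 0.50, 128 -> 0.61, 200 -> 0.70, 362 -> 0.70 (clipped)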
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
s.reset_index().to_file(fn, driver='GeoJSON')
def cluster_regions(busmaps):
busmap = reduce(lambda x, y: x.map(y), busmaps[1:], busmaps[0])
for which in ('regions_onshore', 'regions_offshore'):
regions = gpd.read_file(getattr(snakemake.input, which)).set_index('name')
geom_c = regions.geometry.groupby(busmap).apply(shapely.ops.cascaded_union)
regions_c = gpd.GeoDataFrame(dict(geometry=geom_c))
save_to_geojson(regions_c, getattr(snakemake.output, which))
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('../config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.wildcards = Dict(clusters='37')
snakemake.input = Dict(network='../networks/elec.nc',
regions_onshore='../resources/regions_onshore.geojson',
regions_offshore='../resources/regions_offshore.geojson')
snakemake.output = Dict(network='../networks/elec_{clusters}.nc'.format(**snakemake.wildcards),
regions_onshore='../resources/regions_onshore_{clusters}.geojson'.format(**snakemake.wildcards),
regions_offshore='../resources/regions_offshore_{clusters}.geojson'.format(**snakemake.wildcards))
n = pypsa.Network(snakemake.input.network)
n, trafo_map = simplify_network_to_380(n)
n, stub_map = remove_stubs(n)
n_clusters = int(snakemake.wildcards.clusters)
clustering = clustering_for_n_clusters(n_clusters)
clustering.network.export_to_netcdf(snakemake.output.network)
cluster_regions((trafo_map, stub_map, clustering.busmap))

scripts/prepare_network.py (new file)

@@ -0,0 +1,83 @@
# coding: utf-8
import logging
logger = logging.getLogger(__name__)
import pandas as pd
idx = pd.IndexSlice
import numpy as np
import scipy as sp
import xarray as xr
import geopandas as gpd
import pypsa
def normed(s): return s/s.sum()
def add_co2limit(n, Nyears=1.):
n.add("GlobalConstraint", "CO2Limit",
carrier_attribute="co2_emissions", sense="<=",
constant=snakemake.config['electricity']['co2limit'] * Nyears)
def add_emission_prices(n, emission_prices=None, exclude_co2=False):
if emission_prices is None:
emission_prices = snakemake.config['costs']['emission_prices']
if exclude_co2: emission_prices.pop('co2')
ep = (pd.Series(emission_prices).rename(lambda x: x+'_emissions') * n.carriers).sum(axis=1)
n.generators['marginal_cost'] += n.generators.carrier.map(ep)
n.storage_units['marginal_cost'] += n.storage_units.carrier.map(ep)
def set_line_volume_limit(n, lv):
# Either line_volume cap or cost
n.lines['capital_cost'] = 0.
n.links['capital_cost'] = 0.
lines_s_nom = n.lines.s_nom.where(
n.lines.type == '',
np.sqrt(3) * n.lines.num_parallel *
n.lines.type.map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom)
)
n.lines['s_nom_min'] = lines_s_nom
n.links['p_nom_min'] = n.links['p_nom']
n.lines['s_nom_extendable'] = True
n.links['p_nom_extendable'] = True
n.line_volume_limit = lv * ((lines_s_nom * n.lines['length']).sum() +
n.links.loc[n.links.carrier=='DC'].eval('p_nom * length').sum())
return n
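For typed lines, s_nom is reconstructed from the thermal rating as s_nom [MVA] = sqrt(3) * i_nom [kA] * v_nom [kV] * num_parallel; with illustrative numbers:

import numpy as np

s_nom = np.sqrt(3) * 2.58 * 380. * 2.  # a 2.58 kA type, 380 kV, two circuits
# ~3396 MVA, which enters the volume limit weighted by line length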
if __name__ == "__main__":
# Detect running outside of snakemake and mock snakemake for testing
if 'snakemake' not in globals():
from vresutils import Dict
import yaml
snakemake = Dict()
with open('../config.yaml') as f:
snakemake.config = yaml.load(f)
snakemake.wildcards = Dict(clusters='37', lv='2', opts='Co2L')
snakemake.input = ['../networks/elec_37.nc']
snakemake.output = ['../networks/elec_37_lv2_Co2L.nc']
logger.setLevel(snakemake.config['logging_level'])
opts = snakemake.wildcards.opts.split('-')
n = pypsa.Network(snakemake.input[0])
Nyears = n.snapshot_weightings.sum()/8760.
if 'Co2L' in opts:
add_co2limit(n, Nyears)
add_emission_prices(n, exclude_co2=True)
if 'Ep' in opts:
add_emission_prices(n)
set_line_volume_limit(n, float(snakemake.wildcards.lv))
n.export_to_netcdf(snakemake.output[0])

scripts/solve_network.py

@@ -6,8 +6,6 @@ logging.basicConfig(filename=snakemake.log.python, level=logging.INFO)
import pypsa
from _helpers import madd
if 'tmpdir' in snakemake.config['solving']:
# PYOMO should write its lp files into tmp here
tmpdir = snakemake.config['solving']['tmpdir']
@@ -25,14 +23,14 @@ def prepare_network(n):
if solve_opts.get('load_shedding'):
n.add("Carrier", "Load")
madd(n, "Generator", "Load",
bus=n.buses.index,
carrier='load',
marginal_cost=1.0e5 * snakemake.config['costs']['EUR_to_ZAR'],
# intersect between macroeconomic and surveybased
# willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
p_nom=1e6)
n.madd("Generator", n.buses.index, " load",
bus=n.buses.index,
carrier='load',
marginal_cost=1.0e5,
# intersect between macroeconomic and surveybased
# willingness to pay
# http://journal.frontiersin.org/article/10.3389/fenrg.2015.00055/full
p_nom=1e6)
if solve_opts.get('noisy_costs'):
for t in n.iterate_components():
@@ -66,11 +64,30 @@ def solve_network(n):
ext_gens_i = n.generators.index[n.generators.carrier.isin(conv_techs) & n.generators.p_nom_extendable]
n.model.safe_peakdemand = pypsa.opt.Constraint(expr=sum(n.model.generator_p_nom[gen] for gen in ext_gens_i) >= peakdemand - exist_conv_caps)
def add_lv_constraint(n):
line_volume = getattr(n, 'line_volume_limit', None)
if line_volume is not None and not np.isinf(line_volume):
n.model.line_volume_constraint = pypsa.opt.Constraint(
expr=((sum(n.model.passive_branch_s_nom["Line",line]*n.lines.at[line,"length"]
for line in n.lines.index[n.lines.s_nom_extendable]) +
sum(n.model.link_p_nom[link]*n.links.at[link,"length"]
for link in n.links.index[(n.links.carrier=='DC') &
n.links.p_nom_extendable]))
<= line_volume)
)
def add_eps_storage_constraint(n):
if not hasattr(n, 'epsilon'):
n.epsilon = 1e-5
fix_sus_i = n.storage_units.index[~ n.storage_units.p_nom_extendable]
n.model.objective.expr += sum(n.epsilon * n.model.state_of_charge[su, n.snapshots[0]] for su in fix_sus_i)
def fix_lines(n, lines_i=None, links_i=None): # , fix=True):
if lines_i is not None and len(lines_i) > 0:
s_nom = n.lines.s_nom.where(
n.lines.type == '',
np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
np.sqrt(3) * n.lines.type.map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
)
for l in lines_i:
n.model.passive_branch_s_nom["Line", l].fix(s_nom.at[l])
@@ -98,6 +115,8 @@ def solve_network(n):
if not hasattr(n, 'opt') or not isinstance(n.opt, pypsa.opf.PersistentSolver):
pypsa.opf.network_lopf_build_model(n, formulation=solve_opts['formulation'])
add_opts_constraints(n)
add_lv_constraint(n)
add_eps_storage_constraint(n)
pypsa.opf.network_lopf_prepare_solver(n, solver_name=solver_name)
@@ -132,7 +151,8 @@ def solve_network(n):
lines = pd.DataFrame(n.lines[['r', 'x', 'type', 'num_parallel']])
lines['s_nom'] = (
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) * n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
np.sqrt(3) * n.lines['type'].map(n.line_types.i_nom) *
n.lines.bus0.map(n.buses.v_nom) * n.lines.num_parallel
).where(n.lines.type != '', n.lines['s_nom'])
lines_ext_typed_b = (n.lines.type != '') & lines_ext_b
@@ -211,10 +231,9 @@ def solve_network(n):
return n
if __name__ == "__main__":
n = pypsa.Network()
n.import_from_hdf5(snakemake.input[0])
n = pypsa.Network(snakemake.input[0])
n = prepare_network(n)
n = solve_network(n)
n.export_to_hdf5(snakemake.output[0])
n.export_to_netcdf(snakemake.output[0])