2021-07-01 18:09:04 +00:00
|
|
|
import os
|
2022-04-03 16:49:35 +00:00
|
|
|
import pytz
|
2021-07-01 18:09:04 +00:00
|
|
|
import pandas as pd
|
|
|
|
from pathlib import Path
|
|
|
|
from pypsa.descriptors import Dict
|
|
|
|
from pypsa.components import components, component_attrs
|
2020-05-07 12:45:14 +00:00
|
|
|
|
|
|
|
import logging
|
|
|
|
# Module-level logger; handlers/levels are configured by the consuming
# application (e.g. snakemake scripts), not here.
logger = logging.getLogger(__name__)
|
|
|
|
|
2021-06-18 07:41:18 +00:00
|
|
|
|
2021-07-01 18:09:04 +00:00
|
|
|
def override_component_attrs(directory):
    """Tell PyPSA that links can have multiple outputs by
    overriding the component_attrs. This can be done for
    as many buses as you need with format busi for i = 2,3,4,5,....
    See https://pypsa.org/doc/components.html#link-with-multiple-outputs-or-inputs

    Parameters
    ----------
    directory : string
        Folder where component attributes to override are stored
        analogous to ``pypsa/component_attrs``, e.g. `links.csv`.

    Returns
    -------
    Dictionary of overriden component attributes.
    """
    # Start from a deep-enough copy so the stock pypsa attribute tables
    # are never mutated in place.
    attrs = Dict()
    for name, table in component_attrs.items():
        attrs[name] = table.copy()

    for component, list_name in components.list_name.items():
        fn = f"{directory}/{list_name}.csv"
        if not os.path.isfile(fn):
            # No override file for this component type; keep the defaults.
            continue
        overrides = pd.read_csv(fn, index_col=0, na_values="n/a")
        # Values present in the override file win; everything else falls
        # back to the stock attribute table.
        attrs[component] = overrides.combine_first(attrs[component])

    return attrs
|
|
|
|
|
|
|
|
|
|
|
|
# from pypsa-eur/_helpers.py
|
2021-06-18 07:41:18 +00:00
|
|
|
def mock_snakemake(rulename, **wildcards):
    """
    This function is expected to be executed from the 'scripts'-directory of '
    the snakemake project. It returns a snakemake.script.Snakemake object,
    based on the Snakefile.

    If a rule has wildcards, you have to specify them in **wildcards.

    Parameters
    ----------
    rulename: str
        name of the rule for which the snakemake object should be generated
    **wildcards:
        keyword arguments fixing the wildcards. Only necessary if wildcards are
        needed.
    """
    # Imports are local so the module can be used without snakemake installed.
    import snakemake as sm
    import os
    from pypsa.descriptors import Dict
    from snakemake.script import Snakemake

    # Relative paths in the Snakefile are resolved from the repository root,
    # so we require starting in scripts/ and temporarily chdir one level up.
    script_dir = Path(__file__).parent.resolve()
    assert Path.cwd().resolve() == script_dir, \
        f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
    os.chdir(script_dir.parent)
    # Pick the first snakefile name snakemake itself would accept
    # (e.g. "Snakefile", "snakefile.smk", ...).
    # NOTE(review): if none of the candidates exists, `snakefile` is unbound
    # and the next line raises NameError — assumes a Snakefile is present.
    for p in sm.SNAKEFILE_CHOICES:
        if os.path.exists(p):
            snakefile = p
            break
    # Build a throwaway workflow/DAG containing only the requested rule;
    # overwrite_configfiles=[] prevents picking up user config overrides.
    workflow = sm.Workflow(snakefile, overwrite_configfiles=[])
    workflow.include(snakefile)
    workflow.global_resources = {}
    rule = workflow.get_rule(rulename)
    dag = sm.dag.DAG(workflow, rules=[rule])
    wc = Dict(wildcards)
    job = sm.jobs.Job(rule, dag, wc)

    # Rewrite the job's paths to absolute ones, since we chdir back to
    # scripts/ before returning (sic: "accessable" kept as in upstream).
    def make_accessable(*ios):
        for io in ios:
            for i in range(len(io)):
                io[i] = os.path.abspath(io[i])

    make_accessable(job.input, job.output, job.log)
    # Assemble the object a real snakemake run would inject into the script.
    snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
                          job.threads, job.resources, job.log,
                          job.dag.workflow.config, job.rule.name, None,)

    # create log and output dir if not existent
    for path in list(snakemake.log) + list(snakemake.output):
        Path(path).parent.mkdir(parents=True, exist_ok=True)

    # Restore the caller's working directory (scripts/).
    os.chdir(script_dir)
    return snakemake
|
2021-11-03 19:34:43 +00:00
|
|
|
|
|
|
|
# from pypsa-eur/_helpers.py
|
|
|
|
def progress_retrieve(url, file):
    """Download ``url`` to the local path ``file``, showing a progress bar.

    Parameters
    ----------
    url : str
        Remote URL to download.
    file : str or path-like
        Local destination path.
    """
    # Import the submodule explicitly: ``import urllib`` alone does not make
    # ``urllib.request`` available unless something else already loaded it.
    import urllib.request
    from progressbar import ProgressBar

    pbar = ProgressBar(0, 100)

    def dl_progress(count, block_size, total_size):
        # Servers that omit Content-Length report total_size <= 0; skip the
        # update then to avoid a ZeroDivisionError.
        if total_size > 0:
            # Clamp at 100: the final callback can overshoot because the last
            # block is usually only partially filled.
            pbar.update(min(100, int(count * block_size * 100 / total_size)))

    urllib.request.urlretrieve(url, file, reporthook=dl_progress)
|
|
|
|
|
|
|
|
|
|
|
|
def generate_periodic_profiles(dt_index, nodes, weekly_profile, localize=None):
    """
    Give a 24*7 long list of weekly hourly profiles, generate this for each
    country for the period dt_index, taking account of time zones and summer time.
    """
    # Index the weekly profile by hour-of-week: 0 = Monday 00:00 ... 167 = Sunday 23:00.
    profile = pd.Series(weekly_profile, range(24 * 7))

    result = pd.DataFrame(index=dt_index, columns=nodes)

    for node in nodes:
        # The first two characters of a node name are its country code;
        # use that country's first listed time zone.
        tz = pytz.timezone(pytz.country_timezones[node[:2]][0])
        local_times = dt_index.tz_convert(tz)
        # Position of each timestamp within the repeating weekly pattern,
        # evaluated in local (DST-aware) time.
        hours_into_week = [t.weekday() * 24 + t.hour for t in local_times]
        result[node] = pd.Series(hours_into_week, index=dt_index).map(profile)

    # Default localize=None strips any timezone info from the result index.
    return result.tz_localize(localize)
|