Merge pull request #314 from PyPSA/loggin-replace-print
Replacing print statements with logging
commit 6d98edb320
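The change applies the same module-level logging pattern to every script touched below: each file creates its own logger with logging.getLogger(__name__), and the script entry point sets the level from the Snakemake configuration via logging.basicConfig(level=snakemake.config['logging_level']). A minimal sketch of that pattern outside of Snakemake follows; the function name prepare_brownfield and the hard-coded "INFO" level are illustrative stand-ins, not code from the repository.

    import logging

    logger = logging.getLogger(__name__)


    def prepare_brownfield(year):
        # replaces the old print("adding brownfield")
        logger.info(f"Preparing brownfield for the year {year}")


    if __name__ == "__main__":
        # the actual scripts read this level from snakemake.config['logging_level']
        logging.basicConfig(level="INFO")
        prepare_brownfield(2030)

Because the module loggers carry no level of their own and propagate to the root logger configured by basicConfig, the single logging_level config entry controls the verbosity of all scripts at once.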
@@ -16,7 +16,7 @@ from helper import override_component_attrs, update_config_with_sector_opts

 def add_brownfield(n, n_p, year):

-print("adding brownfield")
+logger.info(f"Preparing brownfield for the year {year}")

 # electric transmission grid set optimised capacities of previous as minimum
 n.lines.s_nom_min = n_p.lines.s_nom_opt
@@ -122,10 +122,11 @@ if __name__ == "__main__":
 planning_horizons=2030,
 )

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 update_config_with_sector_opts(snakemake.config, snakemake.wildcards.sector_opts)

-print(snakemake.input.network_p)
-logging.basicConfig(level=snakemake.config['logging_level'])
+logger.info(f"Preparing brownfield from the file {snakemake.input.network_p}")

 year = int(snakemake.wildcards.planning_horizons)
@@ -120,7 +120,7 @@ def add_power_capacities_installed_before_baseyear(n, grouping_years, costs, bas
 to read lifetime to estimate YearDecomissioning
 baseyear : int
 """
-print("adding power capacities installed before baseyear from powerplants.csv")
+logger.debug(f"Adding power capacities installed before {baseyear} from powerplants.csv")

 df_agg = pd.read_csv(snakemake.input.powerplants, index_col=0)
@@ -357,7 +357,7 @@ def add_heating_capacities_installed_before_baseyear(n, baseyear, grouping_years
 services proportional to heating load in both 50% capacities
 in rural busess 50% in urban buses
 """
-print("adding heating capacities installed before baseyear")
+logger.debug(f"Adding heating capacities installed before {baseyear}")

 # Add existing heating capacities, data comes from the study
 # "Mapping and analyses of the current and future (2020 - 2030)
@@ -1,3 +1,6 @@
+import logging
+logger = logging.getLogger(__name__)
+
 from functools import partial
 from tqdm import tqdm
 from helper import mute_print
@@ -454,7 +457,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
 fuel_use = df[f"electricity {sector} {use}"]
 fuel = df[f"electricity {sector}"]
 avg = fuel_use.div(fuel).mean()
-print(f"{sector}: average fraction of electricity for {use} is {avg:.3f}")
+logger.debug(f"{sector}: average fraction of electricity for {use} is {avg:.3f}")
 df.loc[to_fill, f"electricity {sector} {use}"] = avg * df.loc[to_fill, f"electricity {sector}"]

 # non-electric use
@@ -463,7 +466,7 @@ def build_energy_totals(countries, eurostat, swiss, idees):
 nonelectric_use = df[f"total {sector} {use}"] - df[f"electricity {sector} {use}"]
 nonelectric = df[f"total {sector}"] - df[f"electricity {sector}"]
 avg = nonelectric_use.div(nonelectric).mean()
-print(f"{sector}: average fraction of non-electric for {use} is {avg:.3f}")
+logger.debug(f"{sector}: average fraction of non-electric for {use} is {avg:.3f}")
 electric_use = df.loc[to_fill, f"electricity {sector} {use}"]
 nonelectric = df.loc[to_fill, f"total {sector}"] - df.loc[to_fill, f"electricity {sector}"]
 df.loc[to_fill, f"total {sector} {use}"] = electric_use + avg * nonelectric
@@ -673,7 +676,7 @@ def build_transport_data(countries, population, idees):
 transport_data.at["CH", "number cars"] = 4.136e6

 missing = transport_data.index[transport_data["number cars"].isna()]
-print(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.")
+logger.info(f"Missing data on cars from:\n{list(missing)}\nFilling gaps with averaged data.")

 cars_pp = transport_data["number cars"] / population
 transport_data.loc[missing, "number cars"] = cars_pp.mean() * population
@@ -683,7 +686,7 @@ def build_transport_data(countries, population, idees):
 transport_data["average fuel efficiency"] = idees["passenger car efficiency"]

 missing = transport_data.index[transport_data["average fuel efficiency"].isna()]
-print(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.")
+logger.info(f"Missing data on fuel efficiency from:\n{list(missing)}\nFilling gapswith averaged data.")

 fill_values = transport_data["average fuel efficiency"].mean()
 transport_data.loc[missing, "average fuel efficiency"] = fill_values
@@ -696,6 +699,8 @@ if __name__ == "__main__":
 from helper import mock_snakemake
 snakemake = mock_snakemake('build_energy_totals')

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 config = snakemake.config["energy"]

 nuts3 = gpd.read_file(snakemake.input.nuts3_shapes).set_index("index")
@@ -1,5 +1,8 @@
 """Build industrial distribution keys from hotmaps database."""

+import logging
+logger = logging.getLogger(__name__)
+
 import uuid
 import pandas as pd
 import geopandas as gpd
@@ -33,7 +36,7 @@ def locate_missing_industrial_sites(df):

 loc = geocode([s.City, s.Country], geometry='wkt')
 if loc is not None:
-print(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n")
+logger.debug(f"Found:\t{loc}\nFor:\t{s['City']}, {s['Country']}\n")
 return f"POINT({loc.longitude} {loc.latitude})"
 else:
 return None
@@ -46,8 +49,7 @@ def locate_missing_industrial_sites(df):
 num_found = len(missing) - num_still_missing
 share_missing = len(missing) / len(df) * 100
 share_still_missing = num_still_missing / len(df) * 100
-print(f"Found {num_found} missing locations.",
-f"Share of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.")
+logger.warning(f"Found {num_found} missing locations. \nShare of missing locations reduced from {share_missing:.2f}% to {share_still_missing:.2f}%.")

 return df
@@ -125,6 +127,8 @@ if __name__ == "__main__":
 clusters=48,
 )

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 regions = gpd.read_file(snakemake.input.regions_onshore).set_index('name')

 hotmaps = prepare_hotmaps_database(regions)
@@ -1,5 +1,8 @@
 """Build industrial production per country."""

+import logging
+logger = logging.getLogger(__name__)
+
 import pandas as pd
 import numpy as np
 import multiprocessing as mp
@@ -189,7 +192,7 @@ def separate_basic_chemicals(demand):
 there = ammonia.index.intersection(demand.index)
 missing = demand.index.symmetric_difference(there)

-print("Following countries have no ammonia demand:", missing)
+logger.info(f"Following countries have no ammonia demand: {missing.tolist()}")

 demand["Ammonia"] = 0.
@@ -213,6 +216,8 @@ if __name__ == '__main__':
 from helper import mock_snakemake
 snakemake = mock_snakemake('build_industrial_production_per_country')

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 countries = non_EU + eu28

 year = snakemake.config['industry']['reference_year']
@@ -1,5 +1,8 @@
 """Build mapping between grid cells and population (total, urban, rural)"""

+import logging
+logger = logging.getLogger(__name__)
+
 import multiprocessing as mp
 import atlite
 import numpy as np
@@ -12,6 +15,8 @@ if __name__ == '__main__':
 from helper import mock_snakemake
 snakemake = mock_snakemake('build_population_layouts')

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 cutout = atlite.Cutout(snakemake.config['atlite']['cutout'])

 grid_cells = cutout.grid.geometry
@@ -54,7 +59,7 @@ if __name__ == '__main__':
 pop_urban = pd.Series(0., density_cells.index)

 for ct in countries:
-print(ct, urban_fraction[ct])
+logger.debug(f"The urbanization rate for {ct} is {round(urban_fraction[ct]*100)}%")

 indicator_nuts3_ct = nuts3.country.apply(lambda x: 1. if x == ct else 0.)
@@ -1,3 +1,5 @@
+import logging
+logger = logging.getLogger(__name__)

 import sys
 import yaml
@@ -431,8 +433,9 @@ def calculate_weighted_prices(n, label, weighted_prices):

 weighted_prices.loc[carrier,label] = (load * n.buses_t.marginal_price[buses]).sum().sum() / load.sum().sum()

+# still have no idea what this is for, only for debug reasons.
 if carrier[:5] == "space":
-print(load * n.buses_t.marginal_price[buses])
+logger.debug(load * n.buses_t.marginal_price[buses])

 return weighted_prices
@@ -537,7 +540,7 @@ def make_summaries(networks_dict):
 df[output] = pd.DataFrame(columns=columns, dtype=float)

 for label, filename in networks_dict.items():
-print(label, filename)
+logger.info(f"Make summary for scenario {label}, using {filename}")

 overrides = override_component_attrs(snakemake.input.overrides)
 n = pypsa.Network(filename, override_component_attrs=overrides)
@@ -561,6 +564,8 @@ if __name__ == "__main__":
 from helper import mock_snakemake
 snakemake = mock_snakemake('make_summary')

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 networks_dict = {
 (cluster, lv, opt+sector_opt, planning_horizon) :
 snakemake.config['results_dir'] + snakemake.config['run'] + f'/postnetworks/elec_s{simpl}_{cluster}_lv{lv}_{opt}_{sector_opt}_{planning_horizon}.nc' \
@@ -572,8 +577,6 @@ if __name__ == "__main__":
 for planning_horizon in snakemake.config['scenario']['planning_horizons']
 }

-print(networks_dict)
-
 Nyears = 1

 costs_db = prepare_costs(
@@ -1,3 +1,6 @@
+import logging
+logger = logging.getLogger(__name__)
+
 import pypsa

 import pandas as pd
@@ -75,7 +78,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
 .unstack().fillna(0.))
 costs = pd.concat([costs, costs_c], axis=1)

-print(comp, costs)
+logger.debug(f"{comp}, {costs}")

 costs = costs.groupby(costs.columns, axis=1).sum()
@@ -87,7 +90,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator

 for item in new_columns:
 if item not in tech_colors:
-print("Warning!",item,"not in config/plotting/tech_colors")
+logger.warning(f"{item} not in config/plotting/tech_colors")

 costs = costs.stack() # .sort_index()
@@ -102,7 +105,7 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
 # drop non-bus
 to_drop = costs.index.levels[0].symmetric_difference(n.buses.index)
 if len(to_drop) != 0:
-print("dropping non-buses", to_drop)
+logger.info(f"Dropping non-buses {to_drop.tolist()}")
 costs.drop(to_drop, level=0, inplace=True, axis=0, errors="ignore")

 # make sure they are removed from index
@@ -141,12 +144,12 @@ def plot_map(network, components=["links", "stores", "storage_units", "generator
 line_widths = n.lines.s_nom_opt
 link_widths = n.links.p_nom_opt
 title = "total grid"

+line_widths = line_widths.clip(line_lower_threshold,line_upper_threshold)
+link_widths = link_widths.clip(line_lower_threshold,line_upper_threshold)

-line_widths[line_widths < line_lower_threshold] = 0.
-link_widths[link_widths < line_lower_threshold] = 0.

-line_widths[line_widths > line_upper_threshold] = line_upper_threshold
-link_widths[link_widths > line_upper_threshold] = line_upper_threshold
+line_widths = line_widths.replace(line_lower_threshold,0)
+link_widths = link_widths.replace(line_lower_threshold,0)

 fig, ax = plt.subplots(subplot_kw={"projection": ccrs.EqualEarth()})
 fig.set_size_inches(7, 6)
@@ -663,11 +666,11 @@ def plot_map_without(network):
 line_widths = n.lines.s_nom_min
 link_widths = n.links.p_nom_min

-line_widths[line_widths < line_lower_threshold] = 0.
-link_widths[link_widths < line_lower_threshold] = 0.
+line_widths = line_widths.clip(line_lower_threshold,line_upper_threshold)
+link_widths = link_widths.clip(line_lower_threshold,line_upper_threshold)

-line_widths[line_widths > line_upper_threshold] = line_upper_threshold
-link_widths[link_widths > line_upper_threshold] = line_upper_threshold
+line_widths = line_widths.replace(line_lower_threshold,0)
+link_widths = link_widths.replace(line_lower_threshold,0)

 n.plot(
 bus_colors="k",
@@ -751,7 +754,7 @@ def plot_series(network, carrier="AC", name="test"):
 to_drop = supply.columns[(abs(supply) < threshold).all()]

 if len(to_drop) != 0:
-print("dropping", to_drop)
+logger.info(f"Dropping {to_drop.tolist()} from supply")
 supply.drop(columns=to_drop, inplace=True)

 supply.index.name = None
@@ -841,6 +844,8 @@ if __name__ == "__main__":
 planning_horizons="2050",
 )

+logging.basicConfig(level=snakemake.config['logging_level'])
+
 overrides = override_component_attrs(snakemake.input.overrides)
 n = pypsa.Network(snakemake.input.network, override_component_attrs=overrides)
@@ -1,4 +1,5 @@
 import logging
+logger = logging.getLogger(__name__)

 import numpy as np
 import pandas as pd
@@ -135,13 +136,12 @@ def plot_costs():
 to_drop = df.index[df.max(axis=1) < snakemake.config['plotting']['costs_threshold']]

-print("dropping")
-
-print(df.loc[to_drop])
+logger.info(f"Dropping technology with costs below {snakemake.config['plotting']['costs_threshold']} EUR billion per year")
+logger.debug(df.loc[to_drop])

 df = df.drop(to_drop)

-print(df.sum())
+logger.info(f"Total system cost of {round(df.sum()[0])} EUR billion per year")

 new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
@@ -191,15 +191,12 @@ def plot_energy():
 to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']]

-print("dropping")
-
-print(df.loc[to_drop])
+logger.info(f"Dropping all technology with energy consumption or production below {snakemake.config['plotting']['energy_threshold']} TWh/a")
+logger.debug(df.loc[to_drop])

 df = df.drop(to_drop)

-print(df.sum())
-
-print(df)
+logger.info(f"Total energy of {round(df.sum()[0])} TWh/a")

 new_index = preferred_order.intersection(df.index).append(df.index.difference(preferred_order))
@@ -207,7 +204,7 @@ def plot_energy():
 fig, ax = plt.subplots(figsize=(12,8))

-print(df.loc[new_index, new_columns])
+logger.debug(df.loc[new_index, new_columns])

 df.loc[new_index, new_columns].T.plot(
 kind="bar",
@@ -247,6 +244,8 @@ def plot_balances():
 balances = {i.replace(" ","_"): [i] for i in balances_df.index.levels[0]}
 balances["energy"] = [i for i in balances_df.index.levels[0] if i not in co2_carriers]

+fig, ax = plt.subplots(figsize=(12,8))
+
 for k, v in balances.items():

@@ -263,13 +262,17 @@ def plot_balances():
 to_drop = df.index[df.abs().max(axis=1) < snakemake.config['plotting']['energy_threshold']/10]

-print("dropping")
+if v[0] in co2_carriers:
+units = "MtCO2/a"
+else:
+units = "TWh/a"

-print(df.loc[to_drop])
+logger.debug(f"Dropping technology energy balance smaller than {snakemake.config['plotting']['energy_threshold']/10} {units}")
+logger.debug(df.loc[to_drop])

 df = df.drop(to_drop)

-print(df.sum())
+logger.debug(f"Total energy balance for {v} of {round(df.sum()[0],2)} {units}")

 if df.empty:
 continue
@@ -278,8 +281,6 @@ def plot_balances():
 new_columns = df.columns.sort_values()

-fig, ax = plt.subplots(figsize=(12,8))
-
 df.loc[new_index,new_columns].T.plot(kind="bar",ax=ax,stacked=True,color=[snakemake.config['plotting']['tech_colors'][i] for i in new_index])

@@ -301,6 +302,8 @@ def plot_balances():

 fig.savefig(snakemake.output.balances[:-10] + k + ".pdf", bbox_inches='tight')

+plt.cla()
+

 def historical_emissions(cts):
@@ -441,6 +444,7 @@ if __name__ == "__main__":
 from helper import mock_snakemake
 snakemake = mock_snakemake('plot_summary')

+logging.basicConfig(level=snakemake.config['logging_level'])

 n_header = 4
@@ -706,7 +706,7 @@ def prepare_costs(cost_file, USD_to_EUR, discount_rate, Nyears, lifetime):

 def add_generation(n, costs):

-logger.info("adding electricity generation")
+logger.info("Adding electricity generation")

 nodes = pop_layout.index
@@ -737,7 +737,7 @@ def add_generation(n, costs):

 def add_ammonia(n, costs):

-logger.info("adding ammonia carrier with synthesis, cracking and storage")
+logger.info("Adding ammonia carrier with synthesis, cracking and storage")

 nodes = pop_layout.index
@@ -1495,7 +1495,7 @@ def add_heat(n, costs):
 # exogenously reduce space heat demand
 if options["reduce_space_heat_exogenously"]:
 dE = get(options["reduce_space_heat_exogenously_factor"], investment_year)
-logger.info(f"assumed space heat reduction of {dE:.2%}")
+logger.info(f"Assumed space heat reduction of {dE:.2%}")
 for sector in sectors:
 heat_demand[sector + " space"] = (1 - dE) * heat_demand[sector + " space"]
@@ -2647,7 +2647,7 @@ def maybe_adjust_costs_and_potentials(n, opts):

 # TODO this should rather be a config no wildcard
 def limit_individual_line_extension(n, maxext):
-logger.info(f"limiting new HVAC and HVDC extensions to {maxext} MW")
+logger.info(f"Limiting new HVAC and HVDC extensions to {maxext} MW")
 n.lines['s_nom_max'] = n.lines['s_nom'] + maxext
 hvdc = n.links.index[n.links.carrier == 'DC']
 n.links.loc[hvdc, 'p_nom_max'] = n.links.loc[hvdc, 'p_nom'] + maxext
@@ -2814,7 +2814,7 @@ def set_temporal_aggregation(n, opts, solver_name):
 m = re.match(r"(^\d+)sn$", o, re.IGNORECASE)
 if m is not None:
 sn = int(m[1])
-logger.info(f"use every {sn} snapshot as representative")
+logger.info(f"Use every {sn} snapshot as representative")
 n.set_snapshots(n.snapshots[::sn])
 n.snapshot_weightings *= sn
 break
@@ -2822,7 +2822,7 @@ def set_temporal_aggregation(n, opts, solver_name):
 m = re.match(r"^(\d+)seg$", o, re.IGNORECASE)
 if m is not None:
 segments = int(m[1])
-logger.info(f"use temporal segmentation with {segments} segments")
+logger.info(f"Use temporal segmentation with {segments} segments")
 n = apply_time_segmentation(n, segments, solver_name=solver_name)
 break
 return n