Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update parmap API to remove DeprecationWarnings #7

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion environment_linux64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ dependencies:
- lmfit==0.9.2
- markupsafe==0.23
- nwdiag==1.0.0
- parmap==1.2.3
- parmap==1.5.1
- pybtex==0.19
- pybtex-docutils==0.2.1
- pygments==2.1
Expand Down
2 changes: 1 addition & 1 deletion environment_osx.yml
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ dependencies:
- lmfit==0.9.2
- markupsafe==0.23
- nwdiag==1.0.0
- parmap==1.2.3
- parmap==1.5.1
- pybtex==0.19
- pybtex-docutils==0.2.1
- pygments==2.1
Expand Down
2 changes: 1 addition & 1 deletion environment_win64.yml
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ dependencies:
- lmfit==0.9.2
- markupsafe==0.23
- nwdiag==1.0.0
- parmap==1.2.3
- parmap==1.5.1
- pexpect==4.0.1
- pydot==1.0.2
- pybtex==0.19
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ nwdiag==1.0.0
olefile==0.44
oset==0.1.3
pandas==0.17.1
parmap==1.2.3
parmap==1.5.1
pathlib2==2.1.0
pexpect==3.3
pickleshare==0.5
Expand Down
2 changes: 1 addition & 1 deletion sifra/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@

import sysresponse
import infrastructure_response
185 changes: 126 additions & 59 deletions sifra/infrastructure_response.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
from __future__ import print_function
import os
import sys
import time
Expand All @@ -10,16 +11,18 @@
import pandas as pd
import parmap

import matplotlib
matplotlib.use('Agg')


from model_ingest import ingest_spreadsheet
from sifraclasses import Scenario
from sifra.modelling.hazard_levels import HazardLevels

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

import seaborn as sns
from colorama import Fore
from colorama import Fore, Back, Style


def run_scenario(config_file):
Expand All @@ -30,7 +33,14 @@ def run_scenario(config_file):
:return: None
"""
# Construct the scenario object
print(Style.BRIGHT + Fore.GREEN +
"\nLoading scenario config... " +
Style.RESET_ALL, end='')

scenario = Scenario(config_file)
print(Style.BRIGHT + Fore.GREEN + "Done." +
"\nInitiating model run...\n" + Style.RESET_ALL)
code_start_time = time.time()

# `IFSystem` object that contains a list of components
infrastructure = ingest_spreadsheet(config_file)
Expand Down Expand Up @@ -63,7 +73,7 @@ def calculate_response(scenario, infrastructure):
hazard_levels.hazard_range(),
infrastructure,
scenario,
parallel=scenario.run_parallel_proc))
pm_parallel=scenario.run_parallel_proc))
# combine the responses into one list
post_processing_list = [{}, # hazard level vs component damage state index
{}, # hazard level vs infrastructure output
Expand Down Expand Up @@ -118,26 +128,54 @@ def run_para_scen(hazard_level, infrastructure, scenario):
# BEGIN POST-PROCESSING ...
# ****************************************************************************

def plot_mean_econ_loss(fc, sc, economic_loss_array):
"""Draws and saves a boxplot of mean economic loss."""
def plot_mean_econ_loss(sc, economic_loss_array):
"""Draws and saves a boxplot of mean economic loss"""

hazvals_ext = [[str(i)] * sc.num_samples
for i in list(sc.hazard_intensity_vals)]
x1 = np.ndarray.flatten(np.array(hazvals_ext))

smpl = range(1, sc.num_samples+1, 1)
x2 = np.array(smpl * sc.num_hazard_pts)

arrays = [x1, x2]
econ_loss = np.array(economic_loss_array)
econ_loss = np.ndarray.flatten(econ_loss.transpose())
econ_loss_flat = np.ndarray.flatten(econ_loss)

econ_loss_df = pd.DataFrame(econ_loss_flat, index=arrays)
econ_loss_df.index.names = ['Hazard Intensity', 'Sample Num']
econ_loss_df.columns = ['Econ Loss Ratio']

fig = plt.figure(figsize=(9, 5), facecolor='white')
sns.set(style='ticks', palette='Set3')
ax = sns.boxplot(economic_loss_array * 100, showmeans=True,
linewidth=0.7, color='lightgrey',
meanprops=dict(marker='s',
markeredgecolor='salmon',
markerfacecolor='salmon')
sns.set(style='ticks', palette='Set2')
# whitesmoke='#F5F5F5', coral='#FF7F50'
ax = sns.boxplot(x=x1, y='Econ Loss Ratio', data=econ_loss_df,
linewidth=0.8, color='whitesmoke',
showmeans=True,
meanprops=dict(marker='o',
markeredgecolor='coral',
markerfacecolor='coral')
)
sns.despine(top=True, left=True, right=True)
ax.tick_params(axis='y', left='off', right='off')
ax.yaxis.grid(True)

intensity_label = sc.intensity_measure_param+' ('\
+sc.intensity_measure_unit+')'
ax.set_xlabel(intensity_label)
ax.set_ylabel('Loss Fraction (%)')

sns.despine(bottom=False, top=True, left=True, right=True, offset=10)
ax.spines['bottom'].set_linewidth(0.8)
ax.spines['bottom'].set_color('#555555')

ax.yaxis.grid(True, which="major", linestyle='-',
linewidth=0.4, color='#B6B6B6')

ax.tick_params(axis='x', bottom='on', top='off',
width=0.8, labelsize=8, pad=5, color='#555555')
ax.tick_params(axis='y', left='off', right='off',
width=0.8, labelsize=8, pad=5, color='#555555')

ax.set_xticklabels(sc.hazard_intensity_vals)
intensity_label \
= sc.intensity_measure_param+' ('+sc.intensity_measure_unit+')'
ax.set_xlabel(intensity_label, labelpad=9, size=10)
ax.set_ylabel('Loss Fraction (%)', labelpad=9, size=10)

ax.set_title('Loss Ratio', loc='center', y=1.04)
ax.title.set_fontsize(12)

Expand All @@ -157,29 +195,24 @@ def post_processing(infrastructure, scenario, response_list):
:param response_list: Values from the simulation
:return: None
"""
write_system_response(response_list, scenario)
loss_by_comp_type(response_list, infrastructure, scenario)
economic_loss_array = response_list[4]
plot_mean_econ_loss(scenario, economic_loss_array)
pe_by_component_class(response_list, infrastructure, scenario)

def write_system_response(response_list, scenario):

# ------------------------------------------------------------------------
# 'ids_comp_vs_haz' is a dict of numpy arrays
# We pickle it for archival. But the file size can get very large.
# So we zip it for archival and delete the original
# ------------------------------------------------------------------------
idshaz = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.pickle')
id_comp_vs_haz = response_list[0]
with open(idshaz, 'w') as handle:
for response_key in sorted(id_comp_vs_haz.keys()):
pickle.dump({response_key: id_comp_vs_haz[response_key]}, handle)

component_resp_dict = response_list[2]
crd_pkl = os.path.join(scenario.raw_output_dir, 'component_resp_dict.pickle')
with open(crd_pkl, 'w') as handle:
for response_key in sorted(component_resp_dict.keys()):
pickle.dump({response_key: component_resp_dict[response_key]}, handle)

sys_output_dict = response_list[1]
sod_pkl = os.path.join(scenario.raw_output_dir, 'sys_output_dict.pickle')
with open(sod_pkl, 'w') as handle:
for response_key in sorted(sys_output_dict.keys()):
pickle.dump({response_key: sys_output_dict[response_key]}, handle)

idshaz_zip = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.zip')
zipmode = zipfile.ZIP_DEFLATED
with zipfile.ZipFile(idshaz_zip, 'w', zipmode) as zip:
Expand All @@ -189,16 +222,34 @@ def post_processing(infrastructure, scenario, response_list):
# ------------------------------------------------------------------------
# System output file (for given hazard transfer parameter value)
# ------------------------------------------------------------------------
sys_output_dict = response_list[1]
sod_pkl = os.path.join(scenario.raw_output_dir,
'sys_output_dict.pickle')
with open(sod_pkl, 'w') as handle:
for response_key in sorted(sys_output_dict.keys()):
pickle.dump({response_key: sys_output_dict[response_key]},
handle)

sys_output_df = pd.DataFrame(sys_output_dict)
sys_output_df.index.name = 'Output Nodes'
sys_output_df = sys_output_df.transpose()
sys_output_df.index.name = 'Hazard Intensity'

outfile_sysoutput = os.path.join(scenario.output_path,
'system_output_given_haz_param.csv')
'system_output_vs_haz_intensity.csv')
sys_output_df.to_csv(outfile_sysoutput,
sep=',', index_label=['Output Nodes'])
sep=',',
index_label=[sys_output_df.index.name])

loss_by_comp_type(response_list, infrastructure, scenario)
pe_by_component_class(response_list, infrastructure, scenario)
# ------------------------------------------------------------------------
# Hazard response for component instances, i.e. components as-installed
# ------------------------------------------------------------------------
component_resp_dict = response_list[2]
crd_pkl = os.path.join(scenario.raw_output_dir,
'component_resp_dict.pickle')
with open(crd_pkl, 'w') as handle:
for response_key in sorted(component_resp_dict.keys()):
pickle.dump({response_key: component_resp_dict[response_key]},
handle)


def loss_by_comp_type(response_list, infrastructure, scenario):
Expand All @@ -220,10 +271,12 @@ def loss_by_comp_type(response_list, infrastructure, scenario):
(comp_type, 'func_std'))
)

mindex = pd.MultiIndex.from_tuples(tp_ct,
names=['component_type', 'response'])
comptype_resp_df = pd.DataFrame(index=mindex,
columns=[scenario.hazard_intensity_str])
mindex = pd.MultiIndex.from_tuples(
tp_ct,
names=['component_type', 'response'])
comptype_resp_df = pd.DataFrame(
index=mindex,
columns=[scenario.hazard_intensity_str])
comptype_resp_dict = comptype_resp_df.to_dict()

component_resp_dict = response_list[2]
Expand Down Expand Up @@ -284,18 +337,22 @@ def loss_by_comp_type(response_list, infrastructure, scenario):
for j, hazard_level in enumerate(scenario.hazard_intensity_str):
for i in range(scenario.num_samples):
# system output and economic loss
sys_frag[i, j] = np.sum(economic_loss_array[i, j] > if_system_damage_states)
sys_frag[i, j] = \
np.sum(economic_loss_array[i, j] > if_system_damage_states)

# Calculating Probability of Exceedence:
pe_sys_econloss = np.zeros((len(infrastructure.get_system_damage_states()), scenario.num_hazard_pts))
pe_sys_econloss = np.zeros(
(len(infrastructure.get_system_damage_states()),
scenario.num_hazard_pts)
)
for j in range(scenario.num_hazard_pts):
for i in range(len(infrastructure.get_system_damage_states())):
pe_sys_econloss[i, j] = \
np.sum(sys_frag[:, j] >= i) / float(scenario.num_samples)

# --- Output File --- response of each COMPONENT TYPE to hazard ---
outfile_comptype_resp = os.path.join(
scenario.output_path, 'comp_type_response.csv')
scenario.output_path, 'comptype_response.csv')
comptype_resp_df = pd.DataFrame(comptype_resp_dict)
comptype_resp_df.index.names = ['component_type', 'response']
comptype_resp_df.to_csv(
Expand All @@ -305,7 +362,7 @@ def loss_by_comp_type(response_list, infrastructure, scenario):

# --- Output File --- mean loss of component type ---
outfile_comptype_loss = os.path.join(
scenario.output_path, 'comp_type_meanloss.csv')
scenario.output_path, 'comptype_meanloss.csv')
comptype_loss_df = comptype_resp_df.iloc[
comptype_resp_df.index.get_level_values(1) == 'loss_mean']
comptype_loss_df.reset_index(level='response', inplace=True)
Expand All @@ -317,7 +374,7 @@ def loss_by_comp_type(response_list, infrastructure, scenario):

# --- Output File --- mean failures for component types ---
outfile_comptype_failures = os.path.join(
scenario.output_path, 'comp_type_meanfailures.csv')
scenario.output_path, 'comptype_meanfailures.csv')
comptype_failure_df = comptype_resp_df.iloc[
comptype_resp_df.index.get_level_values(1) == 'num_failures']
comptype_failure_df.reset_index(level='response', inplace=True)
Expand All @@ -340,11 +397,11 @@ def loss_by_comp_type(response_list, infrastructure, scenario):

def pe_by_component_class(response_list, infrastructure, scenario):
"""
Aggregate the probability of exceeding a damage state calculations by component type.
:param response_list: list of simulation results
:param infrastructure: simulated infrastructure
:param scenario: values used in simulation
:return: None
Calculated probability of exceedence based on component classes
:param response_list:
:param infrastructure:
:param scenario:
:return:
"""
# ------------------------------------------------------------------------
# For Probability of Exceedence calculations based on component failures
Expand All @@ -361,18 +418,24 @@ def pe_by_component_class(response_list, infrastructure, scenario):
for comp_id, component in infrastructure.components.items():
cp_class_map[component.component_class].append(component)

# ------------------------------------------------------------------------
# For Probability of Exceedence calculations based on component failures:
# Damage state boundaries for Component Type Failures (Substations) are
# based on HAZUS MH MR3, p 8-66 to 8-68
# ------------------------------------------------------------------------
if infrastructure.system_class == 'Substation':
cp_classes_costed = \
[x for x in cp_classes_in_system if x not in infrastructure.uncosted_classes]
[x for x in cp_classes_in_system
if x not in infrastructure.uncosted_classes]

# --- System fragility - Based on Failure of Component Classes ---
comp_class_failures = \
{cc: np.zeros((scenario.num_samples, scenario.num_hazard_pts))
for cc in cp_classes_costed}

comp_class_frag = {cc: np.zeros((scenario.num_samples, scenario.num_hazard_pts))
for cc in cp_classes_costed}
comp_class_frag = \
{cc: np.zeros((scenario.num_samples, scenario.num_hazard_pts))
for cc in cp_classes_costed}

for j, hazard_level in enumerate(HazardLevels(scenario)):
for i in range(scenario.num_samples):
Expand All @@ -381,7 +444,8 @@ def pe_by_component_class(response_list, infrastructure, scenario):
comp_class_failures[compclass][i, j] += \
response_list[hazard_level.hazard_intensity]\
[i, infrastructure.components[c]]
comp_class_failures[compclass][i, j] /= len(cp_class_map[compclass])
comp_class_failures[compclass][i, j] /= \
len(cp_class_map[compclass])

comp_class_frag[compclass][i, j] = \
np.sum(comp_class_failures[compclass][i, j] > \
Expand Down Expand Up @@ -528,7 +592,8 @@ def pe_by_component_class(response_list, infrastructure, scenario):
logging.info("\nOutputs saved in: " +
Fore.GREEN + scenario.output_path + Fore.RESET + '\n')

plot_mean_econ_loss(infrastructure, scenario, economic_loss_array)
print("\nOutputs saved in:\n" +
Fore.GREEN + scenario.output_path + Fore.RESET + '\n')

# ... END POST-PROCESSING
# ****************************************************************************
Expand All @@ -548,10 +613,12 @@ def pe2pb(pe):

def main():
    """Command-line entry point.

    Usage: ``python infrastructure_response.py <config_file>``

    Runs the scenario described by the config file given as the first
    command-line argument, then prints the total wall-clock run time.
    """
    # BUG FIX: the original formatted the elapsed time with
    # `code_start_time`, which is a *local* variable inside
    # run_scenario() and therefore not in scope here -- calling main()
    # raised NameError after the run finished. Time the run locally.
    start_time = time.time()

    SETUPFILE = sys.argv[1]
    run_scenario(SETUPFILE)

    print(Style.BRIGHT + Fore.YELLOW +
          "[ Run time: %s ]\n" %
          str(timedelta(seconds=(time.time() - start_time))) +
          Style.RESET_ALL)

if __name__ == '__main__':
    main()

Loading