From 0cd32ae95eb1b7852f1df146cc41c6d7273d3463 Mon Sep 17 00:00:00 2001
From: au650680
Date: Tue, 21 Oct 2025 12:46:31 +0200
Subject: [PATCH 1/6] Added clustering and mode tracking

Clustering and mode tracking are added, with example code and a shared
module for both. Other small changes have been made.
---
 src/data/accel/hbk/aligner.py              |    2 +-
 src/data/accel/metadata.py                 |    1 +
 src/examples/clustering.py                 |   53 +
 src/examples/example.py                    |   31 +
 src/examples/mode_tracking.py              |   54 +-
 src/examples/run_pyoma.py                  |   56 +-
 src/examples/updating_parameters.py        |   10 +-
 src/functions/plot_mode_tracking.py        |   67 ++
 src/functions/sysid_plot.py                |  455 ++++++++
 src/methods/clustering_tracking_module.py  |  317 +++++
 src/methods/constants.py                   |   38 +-
 src/methods/model_update_module.py         |   64 +-
 src/methods/packages/clustering.py         | 1206 ++++++++++++++++++++
 src/methods/packages/mode_track.py         |  944 ---------------
 src/methods/packages/mode_tracking.py      |  355 ++++++
 src/methods/packages/pyoma/ssiWrapper.py   |   43 +-
 src/methods/{sys_id.py => sysid_module.py} |   38 +-
 tests/integration/methods/test_sys_id.py   |   14 +-
 tests/unit/methods/test_sys_id_unit.py     |    2 +-
 19 files changed, 2652 insertions(+), 1098 deletions(-)
 create mode 100644 src/examples/clustering.py
 create mode 100644 src/functions/plot_mode_tracking.py
 create mode 100644 src/functions/sysid_plot.py
 create mode 100644 src/methods/clustering_tracking_module.py
 create mode 100644 src/methods/packages/clustering.py
 delete mode 100644 src/methods/packages/mode_track.py
 create mode 100644 src/methods/packages/mode_tracking.py
 rename src/methods/{sys_id.py => sysid_module.py} (88%)

diff --git a/src/data/accel/hbk/aligner.py b/src/data/accel/hbk/aligner.py
index 335e4ef..ab339a1 100644
--- a/src/data/accel/hbk/aligner.py
+++ b/src/data/accel/hbk/aligner.py
@@ -130,7 +130,7 @@ def _extract_aligned_block(self, group: List[int], batch_size: int,
             ch.clear_used_data(group[0], requested_samples)
 
         aligned_array = np.array(aligned_data, dtype=np.float32)
-        print(f"Aligned shape: {aligned_array.shape}")
+        print(f"\nAligned shape: {aligned_array.shape}")
 
         return aligned_array, utc_time
 
diff --git a/src/data/accel/metadata.py b/src/data/accel/metadata.py
index 0e54027..592f3c9 100644
--- a/src/data/accel/metadata.py
+++ b/src/data/accel/metadata.py
@@ -11,6 +11,7 @@ def extract_fs_from_metadata(mqtt_config: Dict[str, Any]) -> int:
     def _on_metadata(client: MQTTClient, userdata, message) -> None:
         try:
             payload = json.loads(message.payload.decode("utf-8"))
+            print("Metadata", payload)
             fs_candidate = payload["Analysis chain"][0]["Sampling"]
             if fs_candidate:
                 fs_result["fs"] = fs_candidate
diff --git a/src/examples/clustering.py b/src/examples/clustering.py
new file mode 100644
index 0000000..707bb8f
--- /dev/null
+++ b/src/examples/clustering.py
@@ -0,0 +1,53 @@
+import sys
+import time
+import matplotlib.pyplot as plt
+from data.comm.mqtt import load_config
+from data.accel.hbk.aligner import Aligner
+from methods import sysid_module as sysID
+from methods import clustering_tracking_module as MT
+from methods.constants import PARAMS
+from functions.sysid_plot import plot_clusters
+
+# pylint: disable=R0914
+def run_clustering_with_local_sysid(config_path):
+    number_of_minutes = 1
+    config = load_config(config_path)
+    mqtt_config = config["MQTT"]
+
+    # Setting up the client and extracting Fs
+    data_client, fs = sysID.setup_client(mqtt_config)
+
+    # Setting up the aligner
+    data_topic_indexes = [0, 2, 3, 4]
+    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in 
data_topic_indexes] + aligner = Aligner(data_client, topics=selected_topics) + + aligner_time = None + t1 = time.time() + while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") + oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) + data_client.disconnect() + + # Mode Tracks + dictionary_of_clusters, median_frequencies = MT.run_mode_clustering( + oma_output,PARAMS) + + # Print frequencies + print("\nMedian frequencies:", median_frequencies) + + fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None) + plt.show(block=False) + sys.stdout.flush() + +def run_clustering_with_remote_sysid(config_path): + oma_output, dictionary_of_clusters = MT.subscribe_and_cluster(config_path,PARAMS) + fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None) + plt.show(block=False) + sys.stdout.flush() + +def run_live_clustering_with_remote_sysid(config_path): + MT.subscribe_cluster_looping(config_path,topic_index=0,plot=[1,1]) diff --git a/src/examples/example.py b/src/examples/example.py index f6af198..7f49c72 100644 --- a/src/examples/example.py +++ b/src/examples/example.py @@ -6,10 +6,17 @@ run_oma_and_plot, run_oma_and_publish, run_oma_and_print, + run_oma_and_publish_loop, +) +from examples.clustering import ( + run_clustering_with_local_sysid, + run_clustering_with_remote_sysid, + run_live_clustering_with_remote_sysid, ) from examples.mode_tracking import ( run_mode_tracking_with_local_sysid, run_mode_tracking_with_remote_sysid, + run_live_mode_tracking_with_remote_sysid, ) from examples.updating_parameters import run_model_update @@ -37,6 +44,11 @@ def align_readings(ctx): def oma_and_publish(ctx): run_oma_and_publish(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def oma_and_publish_looping(ctx): + run_oma_and_publish_loop(ctx.obj["CONFIG"]) + @cli.command() @click.pass_context def oma_and_plot(ctx): @@ -47,6 +59,20 @@ def oma_and_plot(ctx): def oma_and_print(ctx): run_oma_and_print(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def clustering_with_local_sysid(ctx): + run_clustering_with_local_sysid(ctx.obj["CONFIG"]) + +@cli.command() +@click.pass_context +def clustering_with_remote_sysid(ctx): + run_clustering_with_remote_sysid(ctx.obj["CONFIG"]) + +@cli.command() +@click.pass_context +def live_clustering_with_remote_sysid(ctx): + run_live_clustering_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context @@ -58,6 +84,11 @@ def mode_tracking_with_local_sysid(ctx): def mode_tracking_with_remote_sysid(ctx): run_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def live_mode_tracking_with_remote_sysid(ctx): + run_live_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) + @cli.command() @click.pass_context diff --git a/src/examples/mode_tracking.py b/src/examples/mode_tracking.py index cb4f270..1c52c49 100644 --- a/src/examples/mode_tracking.py +++ b/src/examples/mode_tracking.py @@ -1,12 +1,15 @@ -import numpy as np +import sys +import matplotlib.pyplot as plt from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from methods import sys_id as sysID -from methods import model_update_module as MT +from methods import sysid_module as sysID +from methods import clustering_tracking_module as MT +from methods.constants import PARAMS +from functions.plot_mode_tracking import plot_tracked_modes # pylint: disable=R0914 def 
run_mode_tracking_with_local_sysid(config_path): - number_of_minutes = 0.5 + number_of_minutes = 1 config = load_config(config_path) mqtt_config = config["MQTT"] @@ -14,7 +17,7 @@ def run_mode_tracking_with_local_sysid(config_path): data_client, fs = sysID.setup_client(mqtt_config) # Setting up the aligner - data_topic_indexes = [0, 2] + data_topic_indexes = [0, 2, 3, 4] selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes] aligner = Aligner(data_client, topics=selected_topics) @@ -23,32 +26,25 @@ def run_mode_tracking_with_local_sysid(config_path): oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() - # Mode Track - cleaned_values, median_frequencies, confidence_intervals = MT.run_mode_track( - oma_output) + # Mode Tracks + dictionary_of_clusters, median_frequencies = MT.run_mode_clustering( + oma_output,PARAMS) - median_frequencies = [] - mode_shapes_list = [] - - for cluster in cleaned_values: - mode_shapes = cluster["mode_shapes"] # shape: (n_modes_in_cluster, n_channels) - median_shape = np.median(mode_shapes, axis=0) # median across modes - median_frequencies.append(cluster["median"]) - mode_shapes_list.append(median_shape) - - # Convert to numpy arrays - median_frequencies = np.array(median_frequencies) - mode_shapes_array = np.array(mode_shapes_list) # shape: (n_clusters, n_channels) - print("Mode shapes:", mode_shapes_array) + # Print frequencies print("\nMedian frequencies:", median_frequencies) - print("\nConfidence intervals:", confidence_intervals) + tracked_clusters = {} + tracked_clusters = MT.run_mode_tracking(dictionary_of_clusters,tracked_clusters,PARAMS) + + fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None) + plt.show(block=True) + sys.stdout.flush() def run_mode_tracking_with_remote_sysid(config_path): - config = load_config(config_path) - cleaned_values, median_frequencies, confidence_intervals = ( - MT.subscribe_and_get_cleaned_values(config_path) - ) - print("Cleaned values:", cleaned_values) - print("Tracked frequencies:", median_frequencies) - print("\nConfidence intervals:", confidence_intervals) + oma_output, clusters, tracked_clusters = MT.subscribe_and_get_clusters(config_path) + fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None) + plt.show(block=True) + sys.stdout.flush() + +def run_live_mode_tracking_with_remote_sysid(config_path): + MT.subscribe_cluster_and_tracking_looping(config_path,topic_index=0,plot=[1,1,1]) diff --git a/src/examples/run_pyoma.py b/src/examples/run_pyoma.py index c05f290..0a08ba5 100644 --- a/src/examples/run_pyoma.py +++ b/src/examples/run_pyoma.py @@ -1,9 +1,11 @@ import sys +import time import matplotlib.pyplot as plt -from methods import sys_id as sysID from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from functions.natural_freq import plot_natural_frequencies +from functions.sysid_plot import plot_stabilization_diagram +from methods import sysid_module as sysID +from methods.constants import PARAMS def setup_oma(config_path, data_topic_indexes): @@ -31,27 +33,38 @@ def setup_oma(config_path, data_topic_indexes): def run_oma_and_plot(config_path): - number_of_minutes = 0.2 - data_topic_indexes = [0, 2] + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) fig_ax = None aligner_time = None + t1 = time.time() while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + 
t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() - fig_ax = plot_natural_frequencies(results['Fn_poles'], freqlim=(0, 75), fig_ax=fig_ax) + print(aligner_time) + fig_ax = plot_stabilization_diagram(results, PARAMS, fig_ax=fig_ax) plt.show(block=True) sys.stdout.flush() def run_oma_and_print(config_path): number_of_minutes = 0.2 - data_topic_indexes = [0, 2] + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) aligner_time = None + t1 = time.time() while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() sys.stdout.flush() @@ -63,15 +76,15 @@ def run_oma_and_print(config_path): def run_oma_and_publish(config_path): - number_of_minutes = 0.02 - data_topic_indexes = [0, 2] + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) publish_config = load_config(config_path)["sysID"] # Setting up the client for publishing OMA results publish_client, _ = sysID.setup_client(publish_config) # fs not needed here - sysID.publish_oma_results( + publish_result = sysID.publish_oma_results( number_of_minutes, aligner, publish_client, @@ -79,6 +92,29 @@ def run_oma_and_publish(config_path): fs ) - print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") + if publish_result is True: + print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") data_client.disconnect() sys.stdout.flush() + + +def run_oma_and_publish_loop(config_path): + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] + aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + publish_config = load_config(config_path)["sysID"] + + # Setting up the client for publishing OMA results + publish_client, _ = sysID.setup_client(publish_config) # fs not needed here + + loop = True + while loop: + loop = sysID.publish_oma_results( + number_of_minutes, + aligner, + publish_client, + publish_config["TopicsToSubscribe"][0], + fs + ) + if loop is True: + print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") diff --git a/src/examples/updating_parameters.py b/src/examples/updating_parameters.py index 04984ce..fe3e98b 100644 --- a/src/examples/updating_parameters.py +++ b/src/examples/updating_parameters.py @@ -1,8 +1,10 @@ import time from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from methods import sys_id as sysID -from methods import model_update_module as MT +from methods import sysid_module as sysID +from methods import clustering_tracking_module as MT +from methods import model_update_module as MU +from methods.constants import PARAMS # pylint: disable=R0914, C0103 def run_model_update(config_path): @@ -26,10 +28,10 @@ def run_model_update(config_path): data_client.disconnect() # Mode Track - cleaned_values, _, _ = MT.run_mode_track(oma_output) + dictionary_clusters, median_frequencies = MT.run_mode_clustering(oma_output,PARAMS) # Run model update - update_result = MT.run_model_update(cleaned_values) + update_result = MU.run_model_update(dictionary_clusters) if update_result is not None: optimized_parameters = update_result['optimized_parameters'] diff --git a/src/functions/plot_mode_tracking.py 
b/src/functions/plot_mode_tracking.py
new file mode 100644
index 0000000..bac9357
--- /dev/null
+++ b/src/functions/plot_mode_tracking.py
@@ -0,0 +1,67 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+plt.rcParams['font.family'] = 'Times New Roman'
+
+def plot_tracked_modes(
+    tracked_clusters: Dict[str, Any],
+    oma_params: Dict[str, Any],
+    fig_ax: Any = None,
+    x_length: int = None) -> Tuple[matplotlib.figure.Figure, plt.Axes]:
+    """
+    Plot tracked modes
+    Args:
+        tracked_clusters (dict): Tracked clusters, keyed per mode
+        oma_params (dict): OMA parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+        x_length (int): Number of datasets shown on the x-axis
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1) = plt.subplots(1,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1) = fig_ax
+        ax1.clear()
+
+    ii = 0
+    max_x = []
+    for key in tracked_clusters.keys():
+        if key == "iteration":
+            pass
+        else:
+            tracked_cluster_list = tracked_clusters[key]
+            m_f = []
+            x = []
+            for cluster in tracked_cluster_list:
+                m_f.append(cluster['median_f'])
+                x.append(cluster['id'])
+
+            sc = ax1.scatter(x, m_f, marker="o", s=50)
+            col2 = sc.get_facecolors().tolist()
+            ax1.plot(x, m_f, color=col2[0])
+            max_x.append(max(x))
+            ii += 1
+
+    ax1.set_ylabel("Eigenfrequency [Hz]", fontsize=20, color = 'black')
+    ax1.set_xlabel("Dataset", fontsize=20, color = 'black')
+    ax1.tick_params(axis='both', which='major', labelsize=17)
+
+    ax1.set_ylim(0, oma_params['Fs']/2)
+    if x_length is not None:
+        ax1.set_xlim(np.maximum(max(max_x)-x_length,0),max(max_x)+1)
+        ax1.set_xticks(np.arange(np.maximum(max(max_x)-x_length,0),
+                                 np.maximum(max(max_x)+1,x_length), 5))
+
+    # Add major and minor grid lines
+    ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    fig.tight_layout()
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1)
diff --git a/src/functions/sysid_plot.py b/src/functions/sysid_plot.py
new file mode 100644
index 0000000..ea6f87c
--- /dev/null
+++ b/src/functions/sysid_plot.py
@@ -0,0 +1,455 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+from methods.packages.clustering import (remove_complex_conjugates,remove_highly_uncertain_points)
+plt.rcParams['font.family'] = 'Times New Roman'
+
+
+def plot_pre_stabilization_diagram(
+    oma_results: Dict[str, Any],
+    oma_params: Dict[str, Any],
+    fig_ax) -> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]:
+
+    """
+    Plot stabilization of raw OMA data before pre-cleaning
+
+    Args:
+        oma_results (dict): PyOMA results
+        oma_params (dict): OMA parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+
+    """
+
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    frequencies, damping_ratios, _, cov_freq, cov_damping = remove_complex_conjugates(oma_results)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black')
+    ax1.tick_params(axis='both', which='major', labelsize=17)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1
+
+    ax1.scatter(x, y_model_order, marker="o", s=50, c="r")
+    if cov_freq is not None:
+        xerr = 2*np.sqrt(cov_freq)
+        xerr = xerr.flatten(order="f")
+        ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray")
+
+    ax1.set_ylim(0, oma_params['model_order'] + 1)
+    ax1.set_xlim(0, oma_params['Fs']/2)
+
+    # Add major and minor grid lines
+    ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    # # # ............................................................................
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+    ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black')
+    ax2.tick_params(axis='both', which='major', labelsize=17)
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2.scatter(x, y, marker="o", s=50, c="r")
+    if cov_freq is not None:
+        xerr = np.sqrt(cov_damping) * 2
+        xerr = xerr.flatten(order="f")
+        ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray")
+
+    ax2.set_ylim(0, 0.1+0.005)
+    ax2.set_xlim(0, oma_params['Fs']/2)
+
+    for i, txt in enumerate(y_model_order):
+        ax2.annotate(str(txt), (x[i], y[i]))
+
+    # Add major and minor grid lines
+    ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    fig.tight_layout()
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+def plot_stabilization_diagram(
+    oma_results: Dict[str, Any],
+    oma_params: Dict[str, Any],
+    fig_ax) -> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]:
+    """
+    Plot stabilization of OMA data after pre-cleaning
+
+    Args:
+        oma_results (dict): PyOMA results
+        oma_params (dict): OMA parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    #Pre-clean
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black')
+    ax1.tick_params(axis='both', which='major', labelsize=17)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1
+
+    ax1.scatter(x, y_model_order, marker="o", s=50, c="r")
+
+    if cov_freq is not None:
+        xerr = 2*np.sqrt(cov_freq)
+        xerr = xerr.flatten(order="f")
+        ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray")
+
+    ax1.set_ylim(0, oma_params['model_order'] + 1)
+    ax1.set_xlim(0, oma_params['Fs']/2)
+
+    # Add major and minor grid lines
+    ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    # # # ............................................................................
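# Illustrative sketch (toy numbers, not part of the patch): both panels draw
# +/-2*sigma error bars derived from the pole variances, assuming cov_freq
# holds per-pole frequency variances in Hz^2.
import numpy as np
cov_freq_toy = np.array([0.0004, 0.0025])  # hypothetical variances [Hz^2]
xerr_toy = 2 * np.sqrt(cov_freq_toy)       # half-widths: [0.04, 0.10] Hz
# Under a Gaussian assumption, +/-2*sigma spans roughly a 95% confidence
# interval, matching PARAMS['bound_multiplier'] = 2 in constants.py.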
+ + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + ax2.scatter(x, y, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + for i, txt in enumerate(y_model_order): + ax2.annotate(str(txt), (x[i], y[i])) + + ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_clusters(clusters: Dict[str,dict], + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of clusters + + Args: + clsuters (dict): Dictionary of clusters + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + fix_ax (tuple): fig and ax of plot to redraw + Returns: + fig_ax (tuple): fig and ax of plot + + """ + + if fig_ax is None: + plt.ion() + #fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True) + title_number = 0 + else: + fig, (ax1,ax2) = fig_ax + title = fig.axes[0].get_title() + ax1.clear() + ax2.clear() + + iteration_number = title.split(' ')[-1] + #print(iteration_number) + title_number = int(iteration_number) + 1 + + #Pre-clean + frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="^", s=20, c="r", zorder=0, label='Non clustered') + + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar( + x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="r", zorder=1 + ) + + idx = 0 + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + MO = cluster['model_order'] + freq_cluster = cluster['f'] + freq_cov_cluster = cluster['cov_f'] + + sc = ax1.scatter(freq_cluster, MO, marker="o", s=40, label=f'Cluster {i}') + col = sc.get_facecolors().tolist() + ax1.vlines(np.median(freq_cluster),min(cluster['model_order']), + max(cluster['model_order']),color=col) + + xerr_cluster = np.sqrt(freq_cov_cluster) * 2 + ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray",zorder=200) + idx += 1 + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + ax1.legend(prop={'size': 20}) #bbox_to_anchor=(0.1, 1.1) + ax1.set_title(f"Data set: {title_number}") + + # # # ............................................................................ 
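# Illustrative sketch (invented values, not part of the patch): plot_clusters
# expects each entry of `clusters` to carry parallel arrays for its poles,
# with the fields read above and below.
import numpy as np
toy_cluster = {
    'f': np.array([3.98, 4.02]),        # natural frequencies [Hz]
    'cov_f': np.array([1e-4, 2e-4]),    # frequency variances
    'd': np.array([0.012, 0.015]),      # damping ratios
    'cov_d': np.array([1e-6, 2e-6]),    # damping variances
    'model_order': np.array([10, 12]),  # model order of each pole
}
toy_clusters = {0: toy_cluster}         # one entry per cluster, as built by cluster_func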
+ + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + sc = ax2.scatter(x, y, marker="^", s=20, c="r", zorder=0, label='Non clustered') + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + freq_cluster = cluster['f'] + damp_cluster = cluster['d'] + damp_cov_cluster = cluster['cov_d'] + + ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3) + xerr_cluster = np.sqrt(damp_cov_cluster) * 2 + ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray") + + for i, txt in enumerate(y_model_order): + ax2.annotate(str(txt), (x[i], y[i])) + + if y[~np.isnan(y)].shape[0] > 1: + ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1)) + else: + ax2.set_ylim(0, 0.1) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_stabilization_diagram_for_paper( + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of OMA data before after pre-cleaning for paper + + Args: + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + Returns: + fig_ax (tuple): fig and ax of plot + + """ + if fig_ax is None: + plt.ion() + fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1) = fig_ax + ax1.clear() + + #Pre-clean + frequencies, cov_freq, damping_ratios, cov_damping,_ = remove_highly_uncertain_points(oma_results,oma_params) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + # # # ............................................................................ 
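# Illustrative sketch (not part of the patch): every plot_* helper in this
# file follows the same redraw contract, create the window with fig_ax=None,
# then feed the returned (fig, axes) back in to refresh the same window.
# A minimal live-update loop, assuming a hypothetical get_next_results()
# data source:
#
#     fig_ax = None
#     while True:
#         results = get_next_results()  # hypothetical source of OMA results
#         fig_ax = plot_stabilization_diagram(results, PARAMS, fig_ax=fig_ax)
#         plt.pause(0.1)                # service the GUI event loop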
+ + if fig_ax is None: + plt.ion() + fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax2) = fig_ax + ax2.clear() + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + ax2.scatter(x, y, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_clusters_for_paper(clusters: Dict[str,dict], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of clusters for paper + + Args: + clsuters (dict): Dictionary of clusters + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + fix_ax (tuple): fig and ax of plot to redraw + Returns: + fig_ax (tuple): fig and ax of plot + + """ + if fig_ax is None: + plt.ion() + fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1) = fig_ax + ax1.clear() + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + idx = 0 + for i, key in enumerate(clusters.keys()): + + cluster = clusters[key] + MO = cluster['model_order'] + freq_cluster = cluster['f'] + freq_cov_cluster = cluster['cov_f'] + + ax1.scatter(freq_cluster, MO, marker="o", s=50, label=f'Cluster {i+1}') + + xerr_cluster = np.sqrt(freq_cov_cluster) * 2 + ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray",zorder=200) + idx += 1 + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + ax1.legend(prop={'size': 20}) #bbox_to_anchor=(0.1, 1.1) + + # # # ............................................................................ 
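# Illustrative note (not part of the patch): several panels size the y-axis
# with max(y[~np.isnan(y)]); np.nanmax performs the same NaN-aware reduction
# in one call.
import numpy as np
y_toy = np.array([0.01, np.nan, 0.03])
assert np.nanmax(y_toy) == y_toy[~np.isnan(y_toy)].max() == 0.03
# Neither form is safe on all-NaN input (.max() on an empty array raises,
# np.nanmax warns and returns NaN), hence the guard in plot_clusters:
# `if y[~np.isnan(y)].shape[0] > 1:` before setting the limit.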
+ + if fig_ax is None: + plt.ion() + fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + + else: + fig, (ax2) = fig_ax + ax2.clear() + + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + freq_cluster = cluster['f'] + damp_cluster = cluster['d'] + damp_cov_cluster = cluster['cov_d'] + xerr = np.sqrt(damp_cov_cluster) * 2 + xerr = xerr.flatten(order="f") + + ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3,label=f'Cluster {i+1}') + xerr_cluster = np.sqrt(damp_cov_cluster) * 2 + ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray") + + ax2.set_ylim(0, max(damp_cluster)+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + ax2.legend(prop={'size': 20}) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) diff --git a/src/methods/clustering_tracking_module.py b/src/methods/clustering_tracking_module.py new file mode 100644 index 0000000..3ffdf88 --- /dev/null +++ b/src/methods/clustering_tracking_module.py @@ -0,0 +1,317 @@ +import json +import sys +import threading +from typing import Any, List, Dict, Tuple +import numpy as np +import matplotlib.pyplot as plt +import paho.mqtt.client as mqtt +from methods.constants import PARAMS +from methods.packages.clustering import cluster_func +from methods.packages.mode_tracking import cluster_tracking +from functions.sysid_plot import (plot_clusters,plot_stabilization_diagram) +from functions.plot_mode_tracking import plot_tracked_modes +from data.comm.mqtt import load_config, setup_mqtt_client +# pylint: disable=C0103, W0603 + +# Global threading event to wait for OMA data +result_ready = threading.Event() +oma_output_global = None # will store received OMA data inside callback + +def _convert_oma_output(obj: Any) -> Any: + """Recursively convert JSON structure into complex numbers and numpy arrays.""" + if isinstance(obj, dict): + if "real" in obj and "imag" in obj: + return complex(obj["real"], obj["imag"]) + return {k: _convert_oma_output(v) for k, v in obj.items()} + + if isinstance(obj, list): + try: + return np.array([_convert_oma_output(item) for item in obj]) + except Exception: + return [_convert_oma_output(item) for item in obj] + + return obj + + +def _on_connect(client: mqtt.Client, userdata: dict, flags: dict, reason_code: int, properties: mqtt.Properties) -> None: + """Callback when MQTT client connects.""" + if reason_code == 0: + print("Connected to MQTT broker.") + client.subscribe(userdata["topic"], qos=userdata["qos"]) + print(f"Subscribed to topic: {userdata['topic']}") + else: + print(f"Failed to connect to MQTT broker. 
Code: {reason_code}") + + +def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) -> None: + """Callback when a message is received.""" + global oma_output_global + print(f"Message received on topic: {msg.topic}") + try: + raw = json.loads(msg.payload.decode("utf-8")) + oma_output = _convert_oma_output(raw["OMA_output"]) + timestamp = raw["timestamp"] + print(f"Received OMA data at timestamp: {timestamp}") + oma_output_global = oma_output + result_ready.set() + except Exception as e: + print(f"Error processing OMA message: {e}") + + +def run_mode_clustering(oma_output: Any, params: dict[str,Any]) -> Tuple[dict[str,Any], np.ndarray]: + """ + Runs the mode clustering algorithm. + + Args: + oma_output (Any): OMA output from subscription or elsewhere. + Returns: + cluster_dict (dict[str,Any]), + median_frequencies (np.ndarray), + """ + dictionary_clusters = cluster_func(oma_output, params) + + median_frequencies = np.array([dictionary_clusters[key]["median_f"] + for key in dictionary_clusters.keys()]) + return dictionary_clusters, median_frequencies + + +def run_mode_tracking(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], + params: dict[str,Any]) -> dict[str,Any]: + """ + Runs the mode tracking algorithm. + + Args: + cluster_dict (dict[str,Any]): Clusters from OMA + Returns: + tracked_clusters (dict[str,Any]): Tracked clusters + """ + tracked_clusters = cluster_tracking(cluster_dict, tracked_clusters, params) + return tracked_clusters + + +def subscribe_and_cluster(config_path: str, params: Dict[str,Any] + ) -> Tuple[Dict[str,Any], Dict[str,Any]]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + print("Waiting for OMA data...") + try: + result_ready.wait() # Wait until message arrives + mqtt_client.loop_stop() + mqtt_client.disconnect() + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + clusters, median_frequencies = run_mode_clustering(oma_output_global,params) + print("Clustered frequencies", median_frequencies) + + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + except Exception as e: + print(f"Unexpected error: {e}") + + return oma_output_global, clusters + + +def subscribe_and_get_clusters(config_path: str) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. 
+ + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + tracked_clusters (Dict[str,Any]]): Tracked clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + tracked_clusters = {} + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + print("Waiting for OMA data...") + try: + result_ready.wait() # Wait until message arrives + mqtt_client.loop_stop() + mqtt_client.disconnect() + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) + + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + except Exception as e: + print(f"Unexpected error: {e}") + + return oma_output_global, clusters, tracked_clusters + + +def subscribe_cluster_looping(config_path: str, topic_index: int = 0, + plot: np.ndarray[bool] = np.array([1,1]) + ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + topic_index (int): Topic to subscribe + plot (np.ndarray[bool]): Array describing what plots to show + + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + tracked_clusters (Dict[str,Any]]): Tracked clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + + fig_ax1 = None + fig_ax2 = None + while True: + # try: + print("Waiting for OMA data...") + result_ready.wait() # Wait until message arrives + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. 
Running mode clustering and tracking...") + result_ready.clear() + + if plot[0] == 1: + fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) + plt.show(block=False) + + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + + if plot[1] == 1: + fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) + plt.show(block=False) + + sys.stdout.flush() + # except KeyboardInterrupt: + # print("Shutting down gracefully") + # mqtt_client.loop_stop() + # mqtt_client.disconnect() + # break + # except Exception as e: + # print(f"Unexpected error: {e}") + +def subscribe_cluster_and_tracking_looping(config_path: str, topic_index: int = 0, + plot: np.ndarray[bool] = np.array([1,1,1]) + ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + topic_index (int): Topic to subscribe + plot (np.ndarray[bool]): Array describing what plots to show + + Returns: + + Plots: + Stabilization diagram + Cluster plot + Tracked clusters plot + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + tracked_clusters = {} + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + + fig_ax1 = None + fig_ax2 = None + fig_ax3 = None + while True: + try: + print("Waiting for OMA data...") + result_ready.wait() # Wait until message arrives + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + result_ready.clear() + + if plot[0] == 1: + fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) + plt.show(block=False) + + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) + + if plot[1] == 1: + fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) + plt.show(block=False) + if plot[2] == 1: + fig_ax3 = plot_tracked_modes(tracked_clusters,PARAMS,fig_ax=fig_ax3,x_length=None) + plt.show(block=False) + sys.stdout.flush() + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + break + except Exception as e: + print(f"Unexpected error: {e}") diff --git a/src/methods/constants.py b/src/methods/constants.py index 3309c56..d376b5e 100644 --- a/src/methods/constants.py +++ b/src/methods/constants.py @@ -6,17 +6,39 @@ MIN_SAMPLES_NEEDED = 540 # Minimum samples for running sysid -BLOCK_SHIFT = 30 - -MODEL_ORDER = 20 - -# Constants for Model track -MSTAB_FACTOR = 0.4 # This is goning to be multiplied by the MODEL_ORDER to get the mstab -TMAC = 0.9 - # Constants for Model Update # 1st parameter is spring stiffness and 2nd is unbounded length X0 = np.array([1e1, 10e-3]) # Create bounds using element-wise i.e. 
different parameters have different bounds
 BOUNDS = [(1e-2 * X0[0], 1e2 * X0[0]), (1e-2 * X0[1], 1e2 * X0[1])]
+
+
+
+
+# Parameters
+PARAMS = {}
+
+# Pre-clean
+PARAMS['freq_variance_treshold'] = 0.1
+PARAMS['damp_variance_treshold'] = 10**6
+
+PARAMS['Fs'] = 256 # Sampling frequency
+PARAMS['model_order_min'] = 2 # Set the min model order
+PARAMS['model_order'] = 15 # Set the max model order for analysis
+PARAMS['block_shift'] = 30 # Block size in Hankel matrix
+PARAMS['sensor_order'] = np.array([0, 2, 1, 3]) # sensor location in data
+
+# Params for clustering:
+PARAMS['mstab'] = 6 # minimum number of frequencies to be validated as a cluster
+PARAMS['tMAC'] = 0.95 # MAC threshold to be included in a cluster
+PARAMS['bound_multiplier'] = 2 # Standard deviation multiplier
+PARAMS['allignment_factor'] = [0.05,0.01] # Factors for alignment
+
+# Params for mode tracking
+PARAMS['phi_cri'] = 0.8 #0.98 # MAC criterion [%]
+PARAMS['freq_cri'] = 0.2 #0.2 # Frequency difference criterion [%]
+PARAMS['obj_cri'] = 0.1
+# If more clusters match and it is not clear which cluster is best, check whether
+# the difference of the objective function values is less than this criterion.
+# The best match is then probably the one with the highest MAC rather than frequency [difference]
diff --git a/src/methods/model_update_module.py b/src/methods/model_update_module.py
index 652630c..2342816 100644
--- a/src/methods/model_update_module.py
+++ b/src/methods/model_update_module.py
@@ -1,16 +1,13 @@
 import json
 import threading
-from typing import Any, List, Dict, Tuple, Optional
+from typing import Any, List, Dict, Optional
 import numpy as np
 import paho.mqtt.client as mqtt
 from scipy.optimize import minimize
 from scipy.linalg import eigh
-from methods.constants import MODEL_ORDER, MSTAB_FACTOR, TMAC
-from methods.packages.mode_track import mode_allingment
 from methods.packages.eval_yafem_model import eval_yafem_model
 from methods.packages import model_update
 from methods.constants import X0, BOUNDS
-from data.comm.mqtt import load_config, setup_mqtt_client
 # pylint: disable=C0103, W0603
 
 # Global threading event to wait for OMA data
@@ -58,27 +55,6 @@ def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) ->
         print(f"Error processing OMA message: {e}")
 
 
-def run_mode_track(oma_output: Any) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
-    """
-    Runs the mode tracking algorithm.
-
-    Args:
-        oma_output (Any): OMA output from subscription or elsewhere.
-    Returns:
-        cleaned_values (List[Dict]),
-        median_frequencies (np.ndarray),
-        confidence_intervals (np.ndarray)
-    """
-    mstab = MODEL_ORDER * MSTAB_FACTOR
-    cleaned_values = mode_allingment(oma_output, mstab, TMAC)
-    median_frequencies = np.array([cluster["median"] for cluster in cleaned_values])
-    confidence_intervals = np.array([
-        cluster["original_cluster"]["confidence_interval"]
-        for cluster in cleaned_values
-    ])
-    return cleaned_values, median_frequencies, confidence_intervals
-
-
 # pylint: disable=R0914
 def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]:
     """
@@ -137,41 +113,3 @@ def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]:
     except ValueError as e:
         print(f"Skipping model updating due to error: {e}")
         return None
-
-
-def subscribe_and_get_cleaned_values(config_path: str,
-                                     num_clusters: int = 2) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
-    """
-    Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results.
-
-    Args:
-        config_path (str): Path to config JSON. 
- num_clusters (int): Number of clusters to keep after mode tracking. - - Returns: - cleaned_values (List[Dict]), - median_frequencies (np.ndarray), - confidence_intervals (np.ndarray) - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - print("Waiting for OMA data...") - result_ready.wait() # Wait until message arrives - mqtt_client.loop_stop() - mqtt_client.disconnect() - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. Running mode tracking...") - return run_mode_track(oma_output_global) diff --git a/src/methods/packages/clustering.py b/src/methods/packages/clustering.py new file mode 100644 index 0000000..36642a5 --- /dev/null +++ b/src/methods/packages/clustering.py @@ -0,0 +1,1206 @@ +from typing import Any +import numpy as np + +# Following the algorithm proposed here: https://doi.org/10.1007/978-3-031-61421-7_56 +# JVM 10/10/2025 + +def cluster_func(oma_results: dict[str,Any], Params : dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any], dict[str,Any]]: + """ + Clustering of OMA results + + Args: + oma_results (dict): PyOMA results + Params (dict): Algorihm parameters + Returns: + cluster_dict_1 (dict): Dictionary of clusters after clustering + cluster_dict_2 (dict): Dictionary of clusters after alignment + cluster_dict_3 (dict): Dictionary of clusters after cardinailty check + + """ + + #Preeliminary cleaning + frequencies_, cov_freq_, damping_ratios_, cov_damping_, mode_shapes_ = remove_complex_conjugates(oma_results) + frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_highly_uncertain_points(oma_results,Params) + + # Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. + # This means the the frequency array maps directly to the plot: + # MO. + # 5.| x x + # 4.| x + # 3.| x + # 2.| x + # 1.| + # 0.| + # -1----4------- Frequency + # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. 
+ # 6 for each model order, including 0 and 3 for maximum poles in a modelorder + # The frequency array will then become: + # _0_1_ + # 0| 1 4 + # 1| 1 Nan + # 0| 1 Nan + # 0| Nan 4 + # 0| Nan Nan + # 0| Nan Nan + + frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_oma_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes) + + row, col = np.indices(model_orders.shape) + row = row.flatten(order="C") + col = col.flatten(order="C") + + #Initiate data + data1 = {'frequencies':frequencies, + 'damping_ratios':damping_ratios, + 'cov_f':cov_freq, + 'cov_d':cov_damping, + 'mode_shapes':mode_shapes2, + 'row':row, + 'col':col} + + cluster_dict = {} + cluster_counter = 0 + for count, f in enumerate(frequencies.flatten(order="f")): #np.count_nonzero(~np.isnan(frequencies)) + + #print("\nIteration",count,"Unclustered poles:",np.count_nonzero(~np.isnan(frequencies))) + + #Extract data + frequencies = data1['frequencies'] + damping_ratios = data1['damping_ratios'] + cov_freq = data1['cov_f'] + cov_damping = data1['cov_d'] + + #Inital point + r = row[count] + c = col[count] + ip = [frequencies[r,c],cov_freq[r,c],damping_ratios[r,c],cov_damping[r,c]] + + if np.isnan(ip[0]) == True: #Pass if the pole does not exist. + pass + else: + initial_points = cluster_initial(ip,data1) #Algorithm. 1 step 3 - Initialization + + #Creating clusters + cluster1 = cluster_creation(initial_points,Params) + + data2 = data1.copy() + + # Cluster expansion + expansion = True + kk = 0 + while expansion: + kk += 1 + if kk > 10: + print("Expansion never ends, something is wrong.") + breakpoint() + pre_cluster = cluster1 + cluster2 = cluster_expansion(cluster1,data2,Params,oma_results) + if cluster2['f'].shape == pre_cluster['f'].shape: + if (cluster2['f'] == pre_cluster['f']).all(): + expansion = False + else: + cluster1 = cluster2 + else: + cluster1 = cluster2 + + #Sort if more than one pole exist in the cluster + if isinstance(cluster2['f'],np.ndarray): + cluster2 = sort_cluster(cluster2) + + #Save cluster + if isinstance(cluster2['f'],np.ndarray): #Must atleast have two poles + #print("Cluster saved", np.median(cluster2['f'])) + cluster_dict[str(cluster_counter)] = cluster2 + cluster_counter += 1 + data1 = remove_data_from_S(data2,cluster2) #Remove clustered poles from data + else: + print("cluster2 too short:",1,"But must be:",Params['mstab']) + + + #Allignment or merging of stacked clusters + cluster_dict2 = alignment(cluster_dict.copy(),Params) + #Median filter + #cluster_dict3 = median_filter(cluster_dict2.copy()) + + #Custom cardinality check + cluster_dict3 = {} + cluster_counter = 0 + for ii, key in enumerate(cluster_dict2.keys()): + cluster = cluster_dict2[key] + if isinstance(cluster['f'],np.ndarray): + if cluster['f'].shape[0] < Params['mstab']: + print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) + else: + print("Cluster saved", np.median(cluster['f'])) + cluster_dict3[str(ii)] = cluster + cluster_counter += 1 + data1 = remove_data_from_S(data2,cluster) #Remove clustered poles from data + else: + print("cluster too short:",1,"But must be:",Params['mstab']) + cluster_dict2.pop(key) + + #Add median and confidence intervals (one sided) to cluster data + for key in cluster_dict3.keys(): + cluster = cluster_dict3[key] + cluster['median_f'] = np.median(cluster['f']) + # ci_f_upper = [] + # ci_f_lower = [] + # ci_d_upper = [] + # ci_d_lower = [] + # for ii, cov_f in enumerate(cluster['cov_f']): + # 
ci_f_upper.append(np.sqrt(cov_f) * Params['bound_multiplier']) + # ci_f_lower.append(np.sqrt(cov_f) * Params['bound_multiplier']) + # ci_d_upper.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) + # ci_d_lower.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) + ci_f = np.sqrt(cluster['cov_f']) * Params['bound_multiplier'] + ci_d = np.sqrt(cluster['cov_d']) * Params['bound_multiplier'] + cluster['ci_f'] = ci_f + cluster['ci_d'] = ci_d + + #Sort the clusters into accending order of median frequency + median_frequencies = np.zeros(len(cluster_dict3)) + for ii, key in enumerate(cluster_dict3.keys()): + cluster = cluster_dict3[key] + median_frequencies[ii] = cluster['median_f'] + + indices = np.argsort(median_frequencies) + cluster_dict4 = {} + for ii, id in enumerate(np.array(list(cluster_dict3.keys()))[indices]): #Rename all cluster dict from 0 to len(cluster_dict2) + cluster_dict4[ii] = cluster_dict3[id] #Insert a cluster into a key + + return cluster_dict4 + +def calculate_mac(reference_mode: np.array, mode_shape: np.array) -> float: + """ + Calculate Modal Assurance Criterion (MAC) + + Args: + reference_mode (np.array): Mode shape to compare to + mode_shape (np.array): Mode shape to compare + Returns: + MAC (float): Modal Assurance Criterion + + """ + numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2 + denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape) + return np.real(numerator / denominator) + +def cluster_initial(ip: list[float], data: dict[str,Any], bound: float = 2) -> dict[str,Any]: + """ + Find the initial cluster points + + Args: + ip (list): Frequency, damping and covariance for the inital point (ip) + data (dict): OMA points data + bound (float): Multiplier on standard deviation + Returns: + initial_points (float): Initial points to create cluster from + + """ + #Extract data of initial point + ip_f = ip[0] + ip_cov_f = ip[1] + ip_d = ip[2] + ip_cov_d = ip[3] + + # Confidence interval using the ±2*standard_deviation + f_lower_bound = ip_f - bound * np.sqrt(ip_cov_f) + f_upper_bound = ip_f + bound * np.sqrt(ip_cov_f) + z_lower_bound = ip_d - bound * np.sqrt(ip_cov_d) + z_upper_bound = ip_d + bound * np.sqrt(ip_cov_d) + + + frequencies = data['frequencies'] + damping_ratios = data['damping_ratios'] + + # Find elements within the current limit that are still ungrouped + condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound)# & ungrouped_mask + indices = np.argwhere(condition_mask) # Get indices satisfying the condition + + #Generate the data for inital points + initial_points = {} + initial_points['f'] = data['frequencies'][condition_mask] + initial_points['cov_f'] = data['cov_f'][condition_mask] + initial_points['d'] = data['damping_ratios'][condition_mask] + initial_points['cov_d'] = data['cov_d'][condition_mask] + initial_points['ms'] = data['mode_shapes'][condition_mask,:] + initial_points['row'] = indices[:,0] + initial_points['col'] = indices[:,1] + + return initial_points + +def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Algorithm 2 + """ + Create cluster + + Args: + IP (dict): Dictionary of data on inital points + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Cluster + + """ #Algorithm 2 + #print("\nCluster creation") + #Extract data: + frequencies = IP['f'] + cov_f = IP['cov_f'] + damping_ratios = IP['d'] + 
cov_d = IP['cov_d'] + mode_shapes = IP['ms'] + row = IP['row'] + col = IP['col'] + + IPu = {} + if len(row) != len(set(row)): #line 5 in algorithm #If there are multiple points at the same model order + for ii, id in enumerate(row): #Go through all rows/model orders + pos = np.argwhere(row==id) #Locate the indices of one or more poles + #line 6 in algorithm + if len(pos) == 1: #If only 1 pole exist at the model order + if len(IPu) == 0: #First pole + IPu['f'] = frequencies[ii] + IPu['cov_f'] = cov_f[ii] + IPu['d'] = damping_ratios[ii] + IPu['cov_d'] = cov_d[ii] + IPu['ms'] = np.array((mode_shapes[ii,:])) + IPu['row'] = row[ii] + IPu['col'] = col[ii] + unique = 1 #To determine if the unique poles are more than one, for later use. if 1 then only one unique pole exist + else: + IPu['f'] = np.append(IPu['f'],frequencies[ii]) + IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii]) + IPu['d'] = np.append(IPu['d'],damping_ratios[ii]) + IPu['cov_d'] = np.append(IPu['cov_d'],cov_d[ii]) + IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:])) + IPu['row'] = np.append(IPu['row'],row[ii]) + IPu['col'] = np.append(IPu['col'],col[ii]) + unique = 2 #To determine if the unique poles are more than one, for later use. if 2 more than one uniqe pole exist + + if len(IPu) > 0: #If there exist model orders with unique poles + if unique == 1: #If there only exist one unique pole + cluster = {'f':np.array([IPu['f']]), + 'cov_f':np.array([IPu['cov_f']]), + 'd':np.array([IPu['d']]), + 'cov_d':np.array([IPu['cov_d']]), + 'mode_shapes':np.array([IPu['ms']]), + 'model_order':np.array([Params['model_order']-IPu['row']]), + 'row':np.array([IPu['row']]), + 'col':np.array([IPu['col']]), + 'MAC':np.array([1])} + # print("371, IPu",cluster['f'],cluster['row']) + else: #If more unique poles exist + cluster = {'f':np.array([IPu['f'][0]]), + 'cov_f':np.array([IPu['cov_f'][0]]), + 'd':np.array([IPu['d'][0]]), + 'cov_d':np.array([IPu['cov_d'][0]]), + 'mode_shapes':np.array([IPu['ms'][0,:]]), + 'model_order':np.array([Params['model_order']-IPu['row'][0]]), + 'row':np.array([IPu['row'][0]]), + 'col':np.array([IPu['col'][0]]), + 'MAC':np.array([1])} + # print("381, IPu",cluster['f'],cluster['row']) + # print("IPu",IPu['row']) + # if cluster['f'][0] > 300: + # breakpoint() + cluster, non_clustered_IPu = cluster_from_mac(cluster,IPu,Params) #cluster the unique poles + + else: #if no unique poles exist then go forth with the initial point, ip. 
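# Illustrative sketch (invented shapes, not part of the patch): membership in
# cluster_from_mac hinges on calculate_mac exceeding PARAMS['tMAC'] (0.95).
# MAC is scale-invariant, so a rescaled copy of a mode shape still scores 1,
# while an orthogonal shape scores 0:
#
#     import numpy as np
#     phi_a = np.array([1.0 + 0j, 2.0 + 0j])
#     phi_b = np.array([2.0 + 0j, -1.0 + 0j])  # orthogonal to phi_a
#     calculate_mac(phi_a, 3 * phi_a)          # -> 1.0 (same shape, scaled)
#     calculate_mac(phi_a, phi_b)              # -> 0.0 (no correlation)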
+        else: # No unique poles exist, so fall back to the initial point, ip
+            # Only the initial point is clustered
+            cluster = {'f':np.array([frequencies[0]]),
+                       'cov_f':np.array([cov_f[0]]),
+                       'd':np.array([damping_ratios[0]]),
+                       'cov_d':np.array([cov_d[0]]),
+                       'mode_shapes':np.array([mode_shapes[0,:]]),
+                       'model_order':np.array([Params['model_order']-row[0]]),
+                       'row':np.array([row[0]]),
+                       'col':np.array([col[0]]),
+                       'MAC':np.array([1])}
+
+            # Check if there are multiple points with the same model order as ip
+            ip_ids = np.argwhere(row==row[0])
+            if len(ip_ids[:,0]) > 1: # Remove all the other points at the same model order
+                for ii in sorted(ip_ids[1:,0], reverse=True): # Delete from the back so earlier indices stay valid
+                    frequencies = np.delete(frequencies,ii)
+                    cov_f = np.delete(cov_f,ii)
+                    damping_ratios = np.delete(damping_ratios,ii)
+                    cov_d = np.delete(cov_d,ii)
+                    mode_shapes = np.delete(mode_shapes,ii,axis=0)
+                    row = np.delete(row,ii)
+                    col = np.delete(col,ii)
+
+        if len(row) != len(set(row)): # There are still points at the same model order in IP
+            IPm = {}
+            for ii, row_id in enumerate(row): # Go through all rows/model orders
+                pos = np.argwhere(row == row_id) # Locate the indices of one or more poles
+                # line 6 in the algorithm
+                if len(pos) > 1: # More than one pole exists at this model order
+                    if len(IPm) == 0: # First pole
+                        IPm['f'] = frequencies[ii]
+                        IPm['cov_f'] = cov_f[ii]
+                        IPm['d'] = damping_ratios[ii]
+                        IPm['cov_d'] = cov_d[ii]
+                        IPm['ms'] = np.array((mode_shapes[ii,:]))
+                        IPm['row'] = row[ii]
+                        IPm['col'] = col[ii]
+                    else:
+                        IPm['f'] = np.append(IPm['f'],frequencies[ii])
+                        IPm['cov_f'] = np.append(IPm['cov_f'],cov_f[ii])
+                        IPm['d'] = np.append(IPm['d'],damping_ratios[ii])
+                        IPm['cov_d'] = np.append(IPm['cov_d'],cov_d[ii])
+                        IPm['ms'] = np.vstack((IPm['ms'],np.array(mode_shapes[ii,:])))
+                        IPm['row'] = np.append(IPm['row'],row[ii])
+                        IPm['col'] = np.append(IPm['col'],col[ii])
+            # After the unique poles are clustered, the multiple poles are clustered
+            cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,IPm,Params)
+
+            # Re-cluster until no more points can be added
+            cluster_len_before = 0
+            while len(cluster['row']) != cluster_len_before:
+                cluster_len_before = len(cluster['row'])
+                if len(IPu) > 0 and len(non_clustered_IPu['row']) > 0: # non_clustered_IPu only exists if unique poles were clustered
+                    cluster, non_clustered_IPu = cluster_from_mac(cluster,non_clustered_IPu,Params) # Cluster the unique poles again
+                if len(non_clustered_IPm['row']) > 0:
+                    cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,non_clustered_IPm,Params) # Cluster the non-unique poles again
+
+    else: # line 1 in the algorithm: only unique poles
+        cluster = {'f':np.array([frequencies[0]]),
+                   'cov_f':np.array([cov_f[0]]),
+                   'd':np.array([damping_ratios[0]]),
+                   'cov_d':np.array([cov_d[0]]),
+                   'mode_shapes':np.array([mode_shapes[0,:]]),
+                   'model_order':np.array([Params['model_order']-row[0]]),
+                   'row':np.array([row[0]]),
+                   'col':np.array([col[0]]),
+                   'MAC':np.array([1])}
+        if IP['f'].shape[0] > 1:
+            cluster, _ = cluster_from_mac(cluster,IP,Params)
+
+    # The algorithm's cardinality check would go here, e.g. discarding clusters
+    # with fewer than Params['mstab'] poles; it is intentionally left disabled.
+
+    return cluster
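+# Minimal sketch of the initialisation/creation steps in isolation (assumes
+# `data` holds the transposed OMA arrays from transform_oma_features and that
+# PARAMS provides 'model_order' and 'tMAC'; the indices are illustrative):
+#
+#     >>> ip = [data['frequencies'][0, 0], data['cov_f'][0, 0],
+#     ...       data['damping_ratios'][0, 0], data['cov_d'][0, 0]]
+#     >>> initial_points = cluster_initial(ip, data, bound=2)
+#     >>> cluster = cluster_creation(initial_points, PARAMS)
+#     >>> cluster['f']  # frequencies of the poles grouped around ip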
+def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any]]:
+    """
+    Add points to the cluster based on MAC
+
+    Args:
+        cluster (dict): Intermediate cluster
+        IP (dict): Dictionary of data on the initial points
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Intermediate cluster
+        unclustered_IPu (dict): Points that could not be clustered
+
+    """
+    # Extract data
+    frequencies = IP['f']
+    cov_f = IP['cov_f']
+    damping_ratios = IP['d']
+    cov_d = IP['cov_d']
+    mode_shapes = IP['ms']
+    row = IP['row']
+    col = IP['col']
+
+    ip_ms = IP['ms'][0]
+    i_ms = IP['ms'][1:]
+
+    skip_id = []
+
+    for jj, ms in enumerate(i_ms): # Go through all remaining mode shapes
+        idx = jj+1
+        MAC = calculate_mac(ip_ms,ms) # Does the mode shape match the first pole?
+        if MAC > Params['tMAC']: # line 2 in the algorithm
+            # Add to cluster
+            cluster['f'] = np.append(cluster['f'],frequencies[idx])
+            cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+            cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+            cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+            cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+            cluster['MAC'] = np.append(cluster['MAC'],MAC)
+            cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+            cluster['row'] = np.append(cluster['row'],row[idx])
+            cluster['col'] = np.append(cluster['col'],col[idx])
+
+            skip_id.append(idx)
+
+    # Compare the remaining points with the newly added cluster points, i.e.
+    # points are compared with the full cluster, not just ip
+    if cluster['f'].shape[0] > 1: # Points have been added to the cluster, so proceed
+        if IP['ms'].shape[0] > len(skip_id): # There are more points left to compare, so proceed
+            n_clustered_before = -1
+            while len(skip_id) != n_clustered_before: # Run until no points are clustered anymore
+                n_clustered_before = len(skip_id)
+
+                i_ms = IP['ms'][1:]
+                for jj, ms in enumerate(i_ms):
+                    idx = jj+1
+                    if idx in skip_id:
+                        continue
+
+                    MAC_list = []
+                    for c_ms in cluster['mode_shapes']:
+                        MAC_list.append(calculate_mac(c_ms,ms))
+
+                    if max(MAC_list) > Params['tMAC']: # line 2 in the algorithm
+                        # Add to cluster
+                        cluster['f'] = np.append(cluster['f'],frequencies[idx])
+                        cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+                        cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+                        cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+                        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+                        cluster['MAC'] = np.append(cluster['MAC'],max(MAC_list)) # Store the MAC that admitted the point
+                        cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+                        cluster['row'] = np.append(cluster['row'],row[idx])
+                        cluster['col'] = np.append(cluster['col'],col[idx])
+
+                        skip_id.append(idx)
+
+    clustered_id = []
+    for r2 in cluster['row']: # For every entry of 'row' in the cluster
+        for ii, r1 in enumerate(IP['row']): # For every entry of 'row' in IP
+            if r1 == r2: # r1 is an entry of 'row' in the cluster, so record it for later
+ clustered_id.append(ii) + + all_id = np.array(list(range(len(IP['row'])))) + + clustered_id = np.array(clustered_id) + if clustered_id.shape[0] > 0: + unclustered_id = np.delete(all_id,clustered_id) + unclustered_id = np.insert(unclustered_id,0,0) + else: + unclustered_id = all_id + + unclustered_IPu = {} + unclustered_IPu['f'] = IP['f'][unclustered_id] + unclustered_IPu['cov_f'] = IP['cov_f'][unclustered_id] + unclustered_IPu['d'] = IP['d'][unclustered_id] + unclustered_IPu['cov_d'] = IP['cov_d'][unclustered_id] + unclustered_IPu['ms'] = IP['ms'][unclustered_id] + unclustered_IPu['row'] = IP['row'][unclustered_id] + unclustered_IPu['col'] = IP['col'][unclustered_id] + + return cluster, unclustered_IPu + +def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: + """ + Cluster based on MAC if multiple poles exist for the model order + + Args: + cluster (dict): Intermediate cluster + IP (dict): Dictionary of data on inital points + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Intermediate cluster + + """ + #Cluster based on MAC if multiple poles exist for the model order + # print("cluster_IPm") + #Extract data + frequencies = IPm['f'] + cov_f = IPm['cov_f'] + damping_ratios = IPm['d'] + cov_d = IPm['cov_d'] + mode_shapes = IPm['ms'] + row = IPm['row'] + col = IPm['col'] + + # if isinstance(cluster['f'],np.ndarray): + # ip_ms = cluster['mode_shapes'][0,:] #Mode shape of the first pole + # else: + # ip_ms = cluster['mode_shapes'] #Mode shape of the first pole + + # Find the model orders with multiple poles + pos = [] + for ii, idd in enumerate(set(row)): + pos.append(np.argwhere(row==idd)) + + skip_id = [] + skip_id_before = None + while skip_id != skip_id_before: + ip_ms = cluster['mode_shapes'] + if isinstance(cluster['f'],np.ndarray): + ip_ms_0 = ip_ms[0,:] #Mode shape of the first pole + else: + ip_ms_0 = ip_ms #Mode shape of the first pole + + i_ms = IPm['ms'][:] #Mode shape of the model orders with mutiple poles + + + skip_id_before = skip_id.copy() + # print("Cluster in IPm",cluster['row']) + + + #Go through all the model orders + for oo, pos_i in enumerate(pos): + MAC = np.zeros(pos_i.shape[0]) + # print("IPm model order",list(set(row))[oo]) + + if oo in skip_id: #Skip these model orders, since they have already been added. 
+ continue + + pos_i = pos_i[:,0] + for ii, id_row in enumerate(pos_i): + #print(IPm['row'][id_row],id_row) + #print(ip_ms.shape,i_ms[id_row].shape) + MAC[ii] = calculate_mac(ip_ms_0,i_ms[id_row]) #Calculate MAC between first pole of cluster and a pole in IPm + + #If MAC is not satisfied + if MAC[ii] < Params['tMAC']: #Search for max across all mode shapes in cluster: + #line 3 in algorithm + MAC_list = [] + for ms in ip_ms: + MAC_list.append(calculate_mac(ms,i_ms[id_row])) + MAC[ii] = max(MAC_list) + + #Find the mask for the poles that meets the MAC criteria + mask = MAC > Params['tMAC'] + pos_MAC = np.argwhere(mask==True) #Get indicies + + #Formatting of the indicies + if pos_MAC.shape[0] > 1: #more than one indice + pos_MAC = pos_MAC[:,0] + else: #Only one or zero indice (No MAC match) + if pos_MAC.shape[0] == 1: + pos_MAC = pos_MAC[0] + + # print("MAC",MAC) + # print("MACpos",pos_MAC) + if pos_MAC.shape[0] > 1: #If multiple poles comply with MAC criteria + #ids formatting + ids = pos_i[pos_MAC] + #ids = ids[:,0] + + #Get frequencies of poles + freq = np.zeros(ids.shape[0]) + for jj, idid in enumerate(ids): + freq[jj] = frequencies[idid] + median_f = np.median(cluster['f']) + + #Locate the index of the closest pole + idx = (np.abs(freq - median_f)).argmin() + ll = pos_i[pos_MAC[idx]] + + # print("IPm point mac approved",row[ll],frequencies[ll],MAC) + + #Add this pole to the cluster + cluster['f'] = np.append(cluster['f'],frequencies[ll]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) + cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[idx]]) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) + cluster['row'] = np.append(cluster['row'],row[ll]) + cluster['col'] = np.append(cluster['col'],col[ll]) + + skip_id.append(oo) + + elif pos_MAC.shape[0] == 1: #If only one pole complies with MAC + ll = pos_i[pos_MAC[0]] + + + # print("IPm point mac approved",row[ll],frequencies[ll],MAC) + + + #Add this pole to the cluster + cluster['f'] = np.append(cluster['f'],frequencies[ll]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) + cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[0]]) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) + cluster['row'] = np.append(cluster['row'],row[ll]) + cluster['col'] = np.append(cluster['col'],col[ll]) + + skip_id.append(oo) + # else: + # print("Not clustered. MAC not satisfied") + # print("skip",skip_id) + + clustered_id = [] + for r2 in cluster['row']: #For every entry in row cluster + unclustered_point = False + for ii, r1 in enumerate(IPm['row']): #For every entry in row IPm + if r1 == r2: #If r1 is a entry of "row" in the cluster, then save that row for later. 
+ clustered_id.append(ii) + + all_id = np.array(list(range(len(IPm['row'])))) + + clustered_id = np.array(clustered_id) + if clustered_id.shape[0] > 0: + unclustered_id = np.delete(all_id,clustered_id) + else: + unclustered_id = all_id + # print("709,unclustered_id",unclustered_id) + + unclustered_IPm = {} + unclustered_IPm['f'] = IPm['f'][unclustered_id] + unclustered_IPm['cov_f'] = IPm['cov_f'][unclustered_id] + unclustered_IPm['d'] = IPm['d'][unclustered_id] + unclustered_IPm['cov_d'] = IPm['cov_d'][unclustered_id] + unclustered_IPm['ms'] = IPm['ms'][unclustered_id] + unclustered_IPm['row'] = IPm['row'][unclustered_id] + unclustered_IPm['col'] = IPm['col'][unclustered_id] + + # print("unclustered_IPm['row']",unclustered_IPm['row']) + + + return cluster, unclustered_IPm + +def remove_data_from_S(data: dict[str,Any],cluster: dict[str,Any]) -> dict[str,Any]: + """ + Remove cluster from data or S + + Args: + data (dict): OMA points data + cluster (dict): cluster + Returns: + data2 (dict): Filtered OMA points data + + """ + #Copy data + frequencies = data['frequencies'].copy() + damping_ratios = data['damping_ratios'].copy() + cov_freq = data['cov_f'].copy() + cov_damping = data['cov_d'].copy() + mode_shapes = data['mode_shapes'].copy() + row = data['row'].copy() + col = data['col'].copy() + #Make new data dictionary + data2 = {'frequencies':frequencies, + 'damping_ratios':damping_ratios, + 'cov_f':cov_freq, + 'cov_d':cov_damping, + 'mode_shapes':mode_shapes, + 'row':row, + 'col':col} + #Remove data + row = cluster['row'] + col = cluster['col'] + for ii, r in enumerate(row): + c = col[ii] + data2['frequencies'][r,c] = np.nan + data2['damping_ratios'][r,c] = np.nan + data2['cov_f'][r,c] = np.nan + data2['cov_d'][r,c] = np.nan + data2['mode_shapes'][r,c,:] = np.nan + + return data2 + +def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[str,Any], oma_results) -> dict[str,Any]: + """ + Expand cluster based on minima and maxima bound + + Args: + cluster (dict): Intermediate cluster + data (dict): OMA points data + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Expanded cluster + + """ + #print("\nExpansion") + unClustered_frequencies = data['frequencies'] + unClustered_damping = data['damping_ratios'] + + freq_c = cluster['f'] + cov_f = cluster['cov_f'] + damp_c = cluster['d'] + cov_d = cluster['cov_d'] + row = cluster['row'] + + bound_multiplier = Params['bound_multiplier'] + + #Find min-max bounds of cluster + f_lower_bound = np.min(freq_c - bound_multiplier * np.sqrt(cov_f)) # Minimum of all points for frequencies + f_upper_bound = np.max(freq_c + bound_multiplier * np.sqrt(cov_f)) # Maximum of all points for frequencies + d_lower_bound = np.min(damp_c - bound_multiplier * np.sqrt(cov_d)) # Minimum of all points for damping + d_upper_bound = np.max(damp_c + bound_multiplier * np.sqrt(cov_d)) # Maximum of all points for damping + + #Mask of possible expanded poles + condition_mask = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= d_lower_bound) & (unClustered_damping <= d_upper_bound) + # Get indices satisfying the condition + expanded_indices = np.argwhere(condition_mask) + + #Initiate cluster_points for cluster creation + cluster_points = {} + cluster_points['f'] = data['frequencies'][condition_mask] + cluster_points['cov_f'] = data['cov_f'][condition_mask] + cluster_points['d'] = data['damping_ratios'][condition_mask] + cluster_points['cov_d'] = 
data['cov_d'][condition_mask]
+    cluster_points['ms'] = data['mode_shapes'][condition_mask,:]
+    cluster_points['row'] = expanded_indices[:,0]
+    cluster_points['col'] = expanded_indices[:,1]
+
+    # Make the previous first point of the cluster the first ip in cluster_points
+    if isinstance(cluster['f'],np.ndarray):
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'][0])
+    else:
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'])
+    if len(index_f[:,0]) > 1:
+        index_row = np.argwhere(cluster_points['row'][index_f[:,0]] == cluster['row'][0])
+        ip_id = int(index_f[index_row[:,0]][:,0])
+    else:
+        ip_id = int(index_f[:,0])
+    indices = list(range(len(cluster_points['f'])))
+    popped_id = indices.pop(ip_id)
+    indices.insert(0,popped_id)
+    indices = np.array(indices)
+
+    cluster_points['f'] = cluster_points['f'][indices]
+    cluster_points['cov_f'] = cluster_points['cov_f'][indices]
+    cluster_points['d'] = cluster_points['d'][indices]
+    cluster_points['cov_d'] = cluster_points['cov_d'][indices]
+    cluster_points['ms'] = cluster_points['ms'][indices,:]
+    cluster_points['row'] = cluster_points['row'][indices]
+    cluster_points['col'] = cluster_points['col'][indices]
+
+    # Check if these values can be clustered
+    cluster = cluster_creation(cluster_points,Params)
+
+    return cluster
+
+def sort_cluster(cluster: dict[str,Any]) -> dict[str,Any]:
+    """
+    Sort a cluster by row/model order
+
+    Args:
+        cluster (dict): Cluster
+    Returns:
+        cluster (dict): Sorted cluster
+
+    """
+    sort_id = np.argsort(cluster['row'])
+
+    cluster['f'] = cluster['f'][sort_id]
+    cluster['cov_f'] = cluster['cov_f'][sort_id]
+    cluster['d'] = cluster['d'][sort_id]
+    cluster['cov_d'] = cluster['cov_d'][sort_id]
+    cluster['mode_shapes'] = cluster['mode_shapes'][sort_id,:]
+    cluster['MAC'] = cluster['MAC'][sort_id]
+    cluster['model_order'] = cluster['model_order'][sort_id]
+    cluster['row'] = cluster['row'][sort_id]
+    cluster['col'] = cluster['col'][sort_id]
+
+    return cluster
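+# Worked example of the merge window used in alignment() below (hypothetical
+# numbers): with a cluster median m_f = 10 Hz, Params['allignment_factor'] =
+# (0.05, 0.01) and Params['Fs'] = 1000 Hz, the window is
+#     min(10 * 0.05, 1000/2 * 0.01) = min(0.5, 5.0) = 0.5 Hz,
+# so only clusters whose median frequencies differ by less than 0.5 Hz are
+# candidates for merging (subject to the MAC check).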
+def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,dict]:
+    """
+    Alignment/merging of clusters
+
+    Args:
+        cluster_dict (dict): Dictionary of multiple clusters
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster_dict (dict): Dictionary of aligned clusters
+
+    """
+    median_f = []
+    for key in cluster_dict.keys(): # Find the median of each cluster
+        cluster = cluster_dict[key]
+        median_f.append(np.median(cluster['f']))
+    median_f = np.array(median_f)
+
+    deleted_cluster_id = []
+    for ii, m_f in enumerate(median_f): # Go through all medians
+        if ii in deleted_cluster_id: # The cluster has been deleted, so pass on
+            continue
+        # Calculate the absolute difference between the selected median and all medians
+        diff = abs(median_f-m_f)
+        # If this difference is above 0 (not itself) and inside the bound:
+        # The bound is the minimum of either median_f * allignment_factor[0] or
+        # sampling frequency / 2 * allignment_factor[1].
+        # For lower median frequencies the bound is determined by the size of the median frequency.
+        # For higher median frequencies the bound is determined by the sampling frequency.
+        mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1]))
+        indices = np.argwhere(mask) # Indices of clusters that are closely located in frequency
+
+        if indices.shape[0] > 0: # One or more co-located clusters are found
+            ids = indices[:,0]
+            for cluster_id in ids: # Go through all closely located clusters
+                if cluster_id in deleted_cluster_id:
+                    continue
+
+                cluster1 = cluster_dict[str(ii)] # Parent cluster
+                cluster2 = cluster_dict[str(cluster_id)] # Co-located cluster
+
+                # A proposed refinement (not enabled): skip the merge when the
+                # two clusters hold poles at the same model orders.
+
+                MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0]) # Check the mode shape of the first pole in each cluster
+                if MAC >= Params['tMAC']: # The MAC complies with the criterion, so join the two clusters
+                    cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(cluster_id)],Params)
+                    cluster_dict[str(ii)] = cluster # Save the new, larger cluster
+                    if len(cluster_remaining) == 0: # The remaining cluster is empty
+                        cluster_dict.pop(str(cluster_id), None) # Remove the co-located cluster
+                        deleted_cluster_id.append(int(cluster_id)) # Record the deleted cluster id
+                    else:
+                        cluster_dict[str(cluster_id)] = cluster_remaining # Save the remaining cluster
+
+                else: # Check whether the mode shapes of any pair of poles comply with the MAC criterion
+                    MAC = np.zeros((cluster1['mode_shapes'].shape[0],cluster2['mode_shapes'].shape[0]))
+                    for jj, ms1 in enumerate(cluster1['mode_shapes']):
+                        for kk, ms2 in enumerate(cluster2['mode_shapes']):
+                            MAC[jj,kk] = calculate_mac(ms1,ms2)
+                    if MAC.max() >= Params['tMAC']: # The MAC criterion is met, so join the clusters
+                        cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(cluster_id)],Params)
+                        cluster_dict[str(ii)] = cluster # Save the new, larger cluster
+                        if len(cluster_remaining) == 0: # The remaining cluster is empty
+                            cluster_dict.pop(str(cluster_id), None) # Remove the co-located cluster
+                            deleted_cluster_id.append(int(cluster_id)) # Record the deleted cluster id
+                        else:
+                            cluster_dict[str(cluster_id)] = cluster_remaining # Save the remaining cluster
+
+    cluster_dict_aligned = cluster_dict
+    return cluster_dict_aligned
+
+def join_clusters(cluster_1: dict[str,Any], cluster_2: dict[str,Any], Params: dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any]]:
+    """
+    Join two clusters
+
+    Args:
+        cluster_1 (dict): Cluster
+        cluster_2 (dict): Cluster
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Joined cluster
+        cluster_remaining (dict): The cluster that remains
+
+    """
+    # Adding two clusters together
+    cluster = {}
+    cluster_remaining = {}
+    row1 = cluster_1['row']
+    row2 = cluster_2['row']
+
+    # Open question: should the dominant cluster instead be the one that reaches the highest model orders?
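+    # Sketch of the per-model-order selection below (hypothetical numbers): if
+    # median(cluster1['f']) = 10.0 Hz and both clusters hold a pole at the same
+    # model order, say f1 = 10.1 and f2 = 9.2, then
+    #     abs(10.0 - 9.2) >= abs(10.0 - 10.1)
+    # holds, so f1 stays in the joined cluster and f2 goes to cluster_remaining.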
+    if row1.shape[0] >= row2.shape[0]: # Let the largest cluster be the dominant one
+        cluster1 = cluster_1
+        cluster2 = cluster_2
+        row1 = cluster_1['row']
+        row2 = cluster_2['row']
+    else:
+        cluster1 = cluster_2
+        cluster2 = cluster_1
+        row1 = cluster_2['row']
+        row2 = cluster_1['row']
+
+    median_f1 = np.median(cluster1['f'])
+
+    for MO in range(Params['model_order']): # Go through all model orders
+        jj = np.argwhere(row1 == MO)
+        kk = np.argwhere(row2 == MO)
+        if MO in row1: # A pole exists in the largest cluster at this model order
+            if MO in row2: # A pole also exists in the smallest cluster at this model order
+                # Get the frequencies of the poles
+                f1 = cluster1['f'][jj[:,0]]
+                f2 = cluster2['f'][kk[:,0]]
+                if abs(median_f1-f2) >= abs(median_f1-f1): # The pole in cluster 1 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster2,kk[:,0])
+                else: # The pole in cluster 2 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster2,kk[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster1,jj[:,0])
+            else: # Only the largest cluster holds a pole at this model order
+                cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+        elif MO in row2: # Only the smallest cluster holds a pole at this model order
+            cluster = append_cluster_data(cluster,cluster2,kk[:,0])
+
+    return cluster, cluster_remaining
+
+def append_cluster_data(cluster: dict[str,Any], cluster2: dict[str,Any], idx: int) -> dict[str,Any]:
+    """
+    Append data from one cluster to an existing cluster
+
+    Args:
+        cluster (dict): Existing cluster
+        cluster2 (dict): Cluster to take data from
+        idx (int): id of the data to append
+    Returns:
+        cluster (dict): Cluster
+
+    """
+    if len(cluster) == 0: # It is the first pole
+        cluster['f'] = cluster2['f'][idx]
+        cluster['cov_f'] = cluster2['cov_f'][idx]
+        cluster['d'] = cluster2['d'][idx]
+        cluster['cov_d'] = cluster2['cov_d'][idx]
+        cluster['mode_shapes'] = cluster2['mode_shapes'][idx,:]
+        cluster['MAC'] = cluster2['MAC'][idx]
+        cluster['model_order'] = cluster2['model_order'][idx]
+        cluster['row'] = cluster2['row'][idx]
+        cluster['col'] = cluster2['col'][idx]
+    else:
+        cluster['f'] = np.append(cluster['f'],cluster2['f'][idx])
+        cluster['cov_f'] = np.append(cluster['cov_f'],cluster2['cov_f'][idx])
+        cluster['d'] = np.append(cluster['d'],cluster2['d'][idx])
+        cluster['cov_d'] = np.append(cluster['cov_d'],cluster2['cov_d'][idx])
+        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],cluster2['mode_shapes'][idx,:]))
+        cluster['MAC'] = np.append(cluster['MAC'],cluster2['MAC'][idx])
+        cluster['model_order'] = np.append(cluster['model_order'],cluster2['model_order'][idx])
+        cluster['row'] = np.append(cluster['row'],cluster2['row'][idx])
+        cluster['col'] = np.append(cluster['col'],cluster2['col'][idx])
+    return cluster
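+# Illustrative use of append_cluster_data: rebuilding a filtered copy of a
+# cluster one pole at a time, which is also the pattern median_filter() below
+# follows (the 0.95 threshold here is a made-up criterion):
+#
+#     >>> kept = {}
+#     >>> for ii in range(len(cluster['f'])):
+#     ...     if cluster['MAC'][ii] > 0.95:
+#     ...         kept = append_cluster_data(kept, cluster, ii)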
+def median_filter(cluster_dict: dict[str,dict]) -> dict[str,dict]:
+    """
+    Apply a median filter to the clusters
+
+    Args:
+        cluster_dict (dict): Dictionary of multiple clusters
+    Returns:
+        cluster_dict3 (dict): Median-filtered clusters
+
+    """
+    print("\nMedian filter")
+    cluster_dict3 = {}
+    for key in cluster_dict.keys():
+        cluster = cluster_dict[key]
+        median_f = np.median(cluster['f']) # Calculate the median
+
+        cluster_new = {}
+        for ii, f in enumerate(cluster['f']): # Go through all cluster poles
+            lower_bound = f - np.sqrt(cluster['cov_f'][ii]) * 2
+            upper_bound = f + np.sqrt(cluster['cov_f'][ii]) * 2
+            if (median_f > lower_bound) & (median_f < upper_bound): # Keep the pole if its confidence interval wraps the median
+                cluster_new = append_cluster_data(cluster_new,cluster,ii)
+
+        cluster_dict3[key] = cluster_new
+
+    return cluster_dict3
+
+
+def remove_complex_conjugates(oma_results):
+    """
+    Remove complex conjugates
+
+    Args:
+        oma_results (Dict[str, Any]): Results from PyOMA-2
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    OMA = oma_results.copy()
+    # OMA results as numpy arrays
+    frequencies = OMA['Fn_poles'].copy()
+    cov_freq = OMA['Fn_poles_cov'].copy()
+    damping_ratios = OMA['Xi_poles'].copy()
+    cov_damping = OMA['Xi_poles_cov'].copy()
+    mode_shapes = OMA['Phi_poles'].copy()
+
+    # Remove the complex conjugate entries
+    frequencies = frequencies[::2] # This is 'S' as per the algorithm
+    damping_ratios = damping_ratios[::2] # This is 'S' as per the algorithm
+    mode_shapes = mode_shapes[::2, :, :]
+    cov_freq = cov_freq[::2]
+    cov_damping = cov_damping[::2]
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
+
+def transform_oma_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_):
+    """
+    Transform the OMA results
+
+    Args:
+        frequencies_ (np.ndarray): Frequencies (mean)
+        cov_freq_ (np.ndarray): Covariance of frequency
+        damping_ratios_ (np.ndarray): Damping ratios (mean)
+        cov_damping_ (np.ndarray): Covariance of damping ratio
+        mode_shapes_ (np.ndarray): Mode shapes
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    # Transpose, flip and sort the arrays, so that they map directly onto the stabilization diagram.
+    # This means that the frequency array maps directly to the plot:
+    #  MO.
+    #  5.| x        x
+    #  4.| x
+    #  3.| x
+    #  2.| x
+    #  1.|
+    #  0.|
+    #   -1----4------- Frequency
+    # The frequency array will then have the shape (6,3). It is initially (6,6), but the complex conjugates have been removed, so 6 is halved to 3.
+ # 6 for each model order, including 0 and 3 for maximum poles in a modelorder + # The frequency array will then become: + # _0_1_ + # 0| 1 4 + # 1| 1 Nan + # 0| 1 Nan + # 0| Nan 4 + # 0| Nan Nan + # 0| Nan Nan + + #Transformation of data + frequencies = np.transpose(frequencies_) + frequencies = np.flip(frequencies, 0) + sort_indices = np.argsort(frequencies,axis=1) + frequencies = np.take_along_axis(frequencies, sort_indices, axis=1) + cov_freq = np.transpose(cov_freq_) + cov_freq = np.flip(cov_freq, 0) + cov_freq = np.take_along_axis(cov_freq, sort_indices, axis=1) + damping_ratios = np.transpose(damping_ratios_) + damping_ratios = np.flip(damping_ratios, 0) + damping_ratios = np.take_along_axis(damping_ratios, sort_indices, axis=1) + cov_damping = np.transpose(cov_damping_) + cov_damping = np.flip(cov_damping, 0) + cov_damping = np.take_along_axis(cov_damping, sort_indices, axis=1) + mode_shapes = np.moveaxis(mode_shapes_, [0, 1, 2], [1, 0, 2]) + + mode_shapes2 = np.zeros(mode_shapes.shape,dtype=np.complex128) + for ii, indices in enumerate(sort_indices): + mode_shapes2[ii,:,:] = mode_shapes[(sort_indices.shape[0]-ii-1),indices,:] + + # Array of model orders + model_order = np.arange(sort_indices.shape[0]) + model_orders = np.stack((model_order,) * sort_indices.shape[1], axis=1) + model_orders = np.flip(model_orders) + + return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders + +def remove_highly_uncertain_points(oma_results,oma_params): + """ + Remove highly uncertain points + + Args: + oma_results (Dict[str, Any]): Results from PyOMA-2 + oma_params (Dict[str, Any]): Parameters + + Returns: + frequencies (np.ndarray): Frequencies (mean) + cov_freq (np.ndarray): Covariance of frequency + damping_ratios (np.ndarray): Damping ratios (mean) + cov_damping (np.ndarray): Covariance of damping ratio + mode_shapes (np.ndarray): Mode shapes + """ + frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_complex_conjugates(oma_results) + + # # #=================== Removing high uncertain poles ======================= + freq_variance_treshold = oma_params.get('freq_variance_treshold', 0.1) + damp_variance_treshold = oma_params.get('damp_variance_treshold', 10**6) + frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies + damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios + indices_frequency = frequency_coefficient_variation > freq_variance_treshold + indices_damping = damping_coefficient_variation > damp_variance_treshold + above_nyquist = frequencies > oma_params['Fs']/2 + combined_indices = np.logical_or(np.logical_or(indices_frequency,indices_damping),above_nyquist) + frequencies[combined_indices] = np.nan + damping_ratios[combined_indices] = np.nan + cov_freq[combined_indices] = np.nan + cov_damping[combined_indices] = np.nan + mask = np.broadcast_to(np.expand_dims(combined_indices, axis=2), mode_shapes.shape) + mode_shapes[mask] = np.nan + + return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes \ No newline at end of file diff --git a/src/methods/packages/mode_track.py b/src/methods/packages/mode_track.py deleted file mode 100644 index fe68c91..0000000 --- a/src/methods/packages/mode_track.py +++ /dev/null @@ -1,944 +0,0 @@ -"This file is taken from the DTaaS-platform" -import matplotlib.pyplot as plt -import matplotlib.tri as mtri -import numpy as np -import numpy.ma as ma -import copy - -# plt.close('all') -# Clustering function -def cluster_frequencies(frequencies, damping_ratios, mode_shapes, - 
frequencies_max_MO, cov_freq_max_MO, - damping_ratios_max_MO, cov_damping_max_MO, - mode_shapes_max_MO, tMAC, bound_multiplier=2): - """ - - - Parameters - ---------- - frequencies : TYPE - DESCRIPTION. - damping_ratios : TYPE - DESCRIPTION. - mode_shapes : TYPE - DESCRIPTION. - frequencies_max_MO : TYPE - DESCRIPTION. - cov_freq_max_MO : TYPE - DESCRIPTION. - damping_ratios_max_MO : TYPE - DESCRIPTION. - cov_damping_max_MO : TYPE - DESCRIPTION. - mode_shapes_max_MO : TYPE - DESCRIPTION. - tMAC : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - None. - - """ - - # Modify the index of frequency to sorting - - sorted_indices = np.argsort(frequencies_max_MO) - fn_sorted = frequencies_max_MO[sorted_indices] - damping_ratios_sorted = damping_ratios_max_MO[sorted_indices] - cov_fn_sorted = cov_freq_max_MO[sorted_indices] - cov_damping_sorted = cov_damping_max_MO[sorted_indices] - mode_shape_sorted = mode_shapes_max_MO[sorted_indices] - - fn_unique, unique_indices = np.unique(fn_sorted, return_index=True) - cov_fn_unique = cov_fn_sorted[unique_indices] - damping_ratios_unique = damping_ratios_sorted[unique_indices] - cov_damping_unique = cov_damping_sorted[unique_indices] - mode_shape_unique = mode_shape_sorted[unique_indices] - - # print(f'unsorted frequencies: {frequencies_max_MO}') - # print(f'unique frequencies: {fn_unique}') - # print(f'unsorted covariance: {cov_freq_max_MO}') - # print(f'unique covariance: {cov_fn_unique}') - - # frequencies = frequencies[::2] # This is 'S' as per algorithm - # mode_shapes = mode_shapes[::2, :, :] - - # print(f'Shape of frequencies: {frequencies.shape}') - - C_cluster = [] - Ip = [] - - # Mask to track ungrouped elements (initially all elements are ungrouped) - ungrouped_mask = np.ones_like(frequencies, dtype=bool) - - # Check each limit and save indices - for ip, (f_MxMO, fcov_MxMO, z_MxMO, zcov_MxMO) in enumerate(zip(fn_unique, - cov_fn_unique, damping_ratios_unique, cov_damping_unique)): - if np.isnan(f_MxMO): - continue - - # Confidence interval using the mean±2*standard_deviation - f_lower_bound = f_MxMO - bound_multiplier * np.sqrt(fcov_MxMO) - f_upper_bound = f_MxMO + bound_multiplier * np.sqrt(fcov_MxMO) - z_lower_bound = z_MxMO - bound_multiplier * np.sqrt(zcov_MxMO) - z_upper_bound = z_MxMO + bound_multiplier * np.sqrt(zcov_MxMO) - - # Find elements within the current limit that are still ungrouped - condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound) & ungrouped_mask - indices = np.argwhere(condition_mask) # Get indices satisfying the condition - - # Initialization of Ip - Ip.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - # for Ip_item in Ip: - # print(f'Ip values: {Ip_item["f_values"]}') - - - # declared for appending - updated_indices = np.empty((0, 2), dtype=int) - f_updated_values = [] - z_updated_values = [] - # print(f'ip : {ip}') - - - # Find duplicates and their indices - # print(f'Indices : {Ip[ip]["indices"]}') - model_order_id = Ip[ip]["indices"][:,1] - # print(f'model order id: {model_order_id}') - unique, counts = np.unique(model_order_id, return_counts=True) - duplicates = unique[counts > 1] # model order number with duplicate modes - # print(f'Duplicate : {duplicates}') - # Create a 
boolean mask for duplicate rows - is_duplicate_row = np.isin(model_order_id, duplicates) - # Filter indices with duplicate values - indices_Ipm = Ip[ip]["indices"][is_duplicate_row] # Rows with duplicates - # print(f'Ipm indices: {indices_Ipm}') - indices_Ipu = Ip[ip]["indices"][~is_duplicate_row] - # print(f'Ipu indices: {indices_Ipu}') - # Check if indices_Ipu is empty - if indices_Ipu.size > 0: - ip_for_Ipu = indices_Ipu[np.argmax(indices_Ipu[:, 1])] - # print(f'ip for Ipu : {ip_for_Ipu}') - else: - print("No unique mode issue in this step.") - - - if duplicates.size == 0: - print("All values are unique.") - if len(indices)>1: - - for ii in indices: - target_mode_shape = mode_shapes[ii[0], ii[1], :] # Extract mode shape from the 3D array - reference_mode_shape = mode_shape_unique[ip] - - # print(f'print target_mode_shape: {target_mode_shape}') - # print(f'print reference_mode_shape: {reference_mode_shape}') - - # Calculate MAC with the reference mode shape - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - # print(f'MAC value: {mac_value}') - # print(f'ip : {ip}') - # print(f'MAC : {mac_value}') - # Check the MAC value to include in C. Algorithm 2: step 2 - if mac_value > tMAC: - # print(f'updated indices: {updated_indices}') - # print(f'new indices to be added: {ii}') - updated_indices = np.vstack([updated_indices,ii]) - f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)]) - z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)]) - # print(f'updated values: {updated_values}') - # Check if the cluster already exists - existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None) - if existing_cluster: - # Update existing cluster - existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii]) - existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)]) - existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)]) - else: - # Create a new cluster - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": np.copy(updated_indices), - "f_values": np.copy(f_updated_values), - "z_values":np.copy(z_updated_values) - }) - - else: - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound,f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - # Handle the duplicate model order for single mode - else: - if len(indices_Ipu)>1: - for ii in indices_Ipu: - target_mode_shape = mode_shapes[ii[0], ii[1], :] # Extract mode shape from the 3D array - reference_mode_shape = mode_shapes[ip_for_Ipu[0], ip_for_Ipu[1], :] - - # print(f'print target_mode_shape: {target_mode_shape}') - # print(f'print reference_mode_shape: {reference_mode_shape}') - - # Calculate MAC with the reference mode shape - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - # print(f'MAC value: {mac_value}') - # print(f'ip : {ip}') - # print(f'MAC : {mac_value}') - # Check the MAC value to include in C. 
Algorithm 2: step 2 - if mac_value > tMAC: - # print(f'updated indices: {updated_indices}') - # print(f'new indices to be added: {ii}') - updated_indices = np.vstack([updated_indices,ii]) - f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)]) - z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)]) - # print(f'updated values: {updated_values}') - # Check if the cluster already exists - existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None) - if existing_cluster: - # Update existing cluster - existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii]) - existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)]) - existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)]) - else: - # print(f'Ipu indices: {indices_Ipu} and frequencies: {f_updated_values}') - # Create a new cluster - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": np.copy(updated_indices), - "f_values": np.copy(f_updated_values), - "z_values":np.copy(z_updated_values) - }) - - else: - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound,f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - - - - - - # for Ip_item in C_cluster: - # print(f'C_cluster values: {Ip_item["f_values"]}') - - - - Ip_C_cluster = [] - # algorith 2: setp 3 [condition check] - for item1 in C_cluster: - # print(f'C_cluster item: {item1}') - # print(f'C_cluster value: {item1["values"]}') - - for item2 in Ip: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - if len(item1['f_values']) == len(item2['f_values']): - # print('For C and Ip - values have the same length. 
Proceeding to compare the values.') - - # Compare the values - if np.all(item1['f_values'] != item2['f_values']): - # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}') - continue - else: - print('Values are the same between C_cluster and Ip') - - else: - # print('Values have different lengths between C_cluster and Ip.') - updated_indices2 = np.empty((0, 2), dtype=int) # Reset to empty 2D array - f_updated_values2 = [] - z_updated_values2 = [] - for pp in item1['indices']: - for kk in item2['indices']: - reference_mode_shape = mode_shapes[pp[0], pp[1], :] - target_mode_shape = mode_shapes[kk[0], kk[1], :] - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - if mac_value > tMAC: - updated_indices2 = np.vstack([updated_indices2,kk]) - f_updated_values2 = np.append(f_updated_values2, frequencies[tuple(kk.T)]) - z_updated_values2 = np.append(z_updated_values2, damping_ratios[tuple(kk.T)]) - # print(f'newly added indices: {kk}') - # print(f'newly added values: {frequencies[tuple(kk.T)]}') - Ip_C_cluster.append({ - "ip_index": item1['ip_index'], - "indices" : updated_indices2, - "f_values" : f_updated_values2, - "z_values" : z_updated_values2 - }) - - # for Ip_item in Ip_C_cluster: - # print(f'Ip_C_cluster values: {Ip_item["f_values"]}') - # print(f'Ip_C_cluster indices: {Ip_item["indices"]}') - - # Initialize C_cluster_finale as a deep copy of C_cluster - C_cluster_finale = copy.deepcopy(C_cluster) - - # Add the points from Ip_C_cluster if they satisfy MAC conditions - # algorith 2: setp 3 [addition of point] - for item1 in C_cluster: - for item2 in Ip_C_cluster: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - # Combine values from both clusters - f_merged_values = np.concatenate((item1['f_values'], item2['f_values'])) - z_merged_values = np.concatenate((item1['z_values'], item2['z_values'])) - # Combine indices from both clusters - merged_indices = np.concatenate((item1['indices'], item2['indices'])) - - # Find the corresponding cluster in C_cluster_finale - for finale_item in C_cluster_finale: - if finale_item['ip_index'] == item1['ip_index']: - # Update values and indices - finale_item['f_values'] = f_merged_values - finale_item['z_values'] = z_merged_values - finale_item['indices'] = merged_indices - break # Exit the loop once the match is found - - - # for C_item in C_cluster_finale: - # print(f'C_cluster values end: {C_item["values"]}') - - # algorith 2: step 4 - Ip_indices = np.vstack([item['indices'] for item in C_cluster]) - # Make a copy of frequencies to represent unclustered frequencies - unclustered_frequencies = frequencies.copy() - unclustered_damping = damping_ratios.copy() - # Update the copied matrix to NaN at collected indices - for idx in Ip_indices: - unclustered_frequencies[tuple(idx)] = np.nan # Set to NaN - unclustered_damping[tuple(idx)] = np.nan - - # print(f'Unclustred frequencies: {unclustered_frequencies}') - - # Find all indices in the frequencies matrix - all_indices = np.array(np.meshgrid(np.arange(frequencies.shape[0]), np.arange(frequencies.shape[1]))).T.reshape(-1, 2) - - # Identify unclustered indices: exclude NaN and indices in clustered_indices - unclustered_indices = [] - for idx in all_indices: - if not np.isnan(frequencies[tuple(idx)]) and not any((idx == Ip_indices).all(axis=1)): - unclustered_indices.append(idx) - - unclustered_indices = np.array(unclustered_indices) - # print(f'Unclustred indices: {unclustered_indices}') - - 
return C_cluster_finale, unclustered_frequencies, unclustered_damping, unclustered_indices - -# MAC calculation function -def calculate_mac(reference_mode, mode_shape): - """ - - - Parameters - ---------- - reference_mode : TYPE - DESCRIPTION. - mode_shape : TYPE - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2 - denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape) - return np.real(numerator / denominator) - -def clusterexpansion(C_clusters, unClustered_frequencies, unClustered_damping, cov_freq, cov_damping, mode_shapes, unClustered_indices, tMAC, bound_multiplier=2): - """ - - - Parameters - ---------- - C_clusters : TYPE - DESCRIPTION. - unClustered_frequencies : TYPE - DESCRIPTION. - unClustered_damping : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - cov_damping : TYPE - DESCRIPTION. - mode_shapes : TYPE - DESCRIPTION. - unClustered_indices : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Raises - ------ - a - DESCRIPTION. - - Returns - ------- - C_cluster_finale : TYPE - DESCRIPTION. - unclustered_frequencies_expanded : TYPE - DESCRIPTION. - unclustered_damping_expanded : TYPE - DESCRIPTION. - unclustered_indices_expnaded : TYPE - DESCRIPTION. - - """ - - # cov_freq = cov_freq[::2] - # mode_shapes = mode_shapes[::2, :, :] - - # import pprint - # for cluster in C_clusters: - # pprint.pprint(cluster) - - Ip_plus = [] - - for cluster in C_clusters: - - f_values = cluster['f_values'] - z_values = cluster['z_values'] - indices = cluster['indices'] - - # **Skip if the cluster is empty** - if len(f_values) == 0: - print("Skipping empty cluster...") - continue # Move to the next cluster - - # print("Covariance Array:", np.sqrt(cov_freq[tuple(indices.T)])) - # Calculate the lower and upper bounds for the current cluster - # print(f'f_values: {f_values}') - # print(f'cov_freq[tuple(indices.T): {cov_freq[tuple(indices.T)]}') - f_lower_bound = np.min(f_values - bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)])) # Minimum of all points for frequencies - f_upper_bound = np.max(f_values + bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)])) # Maximum of all points for frequencies - z_lower_bound = np.min(z_values - bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)])) # Minimum of all points for damping - z_upper_bound = np.max(z_values + bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)])) # Maximum of all points for damping - - # print(f'Print cluster lower bound: {lower_bound}') - # print(f'Print cluster upper bound: {upper_bound}') - - # Find elements within the current limit that are still ungrouped - condition_mask2 = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= z_lower_bound) & (unClustered_damping <= z_upper_bound) - # Get indices satisfying the condition - expanded_indices = np.argwhere(condition_mask2) - - # Initialize lists to store updated indices and values - updated_indices3 = [] - f_updated_values3 = [] - z_updated_values3 = [] - - # Loop through the unclustered indices and append matching values to the cluster - for idx in expanded_indices: - freq_value = unClustered_frequencies[tuple(idx)] # Get the frequency value at this index - damp_value = unClustered_damping[tuple(idx)] # Get the damping value at this index - updated_indices3.append(idx) # Append the index - f_updated_values3.append(freq_value) # 
Append the frequency value - z_updated_values3.append(damp_value) # Append the damping value - - # Create a new cluster and append it to Ip_plus_cluster - Ip_plus.append({ - "ip_index": cluster['ip_index'], # Use the ip_index from the original cluster - "indices": np.array(updated_indices3), # Updated indices - "f_values": np.array(f_updated_values3), # Updated frequency values - "z_values": np.array(z_updated_values3) # Updated damping values - }) - - - Ip_plus_C = [] - # algorith 2: setp 3 [condition check] - for item1 in C_clusters: - # print(f'C_cluster item: {item1}') - # print(f'C_cluster value: {item1["values"]}') - - for item2 in Ip_plus: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - if len(item1['f_values']) == len(item2['f_values']): - # print('For C and Ip - values have the same length. Proceeding to compare the values.') - - # Compare the values - if np.all(item1['f_values'] != item2['f_values']): - # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}') - continue - else: - print(f'Values are the same between C_cluster and Ip_plus: {item1["f_values"]}') - - else: - # print('Values have different lengths between C_cluster and Ip.') - updated_indices4 = np.empty((0, 2), dtype=int) # Reset to empty 2D array - f_updated_values4 = [] - z_updated_values4 = [] - for pp in item1['indices']: - for kk in item2['indices']: - reference_mode_shape = mode_shapes[pp[0], pp[1], :] - target_mode_shape = mode_shapes[kk[0], kk[1], :] - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - if mac_value > tMAC: - updated_indices4 = np.vstack([updated_indices4,kk]) - f_updated_values4 = np.append(f_updated_values4, unClustered_frequencies[tuple(kk.T)]) - z_updated_values4 = np.append(z_updated_values4, unClustered_damping[tuple(kk.T)]) - # print(f'newly added indices: {kk}') - # print(f'newly added values: {frequencies[tuple(kk.T)]}') - Ip_plus_C.append({ - "ip_index": item1['ip_index'], - "indices" : updated_indices4, - "f_values" : f_updated_values4, - "z_values" : z_updated_values4 - }) - - - # Initialize C_cluster_finale as a deep copy of C_cluster - C_cluster_finale = copy.deepcopy(C_clusters) - - # Add the points from Ip_C_cluster if they satisfy MAC conditions - # algorith 2: setp 3 [addition of point] - for item1 in C_clusters: - for item2 in Ip_plus_C: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - # Combine values from both clusters - f_merged_values2 = np.concatenate((item1['f_values'], item2['f_values'])) # concatenate frequencies - z_merged_values2 = np.concatenate((item1['z_values'], item2['z_values'])) # concatenate damping - # Combine indices from both clusters - merged_indices2 = np.concatenate((item1['indices'], item2['indices'])) - - # Find the corresponding cluster in C_cluster_finale - for finale_item in C_cluster_finale: - if finale_item['ip_index'] == item1['ip_index']: - # Update values and indices - finale_item['f_values'] = f_merged_values2 - finale_item['z_values'] = z_merged_values2 - finale_item['indices'] = merged_indices2 - break # Exit the loop once the match is found - - - # algorith 2: step 4 - # Filter out empty 'indices' arrays and check if there are any non-empty ones - valid_indices = [item['indices'] for item in C_clusters if item['indices'].size > 0] - - if valid_indices: - # If there are valid indices, proceed with stacking - Ip_plus_indices = np.vstack(valid_indices) - else: - 
# If there are no valid indices, handle accordingly (e.g., set to empty or raise a warning) - # print("No valid indices to stack.") - Ip_plus_indices = np.array([]) # Or choose another fallback behavior - # Make a copy of frequencies to represent unclustered frequencies - unclustered_frequencies_expanded = unClustered_frequencies.copy() - unclustered_damping_expanded = unClustered_damping.copy() - # Update the copied matrix to NaN at collected indices - for idx in Ip_plus_indices: - unclustered_frequencies_expanded[tuple(idx)] = np.nan # Set to NaN - unclustered_damping_expanded[tuple(idx)] = np.nan # Set to NaN - - # print(f'Unclustred frequencies: {unclustered_frequencies}') - - # Find all indices in the frequencies matrix - all_indices = np.array(np.meshgrid(np.arange(unClustered_frequencies.shape[0]), np.arange(unClustered_frequencies.shape[1]))).T.reshape(-1, 2) - - # Identify unclustered indices: exclude NaN and indices in clustered_indices - unclustered_indices_expnaded = [] - for idx in all_indices: - # if not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)): - if Ip_plus_indices.size > 0 and not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)): - unclustered_indices_expnaded.append(idx) - - unclustered_indices_expnaded = np.array(unclustered_indices_expnaded) - # print(f'Unclustred indices expanded: {unclustered_indices_expnaded}') - - return C_cluster_finale, unclustered_frequencies_expanded, unclustered_damping_expanded, unclustered_indices_expnaded - - -def visualize_clusters(clusters, cov_freq, bounds): - """ - - - Parameters - ---------- - clusters : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - bounds : TYPE - DESCRIPTION. - - Returns - ------- - None. 
- - """ - # Sort clusters by their median if available, otherwise keep original order - clusters.sort(key=lambda cluster: np.median(cluster["values"]) if "values" in cluster and len(cluster["values"]) > 0 else float('inf')) - - # Create subplots (one for each cluster) - num_clusters = len(clusters) - fig, axs = plt.subplots(num_clusters, 1, figsize=(10, 5 * num_clusters), tight_layout=True) - - - if num_clusters == 1: - axs = [axs] # Ensure axs is always iterable - - for idx, (cluster, ax) in enumerate(zip(clusters, axs)): - cluster_values = cluster["f_values"] - cluster_indices = cluster["indices"] - cluster_cov = cov_freq[tuple(np.array(cluster_indices).T)] # Covariance for original cluster - - # Extract the second part of the cluster indices for plotting - model_orders = cluster_indices[:, 1] - - # Scatter plot the cluster values against model orders - ax.scatter(cluster_values, model_orders, label="Cluster Data") - - # Plot the cluster values with covariance as error bars - ax.errorbar( - cluster_values, - model_orders, # Use the second index for the vertical axis - xerr=(np.sqrt(cluster_cov)*bounds), # Error bars for x-values based on covariance - fmt='o', capsize=5, ecolor='red', label="± 2σ" - ) - - # Check if the 'median' key exists in the cluster dictionary - if 'median' in cluster: - median_value = cluster["median"] - if not np.isnan(median_value): # If median is not NaN, plot the vertical line - ax.axvline(median_value, color='blue', linestyle='--', label='Median') - - ax.set_title(f"Cluster {idx + 1}") - ax.set_xlabel("Frequency [Hz]") - ax.set_ylabel("Model Order") - ax.set_ylim(0, 21) - ax.legend() - ax.grid() - - plt.show() - - -def clean_clusters_by_median(clusters, cov_freq, bound_multiplier=2): - """ - - - Parameters - ---------- - clusters : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - cleaned_clusters : TYPE - DESCRIPTION. 
- - """ - cleaned_clusters = [] - - for cluster_idx, cluster in enumerate(clusters): - # Extract values and indices from the cluster - f_cluster_values = np.array(cluster["f_values"]) - z_cluster_values = np.array(cluster["z_values"]) - cluster_indices = np.array(cluster["indices"]) - - # Extract covariance for each cluster element - f_cluster_cov = cov_freq[tuple(cluster_indices.T)] # Extract covariance for the given indices - - # Remove duplicates by using unique values and their indices - f_unique_values, unique_indices = np.unique(f_cluster_values, return_index=True) - f_unique_cov = f_cluster_cov[unique_indices] - z_unique = z_cluster_values[unique_indices] - unique_indices_2D = cluster_indices[unique_indices] - - # Update the original cluster with unique values and indices - cluster["f_values"] = f_unique_values - cluster["z_values"] = z_unique - cluster["indices"] = unique_indices_2D - - # Calculate the median of the unique values - median_value = np.nanmedian(f_unique_values) - - # Define bounds for filtering based on the bound_multiplier and covariance - lower_bound = f_unique_values - bound_multiplier * np.sqrt(f_unique_cov) - upper_bound = f_unique_values + bound_multiplier * np.sqrt(f_unique_cov) - - # Keep elements where the median lies within the bounds - mask = (median_value >= lower_bound) & (median_value <= upper_bound) - f_cleaned_values = f_unique_values[mask] - z_cleaned_values = z_unique[mask] - cleaned_indices = unique_indices_2D[mask] - - # Append the cleaned cluster to the result if there are enough values - if len(f_cleaned_values) > 1: # Keep clusters with more than one cleaned value - cleaned_clusters.append({ - "original_cluster": cluster, # Store the original cluster (now updated with unique values) - "f_values": f_cleaned_values, - "z_values": z_cleaned_values, - "indices": cleaned_indices, - "median": median_value, - "bound_multiplier": bound_multiplier, # Store the bound multiplier used - }) - - return cleaned_clusters - - -def mode_allingment(ssi_mode_track_res, mstab, tMAC): - print("DEBUG: oma_output inside mode_allingment:", type(ssi_mode_track_res), ssi_mode_track_res) - - # extract results - frequencies = ssi_mode_track_res['Fn_poles'] - cov_freq = ssi_mode_track_res['Fn_poles_cov'] - damping_ratios = ssi_mode_track_res['Xi_poles'] - cov_damping = ssi_mode_track_res['Xi_poles_cov'] - mode_shapes = ssi_mode_track_res['Phi_poles'] - bounds = 2 # standard deviation multiplier - - frequencies_max_MO = frequencies[:,-1] - cov_freq_max_MO = cov_freq[:,-1] - damping_ratios_max_MO = damping_ratios[:,-1] - cov_damping_max_MO = cov_damping[:,-1] - mode_shapes_max_MO = mode_shapes[:,-1,:] - - frequencies_copy = frequencies.copy() - - # Remove the complex conjugate entries - frequencies = frequencies[::2] # This is 'S' as per algorithm - damping_ratios = damping_ratios[::2] # This is 'S' as per algorithm - mode_shapes = mode_shapes[::2, :, :] - cov_freq = cov_freq[::2] - cov_damping = cov_damping[::2] - - frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies - damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios - indices_frequency = frequency_coefficient_variation > 0.05 - indices_damping = damping_coefficient_variation > 0.5 - combined_indices = indices_frequency & indices_damping - frequencies[combined_indices] = np.nan - damping_ratios[combined_indices] = np.nan - cov_freq[combined_indices] = np.nan - cov_damping[combined_indices] = np.nan - - - # Initial clustering - C_clusters, unClustd_frequencies, unClustd_damping, unClustd_indices 
= cluster_frequencies(frequencies, damping_ratios, - mode_shapes, frequencies_max_MO, cov_freq_max_MO, - damping_ratios_max_MO, cov_damping_max_MO, - mode_shapes_max_MO, tMAC, bound_multiplier=bounds) - - # Expansion step - C_expanded, unClustd_frequencies_expanded, unClustd_damping_expanded, unClustd_indices_expanded = clusterexpansion(C_clusters, unClustd_frequencies, unClustd_damping, - cov_freq, cov_damping, mode_shapes, - unClustd_indices, tMAC, bound_multiplier=bounds) - - - last_ip_index = max(cluster['ip_index'] for cluster in C_expanded) - - count = 0 - - # Loop until unClustd_indices contains only one index - while True: - # print(f"unClustd indices expanded size before: {unClustd_indices_expanded.size}") - # print(f'Loop counter: {count}') - count += 1 - # Check the termination condition - if unClustd_indices_expanded.size <= 2: # Stop if there are fewer than 2 indices - print("No more unclustered indices to process. Exiting ...") - break - - # Get the highest column index from unClustd_indices - highest_column = np.max(unClustd_indices_expanded[:, 1]) # Assuming column index is in the second column - - # Create a mask for the unclustered indices - mask1 = np.full(frequencies.shape, False) # Initialize a boolean mask - mask1[tuple(unClustd_indices_expanded.T)] = True # Set only unclustered indices to True - unClustd_frequencies = frequencies.copy() - unClustd_damping = damping_ratios.copy() - unClustd_frequencies[~mask1] = np.nan - unClustd_damping[~mask1] = np.nan - unClustd_cov_freq = cov_freq.copy() - unClustd_cov_damp = cov_damping.copy() - unClustd_cov_freq[~mask1] = np.nan # Unclustered frequency variance matrix - unClustd_cov_damp[~mask1] = np.nan # Unclustered damping variance matrix - unClustd_mode_shapes = mode_shapes.copy() - - for ii in range(unClustd_mode_shapes.shape[2]): - slice_2d = unClustd_mode_shapes[:, :, ii] - slice_2d[~mask1] = np.nan - unClustd_mode_shapes[:, :, ii] = slice_2d # Unclustered mode shape matrix - - # Filter the data for the highest column - frequencies_max_MO = unClustd_frequencies_expanded[:, highest_column] - # print(f'Maximum model order: {highest_column}') - # print(f'MO frequencies: {frequencies_max_MO}') - damping_ratios_max_MO = unClustd_damping_expanded[:, highest_column] - # print(f'frequencies initization: {frequencies_max_MO}') - cov_freq_max_MO = unClustd_cov_freq[:, highest_column] - cov_damp_max_MO = unClustd_cov_damp[:, highest_column] - mode_shapes_max_MO = unClustd_mode_shapes[:, highest_column, :] - - # Call the cluster_frequencies function with updated parameters - C_cluster_loop, unClustd_frequencies_loop, unClustd_damping_loop, unClustd_indices_loop = cluster_frequencies( - unClustd_frequencies, - unClustd_damping, - unClustd_mode_shapes, - frequencies_max_MO, - cov_freq_max_MO, - damping_ratios_max_MO, - cov_damping_max_MO, - mode_shapes_max_MO, - tMAC, - bound_multiplier=bounds - ) - print("Initial clustering done.") - - # import pprint - # for cluster in C_clusters: - # pprint.pprint(cluster) - - if unClustd_indices_loop.size == 0: - print("No unclustered indices left. 
Exiting ...") - # Update the clusters with new 'ip_index' values - for cluster in C_cluster_loop: - # Update the ip_index for the new clusters (starting from last_ip_index + 1) - new_ip_index = last_ip_index + 1 - cluster["ip_index"] = new_ip_index - - # Append the updated cluster to the final list - C_expanded.append(cluster) - - # Update last_ip_index to the newly assigned ip_index for the next iteration - last_ip_index = new_ip_index - - # print('before break') - break - # print('after break') - - print("Expansion started in loop.") - # Expansion step for each initial clusters - C_expanded_loop, unClustd_frequencies_expanded_loop, unClustd_damping_expanded_loop, unClustd_indices_expanded_loop = clusterexpansion( - C_cluster_loop, - unClustd_frequencies_loop, - unClustd_damping_loop, - cov_freq, - cov_damping, - mode_shapes, - unClustd_indices_loop, - tMAC, - bound_multiplier=bounds - ) - print("Expansion clustering done.") - - # Update the clusters with new 'ip_index' values - for cluster in C_expanded_loop: - # Update the ip_index for the new clusters (starting from last_ip_index + 1) - new_ip_index = last_ip_index + 1 - cluster["ip_index"] = new_ip_index - - # Append the updated cluster to the final list - C_expanded.append(cluster) - - # Update last_ip_index to the newly assigned ip_index for the next iteration - last_ip_index = new_ip_index - - # print("Expansion added to clustering.") - - if unClustd_indices_expanded_loop.size == 0: - print("No unclustered indices left. Exiting ...") - break - # Update the unClustd_indices for the next iteration - unClustd_indices_expanded = unClustd_indices_expanded_loop[ - unClustd_indices_expanded_loop[:, 1] != highest_column - ] - - # Check if the size of unClustd_indices_expanded has become less than or equal to 2 - if unClustd_indices_expanded.size <= 2: - print("Unclustered indices size <= 2. 
Stopping ...") - break - - # Removing repeatation during merge - for cluster in C_expanded: - # Get the current values - f_values = cluster['f_values'] - indices = cluster['indices'] - z_values = cluster['z_values'] - # Find unique f_values and their indices - unique_f_values, unique_indices = np.unique(f_values, return_index=True) - cluster['f_values'] = unique_f_values - cluster['indices'] = indices[unique_indices] - cluster['z_values'] = z_values[unique_indices] - - - # # Visualize the initial clusters - # visualize_clusters(C_expanded, cov_freq, bounds) - - # # import pprint - # for cluster in C_expanded: - # print(f"ip_index: {cluster['ip_index']}, f_values length: {len(cluster['f_values'])}") - # print(f"Cluster confidence interval: {cluster['confidence_interval'][0:2]}") - # print(f"Cluster shape: {len(cluster['f_values'])}") - # # pprint.pprint(cluster) - # # print(f"ip_index: {cluster['ip_index']}") - # # print(f"indices shape: {cluster['indices'].shape}") - # # print(f"f_values shape: {len(cluster['f_values'])}") - - - print('Cluster filter started') - # Filter clusters with less than 'mstab' elements - C_expanded_filtered = [cluster for cluster in C_expanded if cluster['indices'].shape[0] > mstab] - # Sort clusters by the lower bound of their confidence_interval (the first value in the tuple) - C_expanded_filtered.sort(key=lambda cluster: cluster['confidence_interval'][0]) - print('Cluster filter finished') - - # # Visualize the cluster filter by element numbers - # visualize_clusters(C_expanded_filtered, cov_freq, bounds) - - # Cluster cleaning based on median - cleaned_clusters = clean_clusters_by_median(C_expanded_filtered, cov_freq, bound_multiplier=bounds) - - # remove repeatative clusters - seen = set() - uq_clusters = [] - for d in cleaned_clusters: - f_values_tuple = tuple(d['f_values']) - if f_values_tuple not in seen: - seen.add(f_values_tuple) - uq_clusters.append(d) - - for cluster in uq_clusters: - indices = cluster['indices'] - mode_shapes_list = [] - - for idx in indices: - # Extract mode shapes using indices - mode_shape = mode_shapes[idx[0], idx[1], :] - mode_shapes_list.append(mode_shape) - - # Add mode shapes to the dictionary - cluster['mode_shapes'] = np.array(mode_shapes_list) - - uq_clusters_sorted = sorted(uq_clusters, key=lambda cluster: cluster["median"]) - - return uq_clusters_sorted diff --git a/src/methods/packages/mode_tracking.py b/src/methods/packages/mode_tracking.py new file mode 100644 index 0000000..5ca9858 --- /dev/null +++ b/src/methods/packages/mode_tracking.py @@ -0,0 +1,355 @@ +from typing import Any +import numpy as np +from methods.packages.clustering import calculate_mac + +# JVM 14/10/2025 + +def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any],Params: dict[str,Any]=None) -> dict[str,Any]: + """ + Tracking of modes across experiments + + Args: + cluster_dict (dict): Dictionary of clusters + tracked_clusters (dict): Previously tracked clusters + Params (dict): tracking parameters + + Returns: + tracked_clusters (dict): Previously tracked clusters + + """ + print("Cluster tracking") + if Params == None: + Params = {'phi_cri':0.8, + 'freq_cri':0.2} + + m_f = [] + for key in cluster_dict.keys(): + cluster = cluster_dict[key] + m_f.append(cluster['median_f']) + + t_list = [] + t_length = [] + for key in tracked_clusters: #Go through all tracked clusters. 
They are identified with keys which are integers from 0 up to the total number of clusters + if key == 'iteration': + pass + else: + tracked_cluster_list = tracked_clusters[key] #Accessing all clusters in a tracked cluster group + t_length.append(len(tracked_cluster_list)) + tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group + #median freq of last cluster in tracked cluster group + t_list.append(tracked_cluster['median_f']) + + # No tracked clusters yet? + if not tracked_clusters: + first_track = 1 + else: + first_track = 0 + + if first_track == 1: + for id, key in enumerate(cluster_dict.keys()): + cluster = cluster_dict[key] + cluster['id'] = 0 + + tracked_clusters['iteration'] = 0 + tracked_clusters[str(id)] = [cluster] + else: + iteration_id = tracked_clusters['iteration'] + 1 + tracked_clusters['iteration'] = iteration_id + + result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params) #Match clusters to tracked clusters + + result_int = [] + for val in result.values(): #Get all non-"new" results + if isinstance(val, int): + result_int.append(val) + + if len(result_int) == len(set(result_int)): #If all clusters match with a unique tracked cluster + for ii, key in enumerate(cluster_dict.keys()): + cluster = cluster_dict[key] + pos = result[str(ii)] #Find pos in result dict + cluster['id'] = iteration_id + if pos == "new": #Add cluster as a new tracked cluster + new_key = len(tracked_clusters)-1 #-1 for "iteration", + 1 for next cluster and -1 for starting at 0 = -1 + #print(f"new key: {new_key}") + tracked_clusters[str(new_key)] = [cluster] + else: #Add cluster to an existing tracked cluster + cluster_to_add_to = tracked_clusters[str(pos)] + cluster_to_add_to.append(cluster) + tracked_clusters[str(pos)] = cluster_to_add_to + + else: #If some clusters match with the same tracked cluster. + kk = 0 + skip_tracked_cluster = [] + skip_cluster = [] + while len(result_int) != len(set(result_int)): + kk += 1 + if kk > 10: + #Debug info: + unique_match_debug_info(result,cluster_dict,t_list) + print("Unresolved mode tracking") + breakpoint() + + for possible_match_id in set(result.values()): #Go through all unique values + if possible_match_id == "new": #Do nothing if "new" + pass + else: + test_if_str = np.argwhere(np.array(list(result.values())) == "new") #Test if "new" is present. If so, we must match with str instead of int. + if len(test_if_str) > 0: + itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the index of the unique cluster match + else: + itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the index of the unique cluster match + print(possible_match_id,np.array(list(result.values())),itemindex, len(itemindex)) + + if len(itemindex) > 1: #If multiple clusters match to the same tracked cluster + pos, result, cluster_index = resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters) + skip_tracked_cluster.append(str(result[str(cluster_index[pos])])) #Skip the tracked cluster that already has an optimal match. + skip_cluster.append(cluster_index[pos]) #Skip the cluster that already has an optimal match. + + result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params,result,skip_cluster,skip_tracked_cluster) #Match with tracked clusters, but skip the already matched.
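A hypothetical illustration (not part of the patch) of the match dictionary driving the loop above: keys are cluster indices as strings, values are either a tracked-cluster index or "new", and the while loop runs as long as two clusters claim the same tracked cluster.

result = {"0": 2, "1": 1, "2": 1, "3": "new"}  # clusters 1 and 2 both claim tracked cluster 1
result_int = [v for v in result.values() if isinstance(v, int)]  # [2, 1, 1]
# len(result_int) != len(set(result_int)) -> a conflict exists, so
# resolve_unique_matches must decide which cluster keeps tracked cluster 1.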
+ + #Debug info: + unique_match_debug_info(result,cluster_dict,t_list) + + result_int = [] + for val in result.values(): + if isinstance(val, int): + result_int.append(val) + + #Add the clusters to tracked clusters + for ii, key in enumerate(cluster_dict.keys()): + cluster = cluster_dict[key] + pos = result[str(ii)] #Find pos in result dict + cluster['id'] = iteration_id + if pos == "new": + new_key = len(tracked_clusters)-1 #-1 for "iteration", + 1 for next cluster and -1 for starting at 0 = -1 + tracked_clusters[str(new_key)] = [cluster] + else: + cluster_to_add_to = tracked_clusters[str(pos)] + cluster_to_add_to.append(cluster) + tracked_clusters[str(pos)] = cluster_to_add_to + + + + return tracked_clusters + +def match_cluster_to_tracked_cluster(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], Params: dict[str,Any], result_prev: dict[str,Any] = {},skip_cluster: list = [], skip_tracked_cluster: list = []) -> dict[str,Any]: + """ + Match clusters to tracked clusters + + The result dictionary consists of keys (cluster indices) and values (indices of the tracked clusters to match with) + Example: + Cluster 1 matches with tracked cluster 2 + Cluster 2 matches with tracked cluster 1 + Cluster 3 matches with tracked cluster 1 + Cluster 4 matches with "new", i.e. it could not be matched with an existing tracked cluster + + Args: + cluster_dict (dict): Dictionary of clusters + tracked_clusters (dict): Previously tracked clusters + Params (dict): tracking parameters + result_prev (dict): Dictionary of previous match results + skip_cluster (list): List of clusters that have proven they are an optimal match with a tracked cluster + skip_tracked_cluster (list): List of tracked clusters that have an optimal match with a cluster + + Returns: + result (dict): Dictionary of matches + + """ + result = {} + for id, key in enumerate(cluster_dict): #Go through all clusters + if id in skip_cluster: #If this cluster is already matched, skip it + result[str(id)] = result_prev[str(id)] + continue + + #Get mode shapes + cluster = cluster_dict[key] + omega = cluster['median_f'] + phi = cluster['mode_shapes'][0] + phi_all = cluster['mode_shapes'] + + Xres = [] + MAC_list = [] + D_freq = [] + omega_t_list = [] + MAC_max_list = [] + MAC_avg_list = [] + for key in tracked_clusters: #Go through all tracked clusters.
They are identified with keys which are integers from 0 up to the total number of clusters + if key == 'iteration': + pass + else: + tracked_cluster_list = tracked_clusters[key] #Accessing all clusters in a tracked cluster group + tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group + omega_t = tracked_cluster['median_f'] #median freq of last cluster in tracked cluster group + omega_t_list.append(omega_t) + phi_t_all = tracked_cluster['mode_shapes'] #phi of last cluster in tracked cluster group + phi_t = phi_t_all[0] + + MAC_list.append(float(calculate_mac(phi_t, phi))) + + MACs = np.zeros((phi_all.shape[0],phi_t_all.shape[0])) + for ii, phi in enumerate(phi_all): + for jj, phi_t in enumerate(phi_t_all): + MAC = float(calculate_mac(phi_t, phi)) + MACs[ii,jj] = MAC #Compare the cluster with all tracked clusters + + if key in skip_tracked_cluster: + MAC_avg = np.mean(0) + MAC_max = np.max(0) + MAC_max_list.append(0) + MAC_avg_list.append(0) + D_freq.append(10**6) + else: + MAC_avg = np.mean(MACs) + MAC_max = np.max(MACs) + MAC_max_list.append(MAC_max) + MAC_avg_list.append(MAC_avg) + D_freq.append(abs(omega_t-omega)/omega) + + itemindex1 = np.argwhere(np.array(MAC_max_list) > Params['phi_cri']) #Find where the cluster matches the tracked cluster regarding the MAC criterion + itemindex = np.argwhere(np.array(D_freq)[itemindex1[:,0]] < Params['freq_cri']) #Find where the cluster matches the tracked cluster regarding the MAC and frequency criteria + indicies = itemindex1[itemindex[:,0]] + if len(indicies) > 1: #If two or more tracked clusters comply with the mode shape criterion + Xres = [] + Xres_f = [] + Xres_MAC = [] + for nn in indicies: + pos = nn[0] + X = D_freq[pos]/MAC_max_list[pos] #Objective function + Xres.append(X) + Xres_f.append(D_freq[pos]) + Xres_MAC.append(MAC_max_list[pos]) + + if Xres != []: # One or more cluster(s) comply with the frequency criterion + pos1 = Xres.index(min(Xres)) #Find the cluster that is most likely + pos2 = Xres_MAC.index(max(Xres_MAC)) #Find the largest MAC + pos3 = Xres_f.index(min(Xres_f)) #Find the smallest frequency difference + + if len(Xres) > 1: #If more than one cluster complies with the criteria + Xres_left = Xres.copy() + del Xres_left[pos1] + if isinstance(Xres_left, np.float64): + Xres_left = [Xres_left] + + Xres_MAC_left = Xres_MAC.copy() + del Xres_MAC_left[pos1] + if isinstance(Xres_MAC_left, np.float64): + Xres_MAC_left = [Xres_MAC_left] + + Xres_f_left = Xres_f.copy() + del Xres_f_left[pos1] + if isinstance(Xres_f_left, np.float64): + Xres_f_left = [Xres_f_left] + + pos1_2 = Xres_left.index(min(Xres_left)) #Find the cluster that is most likely + pos2_2 = Xres_MAC_left.index(max(Xres_MAC_left)) #Find the cluster that is most likely based on MAC + pos3_2 = Xres_f_left.index(min(Xres_f_left)) #Find the cluster that is most likely based on Freq + + if (pos1 == pos2) and (pos1 == pos3): #If one match on all three parameters: objective function, max MAC and frequency difference + pos = int(indicies[pos1][0]) + result[str(id)] = pos #group to a tracked cluster + + #Make different: abs(min(Xres_left)/min(Xres)) < Params['obj_cri'] = 2 + elif abs(min(Xres_left)-min(Xres)) < Params['obj_cri']: #If the objective function results are close + if (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): #If both frequency differences are close to the target cluster + pos = int(indicies[pos2_2][0]) #Match with best MAC + result[str(id)] = pos #group to a tracked cluster + elif (min(Xres_f) < Params['freq_cri'])
and (min(Xres_f_left) > Params['freq_cri']): #If only Xres_f is below the threshold + pos = int(indicies[pos3][0]) #Match with lowest frequency difference + result[str(id)] = pos #group to a tracked cluster + elif (min(Xres_f) > Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): + pos = int(indicies[pos3_2][0]) #Match with lowest frequency difference + result[str(id)] = pos #group to a tracked cluster + else: #If none of the above, choose the one with highest MAC + pos = int(indicies[pos2_2][0]) + result[str(id)] = pos #group to a tracked cluster + else: #If none of the above, choose the one with lowest objective function + pos = int(indicies[pos1][0]) + result[str(id)] = pos #group to a tracked cluster + + else: #No cluster complies with the frequency criterion, so a new cluster is saved + result[str(id)] = "new" + + elif len(indicies) == 1: #If exactly one cluster complies with the mode shape criterion + pos = int(indicies[0][0]) + result[str(id)] = pos #group to a tracked cluster + + else: #Does not comply with the mode shape criterion + result[str(id)] = "new" + + return result + +def resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters): + """ + Resolve cases where two clusters match the same tracked cluster, and determine which match is optimal. + Clusters that do not have an optimal match are given the match result "new" + + Example: + Cluster 2 matches with tracked cluster 1 + Cluster 3 matches with tracked cluster 1 + + Args: + possible_match_id (int): The index of the tracked cluster + itemindex (np.ndarray): The indices of clusters that have the same match + result (dict): Dictionary of suggested matches + cluster_dict (dict): Dictionary of clusters + tracked_clusters (dict): Previously tracked clusters + + Returns: + pos (int): Index of the cluster that has the most optimal match. + result (dict): Dictionary of re-done matches + cluster_index: The indices of clusters that have the same match + + """ + mean_MAC = [] + keys = [str(y[0]) for y in itemindex.tolist()] #Make keys for dictionary based on indices in itemindex + for nn in itemindex: #Go through the possible cluster match indices + cluster = cluster_dict[int(nn[0])] + phi_all = cluster["mode_shapes"] #Find mode shapes in cluster + tracked_cluster_list = tracked_clusters[str(possible_match_id)] #Accessing all clusters in a tracked cluster group + tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group + phi_t_all = tracked_cluster['mode_shapes'] #Find mode shapes in tracked cluster + + #Make the lists of mode shapes the same length, i.e. the same number of poles + if len(phi_all) > len(phi_t_all): + phi_all = phi_all[0:len(phi_t_all)] + elif len(phi_all) < len(phi_t_all): + phi_t_all = phi_t_all[0:len(phi_all)] + else: #Equal length + pass + MAC_matrix = np.zeros((len(phi_all),len(phi_all))) #Initiate a matrix of MAC values + for ii, phi in enumerate(phi_all): + for jj, phi_t in enumerate(phi_t_all): + MAC_matrix[ii,jj] = calculate_mac(phi,phi_t) #MAC + + mean_MAC.append(np.mean(MAC_matrix)) #Save the mean MAC of this cluster compared to the matched tracked cluster + pos = mean_MAC.index(max(mean_MAC)) #Find the index with the highest mean MAC, i.e. the cluster that matches best with the tracked cluster.
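For reference, calculate_mac (imported here from methods.packages.clustering) is used as a Modal Assurance Criterion; a minimal numpy sketch, assuming the standard definition MAC(a, b) = |a^H b|^2 / ((a^H a)(b^H b)), could look like:

import numpy as np

def mac_sketch(a: np.ndarray, b: np.ndarray) -> float:
    # np.vdot conjugates its first argument, which handles complex mode shapes
    num = np.abs(np.vdot(a, b)) ** 2
    den = np.vdot(a, a).real * np.vdot(b, b).real
    return float(num / den)

# Collinear shapes give MAC = 1, orthogonal shapes give MAC = 0:
# mac_sketch(np.array([1.0, 2.0]), np.array([2.0, 4.0]))  -> 1.0
# mac_sketch(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  -> 0.0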
+ + cluster_index = itemindex[:,0] + + for key in keys: + if keys[pos] == key: #Let the best cluster match stay + pass + else: #Add the clusters with the worst match as a new cluster + result[key] = "new" + return pos, result, cluster_index + +def unique_match_debug_info(result,cluster_dict,t_list): + """ + Debug info + + Args: + result (dict): Dictionary of matches + cluster_dict (dict): Dictionary of clusters + t_list (list): List of median frequencies of last tracked tracked clusters + + Returns: + + """ + print('\n') + for ii, key in enumerate(cluster_dict.keys()): + cluster = cluster_dict[key] + pos = result[str(ii)] #Find pos in result dict + if pos == "new": + print(cluster_dict[key]['median_f'],str(ii),pos) + else: + print(cluster_dict[key]['median_f'],str(ii),pos,t_list[pos]) \ No newline at end of file diff --git a/src/methods/packages/pyoma/ssiWrapper.py b/src/methods/packages/pyoma/ssiWrapper.py index eadd783..db560e9 100644 --- a/src/methods/packages/pyoma/ssiWrapper.py +++ b/src/methods/packages/pyoma/ssiWrapper.py @@ -1,6 +1,8 @@ import typing import logging +import numpy as np + from pyoma2.algorithms.data.result import SSIResult from pyoma2.algorithms.data.run_params import SSIRunParams from pyoma2.algorithms.base import BaseAlgorithm @@ -106,20 +108,33 @@ def run(self) -> SSIResult: Fns, Xis, Phis, Fn_cov, Xi_cov, Phi_cov = gen.applymask( lista, mask7, Phis.shape[2] ) - + + #Infer minimum order + for ii in range(ordmin): + id = ii + nan_Matrix = np.empty(Fns.shape[0]) + nan_Matrix[:] = np.nan + Fns[:,id] = nan_Matrix + Xis[:,id] = nan_Matrix + Fn_cov[:,id] = nan_Matrix + Xi_cov[:,id] = nan_Matrix + nan_Matrix = np.empty((Phis.shape[0],Phis.shape[2])) + nan_Matrix[:,:] = np.nan + Phis[:,id,:] = nan_Matrix + Phi_cov[:,id,:] = nan_Matrix - # Get the labels of the poles - Lab = gen.SC_apply( - Fns, - Xis, - Phis, - ordmin, - ordmax, - step, - sc["err_fn"], - sc["err_xi"], - sc["err_phi"], - ) + # # Get the labels of the poles + # Lab = gen.SC_apply( + # Fns, + # Xis, + # Phis, + # ordmin, + # ordmax, + # step, + # sc["err_fn"], + # sc["err_xi"], + # sc["err_phi"], + # ) return SSIResult( Obs=Obs, @@ -130,7 +145,7 @@ def run(self) -> SSIResult: Fn_poles=Fns, Xi_poles=Xis, Phi_poles=Phis, - Lab=Lab, + # Lab=Lab, Fn_poles_cov=Fn_cov, Xi_poles_cov=Xi_cov, Phi_poles_cov=Phi_cov, diff --git a/src/methods/sys_id.py b/src/methods/sysid_module.py similarity index 88% rename from src/methods/sys_id.py rename to src/methods/sysid_module.py index ffd2b1e..6b1986b 100644 --- a/src/methods/sys_id.py +++ b/src/methods/sysid_module.py @@ -9,7 +9,7 @@ from data.comm.mqtt import setup_mqtt_client from data.accel.hbk.aligner import Aligner from methods.packages.pyoma.ssiWrapper import SSIcov -from methods.constants import MODEL_ORDER, BLOCK_SHIFT, DEFAULT_FS +from methods.constants import DEFAULT_FS, PARAMS @@ -40,6 +40,7 @@ def sysid(data, params): name="SSIcovmm_mt", method='cov_mm', br=params['block_shift'], + ordmin=params['model_order_min'], ordmax=params['model_order'], calc_unc=True ) @@ -54,7 +55,6 @@ def sysid(data, params): 'Xi_poles': output['Xi_poles'], 'Xi_poles_cov': output['Xi_poles_cov'], 'Phi_poles': output['Phi_poles'], - 'Lab': output['Lab'] } @@ -96,11 +96,6 @@ def get_oma_results( Returns: A tuple (OMA_output, timestamp) if successful, or None if data is not ready. 
""" - oma_params = { - "Fs": fs, - "block_shift": BLOCK_SHIFT, - "model_order": MODEL_ORDER - } number_of_samples = int(sampling_period * 60 * fs) data, timestamp = aligner.extract(number_of_samples) @@ -109,7 +104,7 @@ def get_oma_results( return None, None try: - oma_output = sysid(data, oma_params) + oma_output = sysid(data, PARAMS) return oma_output, timestamp except Exception as e: print(f"sysID failed: {e}") @@ -129,14 +124,19 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_topic: The MQTT topic to publish results to. fs: Sampling frequency. """ + t1 = time.time() + loop = True while True: try: - time.sleep(0.5) + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") oma_output, timestamp = get_oma_results(sampling_period, aligner, fs) - print(f"OMA result: {oma_output}") - print(f"Timestamp: {timestamp}") + if oma_output: + print(f"Timestamp: {timestamp}") payload = { "timestamp": timestamp.isoformat(), "OMA_output": convert_numpy_to_list(oma_output) @@ -150,15 +150,19 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_client.publish(publish_topic, message, qos=1) print(f"[{timestamp.isoformat()}] Published OMA result to {publish_topic}") + loop = True break - except Exception as e: - print(f"Failed to publish OMA result: {e}") + print(f"\nFailed to publish OMA result: {e}") + except KeyboardInterrupt: - print("Shutting down gracefully") - aligner.client.loop_stop() - aligner.client.disconnect() + print("\nShutting down gracefully") + aligner.mqtt_client.loop_stop() + aligner.mqtt_client.disconnect() publish_client.disconnect() + loop = False break except Exception as e: - print(f"Unexpected error: {e}") + print(f"\nUnexpected error: {e}") + + return loop diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py index c047e0b..f74ba04 100644 --- a/tests/integration/methods/test_sys_id.py +++ b/tests/integration/methods/test_sys_id.py @@ -3,7 +3,7 @@ from datetime import datetime from unittest.mock import MagicMock -from methods import sys_id +from methods import sysid_module def test_sysid(): # Define OMA parameters @@ -17,7 +17,7 @@ def test_sysid(): data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T # Perform system identification - sysid_output = sys_id.sysid(data, oma_params) + sysid_output = sysid_module.sysid(data, oma_params) # Extract results using dictionary keys frequencies = sysid_output['Fn_poles'] @@ -59,7 +59,7 @@ def test_sysid_full_flow_success(): "model_order": 20 } - oma_result = sys_id.sysid(data, oma_params) + oma_result = sysid_module.sysid(data, oma_params) # Check output structure assert isinstance(oma_result, dict) @@ -68,7 +68,7 @@ def test_sysid_full_flow_success(): assert isinstance(oma_result[key], list) or isinstance(oma_result[key], np.ndarray) # Convert to JSON-safe structure - converted = sys_id.convert_numpy_to_list(oma_result) + converted = sysid_module.convert_numpy_to_list(oma_result) assert isinstance(converted, dict) assert isinstance(converted["Fn_poles"], list) @@ -76,7 +76,7 @@ def test_sysid_full_flow_success(): def test_get_oma_results_integration(mocker): from datetime import datetime import numpy as np - from methods import sys_id + from methods import sysid_module fs = 100 # sampling frequency mock_aligner = MagicMock() @@ -88,7 +88,7 @@ def test_get_oma_results_integration(mocker): mock_aligner.extract.return_value = (mock_data, mock_timestamp) - oma_output, timestamp = 
sys_id.get_oma_results(number_of_minutes, mock_aligner, fs) + oma_output, timestamp = sysid_module.get_oma_results(number_of_minutes, mock_aligner, fs) assert isinstance(oma_output, dict) assert "Fn_poles" in oma_output @@ -108,4 +108,4 @@ def test_sysid_raises_on_empty_data(): } with pytest.raises(Exception): - sys_id.sysid(data, oma_params) + sysid_module.sysid(data, oma_params) diff --git a/tests/unit/methods/test_sys_id_unit.py b/tests/unit/methods/test_sys_id_unit.py index 22efc4a..9799841 100644 --- a/tests/unit/methods/test_sys_id_unit.py +++ b/tests/unit/methods/test_sys_id_unit.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch from datetime import datetime import json -from methods.sys_id import ( +from methods.sysid_module import ( sysid, get_oma_results, publish_oma_results, From 1f711db7da6d19de897e67e5d9ea5679e1b61105 Mon Sep 17 00:00:00 2001 From: au650680 Date: Tue, 21 Oct 2025 15:11:47 +0200 Subject: [PATCH 2/6] Added clustering and mode tracking Clustering and mode tracking is added with example code for both and a shared module for both. Other small changes have been made. --- src/data/accel/hbk/aligner.py | 2 +- src/data/accel/metadata.py | 1 + src/examples/clustering.py | 53 + src/examples/example.py | 31 + src/examples/mode_tracking.py | 54 +- src/examples/run_pyoma.py | 56 +- src/examples/updating_parameters.py | 10 +- src/functions/plot_mode_tracking.py | 67 ++ src/functions/sysid_plot.py | 455 ++++++++ src/methods/clustering_tracking_module.py | 317 +++++ src/methods/constants.py | 38 +- src/methods/model_update_module.py | 64 +- src/methods/packages/clustering.py | 1206 ++++++++++++++++++++ src/methods/packages/mode_track.py | 944 --------------- src/methods/packages/mode_tracking.py | 355 ++++++ src/methods/packages/pyoma/ssiWrapper.py | 43 +- src/methods/{sys_id.py => sysid_module.py} | 38 +- tests/integration/methods/test_sys_id.py | 14 +- tests/unit/methods/test_sys_id_unit.py | 2 +- 19 files changed, 2652 insertions(+), 1098 deletions(-) create mode 100644 src/examples/clustering.py create mode 100644 src/functions/plot_mode_tracking.py create mode 100644 src/functions/sysid_plot.py create mode 100644 src/methods/clustering_tracking_module.py create mode 100644 src/methods/packages/clustering.py delete mode 100644 src/methods/packages/mode_track.py create mode 100644 src/methods/packages/mode_tracking.py rename src/methods/{sys_id.py => sysid_module.py} (88%) diff --git a/src/data/accel/hbk/aligner.py b/src/data/accel/hbk/aligner.py index 335e4ef..ab339a1 100644 --- a/src/data/accel/hbk/aligner.py +++ b/src/data/accel/hbk/aligner.py @@ -130,7 +130,7 @@ def _extract_aligned_block(self, group: List[int], batch_size: int, ch.clear_used_data(group[0], requested_samples) aligned_array = np.array(aligned_data, dtype=np.float32) - print(f"Aligned shape: {aligned_array.shape}") + print(f"\nAligned shape: {aligned_array.shape}") return aligned_array, utc_time diff --git a/src/data/accel/metadata.py b/src/data/accel/metadata.py index 0e54027..592f3c9 100644 --- a/src/data/accel/metadata.py +++ b/src/data/accel/metadata.py @@ -11,6 +11,7 @@ def extract_fs_from_metadata(mqtt_config: Dict[str, Any]) -> int: def _on_metadata(client: MQTTClient, userdata, message) -> None: try: payload = json.loads(message.payload.decode("utf-8")) + print("Metadata",payload) fs_candidate = payload["Analysis chain"][0]["Sampling"] if fs_candidate: fs_result["fs"] = fs_candidate diff --git a/src/examples/clustering.py b/src/examples/clustering.py new file mode 100644 index 
0000000..707bb8f --- /dev/null +++ b/src/examples/clustering.py @@ -0,0 +1,53 @@ +import sys +import time +import matplotlib.pyplot as plt +from data.comm.mqtt import load_config +from data.accel.hbk.aligner import Aligner +from methods import sysid_module as sysID +from methods import clustering_tracking_module as MT +from methods.constants import PARAMS +from functions.sysid_plot import plot_clusters + +# pylint: disable=R0914 +def run_clustering_with_local_sysid(config_path): + number_of_minutes = 1 + config = load_config(config_path) + mqtt_config = config["MQTT"] + + # Setting up the client and extracting Fs + data_client, fs = sysID.setup_client(mqtt_config) + + # Setting up the aligner + data_topic_indexes = [0, 2, 3, 4] + selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes] + aligner = Aligner(data_client, topics=selected_topics) + + aligner_time = None + t1 = time.time() + while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") + oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) + data_client.disconnect() + + # Mode Tracks + dictionary_of_clusters, median_frequencies = MT.run_mode_clustering( + oma_output,PARAMS) + + # Print frequencies + print("\nMedian frequencies:", median_frequencies) + + fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None) + plt.show(block=False) + sys.stdout.flush() + +def run_clustering_with_remote_sysid(config_path): + oma_output, dictionary_of_clusters = MT.subscribe_and_cluster(config_path,PARAMS) + fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None) + plt.show(block=False) + sys.stdout.flush() + +def run_live_clustering_with_remote_sysid(config_path): + MT.subscribe_cluster_looping(config_path,topic_index=0,plot=[1,1]) diff --git a/src/examples/example.py b/src/examples/example.py index f6af198..7f49c72 100644 --- a/src/examples/example.py +++ b/src/examples/example.py @@ -6,10 +6,17 @@ run_oma_and_plot, run_oma_and_publish, run_oma_and_print, + run_oma_and_publish_loop, +) +from examples.clustering import ( + run_clustering_with_local_sysid, + run_clustering_with_remote_sysid, + run_live_clustering_with_remote_sysid, ) from examples.mode_tracking import ( run_mode_tracking_with_local_sysid, run_mode_tracking_with_remote_sysid, + run_live_mode_tracking_with_remote_sysid, ) from examples.updating_parameters import run_model_update @@ -37,6 +44,11 @@ def align_readings(ctx): def oma_and_publish(ctx): run_oma_and_publish(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def oma_and_publish_looping(ctx): + run_oma_and_publish_loop(ctx.obj["CONFIG"]) + @cli.command() @click.pass_context def oma_and_plot(ctx): @@ -47,6 +59,20 @@ def oma_and_plot(ctx): def oma_and_print(ctx): run_oma_and_print(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def clustering_with_local_sysid(ctx): + run_clustering_with_local_sysid(ctx.obj["CONFIG"]) + +@cli.command() +@click.pass_context +def clustering_with_remote_sysid(ctx): + run_clustering_with_remote_sysid(ctx.obj["CONFIG"]) + +@cli.command() +@click.pass_context +def live_clustering_with_remote_sysid(ctx): + run_live_clustering_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context @@ -58,6 +84,11 @@ def mode_tracking_with_local_sysid(ctx): def mode_tracking_with_remote_sysid(ctx): run_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def 
live_mode_tracking_with_remote_sysid(ctx): + run_live_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) + @cli.command() @click.pass_context diff --git a/src/examples/mode_tracking.py b/src/examples/mode_tracking.py index cb4f270..1c52c49 100644 --- a/src/examples/mode_tracking.py +++ b/src/examples/mode_tracking.py @@ -1,12 +1,15 @@ -import numpy as np +import sys +import matplotlib.pyplot as plt from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from methods import sys_id as sysID -from methods import model_update_module as MT +from methods import sysid_module as sysID +from methods import clustering_tracking_module as MT +from methods.constants import PARAMS +from functions.plot_mode_tracking import plot_tracked_modes # pylint: disable=R0914 def run_mode_tracking_with_local_sysid(config_path): - number_of_minutes = 0.5 + number_of_minutes = 1 config = load_config(config_path) mqtt_config = config["MQTT"] @@ -14,7 +17,7 @@ def run_mode_tracking_with_local_sysid(config_path): data_client, fs = sysID.setup_client(mqtt_config) # Setting up the aligner - data_topic_indexes = [0, 2] + data_topic_indexes = [0, 2, 3, 4] selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes] aligner = Aligner(data_client, topics=selected_topics) @@ -23,32 +26,25 @@ def run_mode_tracking_with_local_sysid(config_path): oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() - # Mode Track - cleaned_values, median_frequencies, confidence_intervals = MT.run_mode_track( - oma_output) + # Mode Tracks + dictionary_of_clusters, median_frequencies = MT.run_mode_clustering( + oma_output,PARAMS) - median_frequencies = [] - mode_shapes_list = [] - - for cluster in cleaned_values: - mode_shapes = cluster["mode_shapes"] # shape: (n_modes_in_cluster, n_channels) - median_shape = np.median(mode_shapes, axis=0) # median across modes - median_frequencies.append(cluster["median"]) - mode_shapes_list.append(median_shape) - - # Convert to numpy arrays - median_frequencies = np.array(median_frequencies) - mode_shapes_array = np.array(mode_shapes_list) # shape: (n_clusters, n_channels) - print("Mode shapes:", mode_shapes_array) + # Print frequencies print("\nMedian frequencies:", median_frequencies) - print("\nConfidence intervals:", confidence_intervals) + tracked_clusters = {} + tracked_clusters = MT.run_mode_tracking(dictionary_of_clusters,tracked_clusters,PARAMS) + + fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None) + plt.show(block=True) + sys.stdout.flush() def run_mode_tracking_with_remote_sysid(config_path): - config = load_config(config_path) - cleaned_values, median_frequencies, confidence_intervals = ( - MT.subscribe_and_get_cleaned_values(config_path) - ) - print("Cleaned values:", cleaned_values) - print("Tracked frequencies:", median_frequencies) - print("\nConfidence intervals:", confidence_intervals) + oma_output, clusters, tracked_clusters = MT.subscribe_and_get_clusters(config_path) + fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None) + plt.show(block=True) + sys.stdout.flush() + +def run_live_mode_tracking_with_remote_sysid(config_path): + MT.subscribe_cluster_and_tracking_looping(config_path,topic_index=0,plot=[1,1,1]) diff --git a/src/examples/run_pyoma.py b/src/examples/run_pyoma.py index c05f290..0a08ba5 100644 --- a/src/examples/run_pyoma.py +++ b/src/examples/run_pyoma.py @@ -1,9 +1,11 @@ import sys +import time import 
matplotlib.pyplot as plt -from methods import sys_id as sysID from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from functions.natural_freq import plot_natural_frequencies +from functions.sysid_plot import plot_stabilization_diagram +from methods import sysid_module as sysID +from methods.constants import PARAMS def setup_oma(config_path, data_topic_indexes): @@ -31,27 +33,38 @@ def setup_oma(config_path, data_topic_indexes): def run_oma_and_plot(config_path): - number_of_minutes = 0.2 - data_topic_indexes = [0, 2] + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) fig_ax = None aligner_time = None + t1 = time.time() while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() - fig_ax = plot_natural_frequencies(results['Fn_poles'], freqlim=(0, 75), fig_ax=fig_ax) + print(aligner_time) + fig_ax = plot_stabilization_diagram(results, PARAMS, fig_ax=fig_ax) plt.show(block=True) sys.stdout.flush() def run_oma_and_print(config_path): number_of_minutes = 0.2 - data_topic_indexes = [0, 2] + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) aligner_time = None + t1 = time.time() while aligner_time is None: + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) data_client.disconnect() sys.stdout.flush() @@ -63,15 +76,15 @@ def run_oma_and_print(config_path): def run_oma_and_publish(config_path): - number_of_minutes = 0.02 - data_topic_indexes = [0, 2] + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) publish_config = load_config(config_path)["sysID"] # Setting up the client for publishing OMA results publish_client, _ = sysID.setup_client(publish_config) # fs not needed here - sysID.publish_oma_results( + publish_result = sysID.publish_oma_results( number_of_minutes, aligner, publish_client, @@ -79,6 +92,29 @@ def run_oma_and_publish(config_path): fs ) - print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") + if publish_result is True: + print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") data_client.disconnect() sys.stdout.flush() + + +def run_oma_and_publish_loop(config_path): + number_of_minutes = 1 + data_topic_indexes = [0, 2, 3, 4] + aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + publish_config = load_config(config_path)["sysID"] + + # Setting up the client for publishing OMA results + publish_client, _ = sysID.setup_client(publish_config) # fs not needed here + + loop = True + while loop: + loop = sysID.publish_oma_results( + number_of_minutes, + aligner, + publish_client, + publish_config["TopicsToSubscribe"][0], + fs + ) + if loop is True: + print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") diff --git a/src/examples/updating_parameters.py b/src/examples/updating_parameters.py index 04984ce..fe3e98b 100644 --- a/src/examples/updating_parameters.py +++ b/src/examples/updating_parameters.py @@ -1,8 +1,10 @@ import time from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from methods import sys_id as sysID 
-from methods import model_update_module as MT +from methods import sysid_module as sysID +from methods import clustering_tracking_module as MT +from methods import model_update_module as MU +from methods.constants import PARAMS # pylint: disable=R0914, C0103 def run_model_update(config_path): @@ -26,10 +28,10 @@ def run_model_update(config_path): data_client.disconnect() # Mode Track - cleaned_values, _, _ = MT.run_mode_track(oma_output) + dictionary_clusters, median_frequencies = MT.run_mode_clustering(oma_output,PARAMS) # Run model update - update_result = MT.run_model_update(cleaned_values) + update_result = MU.run_model_update(dictionary_clusters) if update_result is not None: optimized_parameters = update_result['optimized_parameters'] diff --git a/src/functions/plot_mode_tracking.py b/src/functions/plot_mode_tracking.py new file mode 100644 index 0000000..bac9357 --- /dev/null +++ b/src/functions/plot_mode_tracking.py @@ -0,0 +1,67 @@ +from typing import Tuple, Dict, Any +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.figure +plt.rcParams['font.family'] = 'Times New Roman' + +def plot_tracked_modes( + tracked_clusters: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax: Any = None, + x_length: int = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot tracked modes + + Args: + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + Returns: + fig_ax (tuple): fig and ax of plot + + """ + + if fig_ax is None: + plt.ion() + fig, (ax1) = plt.subplots(1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1) = fig_ax + ax1.clear() + + ii = 0 + max_x = [] + for key in tracked_clusters.keys(): + if key == "iteration": + pass + else: + tracked_cluster_list = tracked_clusters[key] + m_f = [] + x = [] + for cluster in tracked_cluster_list: + m_f.append(cluster['median_f']) + x.append(cluster['id']) + + sc = ax1.scatter(x, m_f, marker="o", s=50) + col2 = sc.get_facecolors().tolist() + ax1.plot(x, m_f, color=col2[0]) + max_x.append(max(x)) + ii += 1 + + ax1.set_ylabel("Eigenfrequency [Hz]", fontsize=20, color = 'black') + ax1.set_xlabel("Dataset", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + ax1.set_ylim(0, oma_params['Fs']/2) + if x_length is not None: + ax1.set_xlim(np.maximum(max(max_x)-x_length,0),max(max_x)+1) + ax1.set_xticks(np.arange(np.maximum(max(max_x)-x_length,0), + np.maximum(max(max_x)+1,x_length), 5)) + + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1) diff --git a/src/functions/sysid_plot.py b/src/functions/sysid_plot.py new file mode 100644 index 0000000..ea6f87c --- /dev/null +++ b/src/functions/sysid_plot.py @@ -0,0 +1,455 @@ +from typing import Tuple, Dict, Any +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.figure +from methods.packages.clustering import (remove_complex_conjugates,remove_highly_uncertain_points) +plt.rcParams['font.family'] = 'Times New Roman' + + +def plot_pre_stabilization_diagram( + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + + """ + Plot stabilization of raw OMA data before pre-cleaning + + Args: + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + fix_ax (tuple): fig and ax of plot to 
redraw + Returns: + fig_ax (tuple): fig and ax of plot + + """ + + + if fig_ax is None: + plt.ion() + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1,ax2) = fig_ax + ax1.clear() + ax2.clear() + + frequencies, damping_ratios, _, cov_freq, cov_damping = remove_complex_conjugates(oma_results) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="o", s=50, c="r") + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + # # # ............................................................................ + + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + ax2.scatter(x, y, marker="o", s=50, c="r") + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax2.set_ylim(0, 0.1+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + + for i, txt in enumerate(y_model_order): + ax2.annotate(str(txt), (x[i], y[i])) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_stabilization_diagram( + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of OMA data before after pre-cleaning + + Args: + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + Returns: + fig_ax (tuple): fig and ax of plot + + """ + + if fig_ax is None: + plt.ion() + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1,ax2) = fig_ax + ax1.clear() + ax2.clear() + + #Pre-clean + frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + 
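Throughout these plotting helpers the error bars come from the pole covariances as xerr = 2*sqrt(cov): under a Gaussian assumption this two-standard-deviation half-width is roughly a 95% confidence band per pole. A small standalone sketch with made-up values:

import numpy as np

f_hat = np.array([1.52, 3.97])   # hypothetical pole frequencies [Hz]
f_var = np.array([1e-4, 4e-4])   # hypothetical frequency variances
half = 2 * np.sqrt(f_var)        # 2-sigma half-width used as xerr
print(list(zip(f_hat - half, f_hat + half)))  # approximate 95% intervals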
ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + # # # ............................................................................ + + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + ax2.scatter(x, y, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + for i, txt in enumerate(y_model_order): + ax2.annotate(str(txt), (x[i], y[i])) + + ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_clusters(clusters: Dict[str,dict], + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of clusters + + Args: + clsuters (dict): Dictionary of clusters + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + fix_ax (tuple): fig and ax of plot to redraw + Returns: + fig_ax (tuple): fig and ax of plot + + """ + + if fig_ax is None: + plt.ion() + #fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True) + title_number = 0 + else: + fig, (ax1,ax2) = fig_ax + title = fig.axes[0].get_title() + ax1.clear() + ax2.clear() + + iteration_number = title.split(' ')[-1] + #print(iteration_number) + title_number = int(iteration_number) + 1 + + #Pre-clean + frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="^", s=20, c="r", zorder=0, label='Non clustered') + + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar( + x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="r", zorder=1 + ) + + idx = 0 + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + MO = cluster['model_order'] + freq_cluster = cluster['f'] + freq_cov_cluster = cluster['cov_f'] + + sc = ax1.scatter(freq_cluster, MO, marker="o", s=40, label=f'Cluster {i}') + col = sc.get_facecolors().tolist() + ax1.vlines(np.median(freq_cluster),min(cluster['model_order']), + max(cluster['model_order']),color=col) + + xerr_cluster = np.sqrt(freq_cov_cluster) * 2 + ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray",zorder=200) + idx += 1 + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + ax1.legend(prop={'size': 20}) 
#bbox_to_anchor=(0.1, 1.1) + ax1.set_title(f"Data set: {title_number}") + + # # # ............................................................................ + + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + sc = ax2.scatter(x, y, marker="^", s=20, c="r", zorder=0, label='Non clustered') + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + freq_cluster = cluster['f'] + damp_cluster = cluster['d'] + damp_cov_cluster = cluster['cov_d'] + + ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3) + xerr_cluster = np.sqrt(damp_cov_cluster) * 2 + ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray") + + for i, txt in enumerate(y_model_order): + ax2.annotate(str(txt), (x[i], y[i])) + + if y[~np.isnan(y)].shape[0] > 1: + ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1)) + else: + ax2.set_ylim(0, 0.1) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_stabilization_diagram_for_paper( + oma_results: Dict[str, Any], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of OMA data before after pre-cleaning for paper + + Args: + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + Returns: + fig_ax (tuple): fig and ax of plot + + """ + if fig_ax is None: + plt.ion() + fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1) = fig_ax + ax1.clear() + + #Pre-clean + frequencies, cov_freq, damping_ratios, cov_damping,_ = remove_highly_uncertain_points(oma_results,oma_params) + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 + + ax1.scatter(x, y_model_order, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = 2*np.sqrt(cov_freq) + xerr = xerr.flatten(order="f") + ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + # # # ............................................................................ 
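A note on the y_model_order expression used in these diagrams: frequencies has shape (n_poles, n_orders), flatten(order="f") walks it column by column, so integer-dividing the flat index by the number of rows recovers the column position along the model-order axis. A quick check with dummy data:

import numpy as np

freqs = np.arange(12.0).reshape(4, 3)  # dummy array: 4 poles x 3 model orders
x = freqs.flatten(order="f")           # column-major: one column per model order
order_idx = np.array([i // freqs.shape[0] for i in range(x.size)])
print(order_idx)                       # [0 0 0 0 1 1 1 1 2 2 2 2]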
+ + if fig_ax is None: + plt.ion() + fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax2) = fig_ax + ax2.clear() + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + x = frequencies.flatten(order="f") + y = damping_ratios.flatten(order="f") + + ax2.scatter(x, y, marker="o", s=50, c="r") + + if cov_freq is not None: + xerr = np.sqrt(cov_damping) * 2 + xerr = xerr.flatten(order="f") + ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") + + ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) + +def plot_clusters_for_paper(clusters: Dict[str,dict], + oma_params: Dict[str, Any], + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + """ + Plot stabilization of clusters for paper + + Args: + clsuters (dict): Dictionary of clusters + oma_results (dict): PyOMA results + oma_params (dict): OMA parameters + fix_ax (tuple): fig and ax of plot to redraw + Returns: + fig_ax (tuple): fig and ax of plot + + """ + if fig_ax is None: + plt.ion() + fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + else: + fig, (ax1) = fig_ax + ax1.clear() + + ax1.set_ylabel("Model order", fontsize=20, color = 'black') + ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax1.tick_params(axis='both', which='major', labelsize=17) + + idx = 0 + for i, key in enumerate(clusters.keys()): + + cluster = clusters[key] + MO = cluster['model_order'] + freq_cluster = cluster['f'] + freq_cov_cluster = cluster['cov_f'] + + ax1.scatter(freq_cluster, MO, marker="o", s=50, label=f'Cluster {i+1}') + + xerr_cluster = np.sqrt(freq_cov_cluster) * 2 + ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray",zorder=200) + idx += 1 + + ax1.set_ylim(0, oma_params['model_order'] + 1) + ax1.set_xlim(0, oma_params['Fs']/2) + # Add major and minor grid lines + ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + ax1.legend(prop={'size': 20}) #bbox_to_anchor=(0.1, 1.1) + + # # # ............................................................................ 
+ + if fig_ax is None: + plt.ion() + fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) + + else: + fig, (ax2) = fig_ax + ax2.clear() + + ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') + ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') + ax2.tick_params(axis='both', which='major', labelsize=17) + + for i, key in enumerate(clusters.keys()): + cluster = clusters[key] + freq_cluster = cluster['f'] + damp_cluster = cluster['d'] + damp_cov_cluster = cluster['cov_d'] + xerr = np.sqrt(damp_cov_cluster) * 2 + xerr = xerr.flatten(order="f") + + ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3,label=f'Cluster {i+1}') + xerr_cluster = np.sqrt(damp_cov_cluster) * 2 + ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, + fmt="None", capsize=5, ecolor="gray") + + ax2.set_ylim(0, max(damp_cluster)+0.005) + ax2.set_xlim(0, oma_params['Fs']/2) + ax2.legend(prop={'size': 20}) + + # Add major and minor grid lines + ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) + ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) + + fig.tight_layout() + fig.canvas.draw() + fig.canvas.flush_events() + + return fig, (ax1,ax2) diff --git a/src/methods/clustering_tracking_module.py b/src/methods/clustering_tracking_module.py new file mode 100644 index 0000000..3ffdf88 --- /dev/null +++ b/src/methods/clustering_tracking_module.py @@ -0,0 +1,317 @@ +import json +import sys +import threading +from typing import Any, List, Dict, Tuple +import numpy as np +import matplotlib.pyplot as plt +import paho.mqtt.client as mqtt +from methods.constants import PARAMS +from methods.packages.clustering import cluster_func +from methods.packages.mode_tracking import cluster_tracking +from functions.sysid_plot import (plot_clusters,plot_stabilization_diagram) +from functions.plot_mode_tracking import plot_tracked_modes +from data.comm.mqtt import load_config, setup_mqtt_client +# pylint: disable=C0103, W0603 + +# Global threading event to wait for OMA data +result_ready = threading.Event() +oma_output_global = None # will store received OMA data inside callback + +def _convert_oma_output(obj: Any) -> Any: + """Recursively convert JSON structure into complex numbers and numpy arrays.""" + if isinstance(obj, dict): + if "real" in obj and "imag" in obj: + return complex(obj["real"], obj["imag"]) + return {k: _convert_oma_output(v) for k, v in obj.items()} + + if isinstance(obj, list): + try: + return np.array([_convert_oma_output(item) for item in obj]) + except Exception: + return [_convert_oma_output(item) for item in obj] + + return obj + + +def _on_connect(client: mqtt.Client, userdata: dict, flags: dict, reason_code: int, properties: mqtt.Properties) -> None: + """Callback when MQTT client connects.""" + if reason_code == 0: + print("Connected to MQTT broker.") + client.subscribe(userdata["topic"], qos=userdata["qos"]) + print(f"Subscribed to topic: {userdata['topic']}") + else: + print(f"Failed to connect to MQTT broker. 
Code: {reason_code}") + + +def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) -> None: + """Callback when a message is received.""" + global oma_output_global + print(f"Message received on topic: {msg.topic}") + try: + raw = json.loads(msg.payload.decode("utf-8")) + oma_output = _convert_oma_output(raw["OMA_output"]) + timestamp = raw["timestamp"] + print(f"Received OMA data at timestamp: {timestamp}") + oma_output_global = oma_output + result_ready.set() + except Exception as e: + print(f"Error processing OMA message: {e}") + + +def run_mode_clustering(oma_output: Any, params: dict[str,Any]) -> Tuple[dict[str,Any], np.ndarray]: + """ + Runs the mode clustering algorithm. + + Args: + oma_output (Any): OMA output from subscription or elsewhere. + Returns: + cluster_dict (dict[str,Any]), + median_frequencies (np.ndarray), + """ + dictionary_clusters = cluster_func(oma_output, params) + + median_frequencies = np.array([dictionary_clusters[key]["median_f"] + for key in dictionary_clusters.keys()]) + return dictionary_clusters, median_frequencies + + +def run_mode_tracking(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], + params: dict[str,Any]) -> dict[str,Any]: + """ + Runs the mode tracking algorithm. + + Args: + cluster_dict (dict[str,Any]): Clusters from OMA + Returns: + tracked_clusters (dict[str,Any]): Tracked clusters + """ + tracked_clusters = cluster_tracking(cluster_dict, tracked_clusters, params) + return tracked_clusters + + +def subscribe_and_cluster(config_path: str, params: Dict[str,Any] + ) -> Tuple[Dict[str,Any], Dict[str,Any]]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + print("Waiting for OMA data...") + try: + result_ready.wait() # Wait until message arrives + mqtt_client.loop_stop() + mqtt_client.disconnect() + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + clusters, median_frequencies = run_mode_clustering(oma_output_global,params) + print("Clustered frequencies", median_frequencies) + + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + except Exception as e: + print(f"Unexpected error: {e}") + + return oma_output_global, clusters + + +def subscribe_and_get_clusters(config_path: str) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. 
+ + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + tracked_clusters (Dict[str,Any]]): Tracked clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + tracked_clusters = {} + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + print("Waiting for OMA data...") + try: + result_ready.wait() # Wait until message arrives + mqtt_client.loop_stop() + mqtt_client.disconnect() + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) + + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + except Exception as e: + print(f"Unexpected error: {e}") + + return oma_output_global, clusters, tracked_clusters + + +def subscribe_cluster_looping(config_path: str, topic_index: int = 0, + plot: np.ndarray[bool] = np.array([1,1]) + ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + topic_index (int): Topic to subscribe + plot (np.ndarray[bool]): Array describing what plots to show + + Returns: + oma_output_global (Dict[str,Any]): OMA output + clusters (Dict[str,Any]]): Clusters + tracked_clusters (Dict[str,Any]]): Tracked clusters + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + + fig_ax1 = None + fig_ax2 = None + while True: + # try: + print("Waiting for OMA data...") + result_ready.wait() # Wait until message arrives + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. 
Running mode clustering and tracking...") + result_ready.clear() + + if plot[0] == 1: + fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) + plt.show(block=False) + + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + + if plot[1] == 1: + fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) + plt.show(block=False) + + sys.stdout.flush() + # except KeyboardInterrupt: + # print("Shutting down gracefully") + # mqtt_client.loop_stop() + # mqtt_client.disconnect() + # break + # except Exception as e: + # print(f"Unexpected error: {e}") + +def subscribe_cluster_and_tracking_looping(config_path: str, topic_index: int = 0, + plot: np.ndarray[bool] = np.array([1,1,1]) + ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. + + Args: + config_path (str): Path to config JSON. + topic_index (int): Topic to subscribe + plot (np.ndarray[bool]): Array describing what plots to show + + Returns: + + Plots: + Stabilization diagram + Cluster plot + Tracked clusters plot + """ + global oma_output_global + oma_output_global = None # Reset in case old data is present + result_ready.clear() + tracked_clusters = {} + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + + fig_ax1 = None + fig_ax2 = None + fig_ax3 = None + while True: + try: + print("Waiting for OMA data...") + result_ready.wait() # Wait until message arrives + + if oma_output_global is None: + raise RuntimeError("Failed to receive OMA data.") + + print("OMA data received. Running mode clustering and tracking...") + result_ready.clear() + + if plot[0] == 1: + fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) + plt.show(block=False) + + clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) + print("Clustered frequencies", median_frequencies) + tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) + + if plot[1] == 1: + fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) + plt.show(block=False) + if plot[2] == 1: + fig_ax3 = plot_tracked_modes(tracked_clusters,PARAMS,fig_ax=fig_ax3,x_length=None) + plt.show(block=False) + sys.stdout.flush() + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + break + except Exception as e: + print(f"Unexpected error: {e}") diff --git a/src/methods/constants.py b/src/methods/constants.py index 3309c56..d376b5e 100644 --- a/src/methods/constants.py +++ b/src/methods/constants.py @@ -6,17 +6,39 @@ MIN_SAMPLES_NEEDED = 540 # Minimum samples for running sysid -BLOCK_SHIFT = 30 - -MODEL_ORDER = 20 - -# Constants for Model track -MSTAB_FACTOR = 0.4 # This is goning to be multiplied by the MODEL_ORDER to get the mstab -TMAC = 0.9 - # Constants for Model Update # 1st parameter is spring stiffness and 2nd is unbounded length X0 = np.array([1e1, 10e-3]) # Create bounds using element-wise i.e. 
different parameters have different bounds
 BOUNDS = [(1e-2 * X0[0], 1e2 * X0[0]), (1e-2 * X0[1], 1e2 * X0[1])]
+
+
+
+
+# Parameters
+PARAMS = {}
+
+# Pre-cleaning thresholds
+PARAMS['freq_variance_treshold'] = 0.1
+PARAMS['damp_variance_treshold'] = 10**6
+
+PARAMS['Fs'] = 256  # Sampling frequency [Hz]
+PARAMS['model_order_min'] = 2  # Minimum model order
+PARAMS['model_order'] = 15  # Maximum model order for the analysis
+PARAMS['block_shift'] = 30  # Block size in the Hankel matrix
+PARAMS['sensor_order'] = np.array([0, 2, 1, 3])  # Sensor locations in the data
+
+# Params for clustering:
+PARAMS['mstab'] = 6  # Minimum number of frequencies to be validated as a cluster
+PARAMS['tMAC'] = 0.95  # MAC threshold for inclusion in a cluster
+PARAMS['bound_multiplier'] = 2  # Standard deviation multiplier
+PARAMS['allignment_factor'] = [0.05,0.01]  # Factors for alignment
+
+# Params for mode tracking
+PARAMS['phi_cri'] = 0.8  # MAC criterion [%] (alternative value: 0.98)
+PARAMS['freq_cri'] = 0.2  # Frequency difference criterion [%]
+PARAMS['obj_cri'] = 0.1
+# If several clusters match and it is not clear which one is best, check whether
+# the difference between their objective-function values is below this criterion;
+# if so, the best match is probably the one with the highest MAC rather than the
+# smallest frequency difference.
diff --git a/src/methods/model_update_module.py b/src/methods/model_update_module.py
index 652630c..2342816 100644
--- a/src/methods/model_update_module.py
+++ b/src/methods/model_update_module.py
@@ -1,16 +1,13 @@
 import json
 import threading
-from typing import Any, List, Dict, Tuple, Optional
+from typing import Any, List, Dict, Optional
 import numpy as np
 import paho.mqtt.client as mqtt
 from scipy.optimize import minimize
 from scipy.linalg import eigh
-from methods.constants import MODEL_ORDER, MSTAB_FACTOR, TMAC
-from methods.packages.mode_track import mode_allingment
 from methods.packages.eval_yafem_model import eval_yafem_model
 from methods.packages import model_update
 from methods.constants import X0, BOUNDS
-from data.comm.mqtt import load_config, setup_mqtt_client
 # pylint: disable=C0103, W0603
 
 # Global threading event to wait for OMA data
@@ -58,27 +55,6 @@ def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) ->
     print(f"Error processing OMA message: {e}")
 
 
-def run_mode_track(oma_output: Any) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
-    """
-    Runs the mode tracking algorithm.
-
-    Args:
-        oma_output (Any): OMA output from subscription or elsewhere.
-    Returns:
-        cleaned_values (List[Dict]),
-        median_frequencies (np.ndarray),
-        confidence_intervals (np.ndarray)
-    """
-    mstab = MODEL_ORDER * MSTAB_FACTOR
-    cleaned_values = mode_allingment(oma_output, mstab, TMAC)
-    median_frequencies = np.array([cluster["median"] for cluster in cleaned_values])
-    confidence_intervals = np.array([
-        cluster["original_cluster"]["confidence_interval"]
-        for cluster in cleaned_values
-    ])
-    return cleaned_values, median_frequencies, confidence_intervals
-
-
 # pylint: disable=R0914
 def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]:
     """
@@ -137,41 +113,3 @@ def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]:
     except ValueError as e:
         print(f"Skipping model updating due to error: {e}")
         return None
-
-
-def subscribe_and_get_cleaned_values(config_path: str,
-                                     num_clusters: int = 2) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
-    """
-    Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results.
-
-    Args:
-        config_path (str): Path to config JSON.
-        num_clusters (int): Number of clusters to keep after mode tracking.
-
-    Returns:
-        cleaned_values (List[Dict]),
-        median_frequencies (np.ndarray),
-        confidence_intervals (np.ndarray)
-    """
-    global oma_output_global
-    oma_output_global = None  # Reset in case old data is present
-    result_ready.clear()
-
-    config = load_config(config_path)
-    mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0)
-
-    mqtt_client.user_data_set({"topic": selected_topic, "qos": 0})
-    mqtt_client.on_connect = _on_connect
-    mqtt_client.on_message = _on_message
-    mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60)
-    mqtt_client.loop_start()
-    print("Waiting for OMA data...")
-    result_ready.wait()  # Wait until message arrives
-    mqtt_client.loop_stop()
-    mqtt_client.disconnect()
-
-    if oma_output_global is None:
-        raise RuntimeError("Failed to receive OMA data.")
-
-    print("OMA data received. Running mode tracking...")
-    return run_mode_track(oma_output_global)
diff --git a/src/methods/packages/clustering.py b/src/methods/packages/clustering.py
new file mode 100644
index 0000000..36642a5
--- /dev/null
+++ b/src/methods/packages/clustering.py
@@ -0,0 +1,1206 @@
+from typing import Any
+import numpy as np
+
+# Following the algorithm proposed here: https://doi.org/10.1007/978-3-031-61421-7_56
+# JVM 10/10/2025
+
+def cluster_func(oma_results: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Clustering of OMA results
+
+    Args:
+        oma_results (dict): PyOMA results
+        Params (dict): Algorithm parameters
+    Returns:
+        cluster_dict (dict): Dictionary of clusters after clustering, alignment
+            and a cardinality check, sorted by ascending median frequency
+
+    """
+
+    # Preliminary cleaning (complex conjugates are removed inside remove_highly_uncertain_points)
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_highly_uncertain_points(oma_results,Params)
+
+    # Transpose, flip and sort the arrays, such that they map directly to the stabilization diagram.
+    # This means that the frequency array maps directly to the plot:
+    # MO.
+    # 5.| x x
+    # 4.| x
+    # 3.| x
+    # 2.| x
+    # 1.|
+    # 0.|
+    # -1----4------- Frequency
+    # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3.
+ # 6 for each model order, including 0 and 3 for maximum poles in a modelorder + # The frequency array will then become: + # _0_1_ + # 0| 1 4 + # 1| 1 Nan + # 0| 1 Nan + # 0| Nan 4 + # 0| Nan Nan + # 0| Nan Nan + + frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_oma_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes) + + row, col = np.indices(model_orders.shape) + row = row.flatten(order="C") + col = col.flatten(order="C") + + #Initiate data + data1 = {'frequencies':frequencies, + 'damping_ratios':damping_ratios, + 'cov_f':cov_freq, + 'cov_d':cov_damping, + 'mode_shapes':mode_shapes2, + 'row':row, + 'col':col} + + cluster_dict = {} + cluster_counter = 0 + for count, f in enumerate(frequencies.flatten(order="f")): #np.count_nonzero(~np.isnan(frequencies)) + + #print("\nIteration",count,"Unclustered poles:",np.count_nonzero(~np.isnan(frequencies))) + + #Extract data + frequencies = data1['frequencies'] + damping_ratios = data1['damping_ratios'] + cov_freq = data1['cov_f'] + cov_damping = data1['cov_d'] + + #Inital point + r = row[count] + c = col[count] + ip = [frequencies[r,c],cov_freq[r,c],damping_ratios[r,c],cov_damping[r,c]] + + if np.isnan(ip[0]) == True: #Pass if the pole does not exist. + pass + else: + initial_points = cluster_initial(ip,data1) #Algorithm. 1 step 3 - Initialization + + #Creating clusters + cluster1 = cluster_creation(initial_points,Params) + + data2 = data1.copy() + + # Cluster expansion + expansion = True + kk = 0 + while expansion: + kk += 1 + if kk > 10: + print("Expansion never ends, something is wrong.") + breakpoint() + pre_cluster = cluster1 + cluster2 = cluster_expansion(cluster1,data2,Params,oma_results) + if cluster2['f'].shape == pre_cluster['f'].shape: + if (cluster2['f'] == pre_cluster['f']).all(): + expansion = False + else: + cluster1 = cluster2 + else: + cluster1 = cluster2 + + #Sort if more than one pole exist in the cluster + if isinstance(cluster2['f'],np.ndarray): + cluster2 = sort_cluster(cluster2) + + #Save cluster + if isinstance(cluster2['f'],np.ndarray): #Must atleast have two poles + #print("Cluster saved", np.median(cluster2['f'])) + cluster_dict[str(cluster_counter)] = cluster2 + cluster_counter += 1 + data1 = remove_data_from_S(data2,cluster2) #Remove clustered poles from data + else: + print("cluster2 too short:",1,"But must be:",Params['mstab']) + + + #Allignment or merging of stacked clusters + cluster_dict2 = alignment(cluster_dict.copy(),Params) + #Median filter + #cluster_dict3 = median_filter(cluster_dict2.copy()) + + #Custom cardinality check + cluster_dict3 = {} + cluster_counter = 0 + for ii, key in enumerate(cluster_dict2.keys()): + cluster = cluster_dict2[key] + if isinstance(cluster['f'],np.ndarray): + if cluster['f'].shape[0] < Params['mstab']: + print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) + else: + print("Cluster saved", np.median(cluster['f'])) + cluster_dict3[str(ii)] = cluster + cluster_counter += 1 + data1 = remove_data_from_S(data2,cluster) #Remove clustered poles from data + else: + print("cluster too short:",1,"But must be:",Params['mstab']) + cluster_dict2.pop(key) + + #Add median and confidence intervals (one sided) to cluster data + for key in cluster_dict3.keys(): + cluster = cluster_dict3[key] + cluster['median_f'] = np.median(cluster['f']) + # ci_f_upper = [] + # ci_f_lower = [] + # ci_d_upper = [] + # ci_d_lower = [] + # for ii, cov_f in enumerate(cluster['cov_f']): + # 
ci_f_upper.append(np.sqrt(cov_f) * Params['bound_multiplier']) + # ci_f_lower.append(np.sqrt(cov_f) * Params['bound_multiplier']) + # ci_d_upper.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) + # ci_d_lower.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) + ci_f = np.sqrt(cluster['cov_f']) * Params['bound_multiplier'] + ci_d = np.sqrt(cluster['cov_d']) * Params['bound_multiplier'] + cluster['ci_f'] = ci_f + cluster['ci_d'] = ci_d + + #Sort the clusters into accending order of median frequency + median_frequencies = np.zeros(len(cluster_dict3)) + for ii, key in enumerate(cluster_dict3.keys()): + cluster = cluster_dict3[key] + median_frequencies[ii] = cluster['median_f'] + + indices = np.argsort(median_frequencies) + cluster_dict4 = {} + for ii, id in enumerate(np.array(list(cluster_dict3.keys()))[indices]): #Rename all cluster dict from 0 to len(cluster_dict2) + cluster_dict4[ii] = cluster_dict3[id] #Insert a cluster into a key + + return cluster_dict4 + +def calculate_mac(reference_mode: np.array, mode_shape: np.array) -> float: + """ + Calculate Modal Assurance Criterion (MAC) + + Args: + reference_mode (np.array): Mode shape to compare to + mode_shape (np.array): Mode shape to compare + Returns: + MAC (float): Modal Assurance Criterion + + """ + numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2 + denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape) + return np.real(numerator / denominator) + +def cluster_initial(ip: list[float], data: dict[str,Any], bound: float = 2) -> dict[str,Any]: + """ + Find the initial cluster points + + Args: + ip (list): Frequency, damping and covariance for the inital point (ip) + data (dict): OMA points data + bound (float): Multiplier on standard deviation + Returns: + initial_points (float): Initial points to create cluster from + + """ + #Extract data of initial point + ip_f = ip[0] + ip_cov_f = ip[1] + ip_d = ip[2] + ip_cov_d = ip[3] + + # Confidence interval using the ±2*standard_deviation + f_lower_bound = ip_f - bound * np.sqrt(ip_cov_f) + f_upper_bound = ip_f + bound * np.sqrt(ip_cov_f) + z_lower_bound = ip_d - bound * np.sqrt(ip_cov_d) + z_upper_bound = ip_d + bound * np.sqrt(ip_cov_d) + + + frequencies = data['frequencies'] + damping_ratios = data['damping_ratios'] + + # Find elements within the current limit that are still ungrouped + condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound)# & ungrouped_mask + indices = np.argwhere(condition_mask) # Get indices satisfying the condition + + #Generate the data for inital points + initial_points = {} + initial_points['f'] = data['frequencies'][condition_mask] + initial_points['cov_f'] = data['cov_f'][condition_mask] + initial_points['d'] = data['damping_ratios'][condition_mask] + initial_points['cov_d'] = data['cov_d'][condition_mask] + initial_points['ms'] = data['mode_shapes'][condition_mask,:] + initial_points['row'] = indices[:,0] + initial_points['col'] = indices[:,1] + + return initial_points + +def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Algorithm 2 + """ + Create cluster + + Args: + IP (dict): Dictionary of data on inital points + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Cluster + + """ #Algorithm 2 + #print("\nCluster creation") + #Extract data: + frequencies = IP['f'] + cov_f = IP['cov_f'] + damping_ratios = IP['d'] + 
cov_d = IP['cov_d'] + mode_shapes = IP['ms'] + row = IP['row'] + col = IP['col'] + + IPu = {} + if len(row) != len(set(row)): #line 5 in algorithm #If there are multiple points at the same model order + for ii, id in enumerate(row): #Go through all rows/model orders + pos = np.argwhere(row==id) #Locate the indices of one or more poles + #line 6 in algorithm + if len(pos) == 1: #If only 1 pole exist at the model order + if len(IPu) == 0: #First pole + IPu['f'] = frequencies[ii] + IPu['cov_f'] = cov_f[ii] + IPu['d'] = damping_ratios[ii] + IPu['cov_d'] = cov_d[ii] + IPu['ms'] = np.array((mode_shapes[ii,:])) + IPu['row'] = row[ii] + IPu['col'] = col[ii] + unique = 1 #To determine if the unique poles are more than one, for later use. if 1 then only one unique pole exist + else: + IPu['f'] = np.append(IPu['f'],frequencies[ii]) + IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii]) + IPu['d'] = np.append(IPu['d'],damping_ratios[ii]) + IPu['cov_d'] = np.append(IPu['cov_d'],cov_d[ii]) + IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:])) + IPu['row'] = np.append(IPu['row'],row[ii]) + IPu['col'] = np.append(IPu['col'],col[ii]) + unique = 2 #To determine if the unique poles are more than one, for later use. if 2 more than one uniqe pole exist + + if len(IPu) > 0: #If there exist model orders with unique poles + if unique == 1: #If there only exist one unique pole + cluster = {'f':np.array([IPu['f']]), + 'cov_f':np.array([IPu['cov_f']]), + 'd':np.array([IPu['d']]), + 'cov_d':np.array([IPu['cov_d']]), + 'mode_shapes':np.array([IPu['ms']]), + 'model_order':np.array([Params['model_order']-IPu['row']]), + 'row':np.array([IPu['row']]), + 'col':np.array([IPu['col']]), + 'MAC':np.array([1])} + # print("371, IPu",cluster['f'],cluster['row']) + else: #If more unique poles exist + cluster = {'f':np.array([IPu['f'][0]]), + 'cov_f':np.array([IPu['cov_f'][0]]), + 'd':np.array([IPu['d'][0]]), + 'cov_d':np.array([IPu['cov_d'][0]]), + 'mode_shapes':np.array([IPu['ms'][0,:]]), + 'model_order':np.array([Params['model_order']-IPu['row'][0]]), + 'row':np.array([IPu['row'][0]]), + 'col':np.array([IPu['col'][0]]), + 'MAC':np.array([1])} + # print("381, IPu",cluster['f'],cluster['row']) + # print("IPu",IPu['row']) + # if cluster['f'][0] > 300: + # breakpoint() + cluster, non_clustered_IPu = cluster_from_mac(cluster,IPu,Params) #cluster the unique poles + + else: #if no unique poles exist then go forth with the initial point, ip. 
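+            # Fallback: no model order carries a single unique pole, so the cluster
+            # is seeded from the initial point (ip) alone; the competing poles at
+            # ip's model order are removed from consideration below.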
+ #Only the initial point is clustered + cluster = {'f':np.array([frequencies[0]]), + 'cov_f':np.array([cov_f[0]]), + 'd':np.array([damping_ratios[0]]), + 'cov_d':np.array([cov_d[0]]), + 'mode_shapes':np.array([mode_shapes[0,:]]), + 'model_order':np.array([Params['model_order']-row[0]]), + 'row':np.array([row[0]]), + 'col':np.array([col[0]]), + 'MAC':np.array([1])} + + #Check if there are multiple points with same model order as ip + ip_ids = np.argwhere(row==row[0]) + if len(ip_ids[:,0]) > 1: # Remove all the other points at the same model order + for ii in ip_ids[1:,0]: + try: + frequencies = np.delete(frequencies,ii) + cov_f = np.delete(cov_f,ii) + damping_ratios = np.delete(damping_ratios,ii) + cov_d = np.delete(cov_d,ii) + mode_shapes = np.delete(mode_shapes,ii,axis=0) + row = np.delete(row,ii) + col = np.delete(col,ii) + except: + breakpoint() + # print("379,ip is alone",cluster['row'],row) + + + # try: + # print("Cluster after IPu",cluster['row']) + # except: + # pass + + if len(row) != len(set(row)): #If there still are points at the same model order in IP + IPm = {} + for ii, id in enumerate(row): #Go through all rows/model orders + pos = np.argwhere(row==id) #Locate the indices of one or more poles + #line 6 in algorithm + if len(pos) > 1: #If more than one pole exist for the model order + if len(IPm) == 0: #First pole + IPm['f'] = frequencies[ii] + IPm['cov_f'] = cov_f[ii] + IPm['d'] = damping_ratios[ii] + IPm['cov_d'] = cov_d[ii] + IPm['ms'] = np.array((mode_shapes[ii,:])) + IPm['row'] = row[ii] + IPm['col'] = col[ii] + else: + IPm['f'] = np.append(IPm['f'],frequencies[ii]) + IPm['cov_f'] = np.append(IPm['cov_f'],cov_f[ii]) + IPm['d'] = np.append(IPm['d'],damping_ratios[ii]) + IPm['cov_d'] = np.append(IPm['cov_d'],cov_d[ii]) + IPm['ms'] = np.vstack((IPm['ms'],np.array(mode_shapes[ii,:]))) + IPm['row'] = np.append(IPm['row'],row[ii]) + IPm['col'] = np.append(IPm['col'],col[ii]) + # After the unique poles are clustered, the multiple poles are clusterd + # try: + # print("IPu",IPu['f'],IPu['row']) + # except: + # print("No IPu") + # try: + # print("IPm",IPm['f'],IPm['row']) + # except: + # print("No IPm") + # print("to compare",cluster['f'][0],cluster['row'][0]) + cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,IPm,Params) + + + + #Start while loop + cluster_len_before = 0 + while len(cluster['row']) != cluster_len_before: + # print(len(cluster['row']),cluster_len_before) + # print("c", cluster['row']) + # try: + # print("u", non_clustered_IPu['row']) + # except: + # pass + # try: + # print("m", non_clustered_IPm['row']) + # except: + # pass + + cluster_len_before = len(cluster['row']) + try: + if len(non_clustered_IPu['row']) > 0: + cluster, non_clustered_IPu = cluster_from_mac(cluster,non_clustered_IPu,Params) #cluster the unique poles again + except: + pass + if len(non_clustered_IPm['row']) > 0: + cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,non_clustered_IPm,Params) #cluster the non-unique poles again + + else: #line 1 in algorithm: only unique poles + cluster = {'f':np.array([frequencies[0]]), + 'cov_f':np.array([cov_f[0]]), + 'd':np.array([damping_ratios[0]]), + 'cov_d':np.array([cov_d[0]]), + 'mode_shapes':np.array([mode_shapes[0,:]]), + 'model_order':np.array([Params['model_order']-row[0]]), + 'row':np.array([row[0]]), + 'col':np.array([col[0]]), + 'MAC':np.array([1])} + if IP['f'].shape[0] > 1: + cluster, _ = cluster_from_mac(cluster,IP,Params) + + #Here lies the algorithms cardinality check + # print(cluster) + # if cluster['f'].shape[0] < 
Params['mstab']: + # print("cluster too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) + # cluster = {} + + return cluster + +def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: + """ + Add points to cluster based on MAC + + Args: + cluster (dict): Intermediate cluster + IP (dict): Dictionary of data on inital points + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Intermediate cluster + + """ + + #Extract data + frequencies = IP['f'] + cov_f = IP['cov_f'] + damping_ratios = IP['d'] + cov_d = IP['cov_d'] + mode_shapes = IP['ms'] + row = IP['row'] + col = IP['col'] + + ip_ms = IP['ms'][0] + i_ms = IP['ms'][1:] + f_ip = frequencies[0] + f_i = frequencies[1:] + row_i = row[1:] + # print(cluster['row']) + # print(IP['ms'].shape) + + skip_id = [] + + for jj, ms in enumerate(i_ms): #Go through all mode shapes in cluster + idx = jj+1 + MAC = calculate_mac(ip_ms,ms) #Does the mode shape match with the first pole + # print(row_i[jj],MAC) + if MAC > Params['tMAC']: #line 2 in algorithm + #Add to cluster + cluster['f'] = np.append(cluster['f'],frequencies[idx]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx]) + cluster['d'] = np.append(cluster['d'],damping_ratios[idx]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx]) + cluster['row'] = np.append(cluster['row'],row[idx]) + cluster['col'] = np.append(cluster['col'],col[idx]) + + skip_id.append(idx) + + + #IP['ms'] = np.delete(IP['ms'],skip_id,axis=0) + + # print(cluster['row']) + # print(IP['ms'].shape) + # print("skip_id",skip_id) + #Compare remaining points with newly added cluster points, i.e. 
points are compared with the full cluster, not just ip + if cluster['f'].shape[0] > 1: #If points have been added to cluster proceed + if IP['ms'].shape[0] > len(skip_id): #If there are more points to compare left, then proceed + unclustered_points = 1 + while IP['ms'].shape[0] != unclustered_points: #Run until no points are clustered anymore + unclustered_points = IP['ms'].shape[0] + + i_ms = IP['ms'][1:] + for jj, ms in enumerate(i_ms): + idx = jj+1 + if idx in skip_id: + # print(idx) + continue + + MAC_list = [] + for c_ms in cluster['mode_shapes']: + MAC_list.append(calculate_mac(c_ms,ms)) + + # print("MAC_list",MAC_list) + if max(MAC_list) > Params['tMAC']: #line 2 in algorithm + #Add to cluster + cluster['f'] = np.append(cluster['f'],frequencies[idx]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx]) + cluster['d'] = np.append(cluster['d'],damping_ratios[idx]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx]) + cluster['row'] = np.append(cluster['row'],row[idx]) + cluster['col'] = np.append(cluster['col'],col[idx]) + + skip_id.append(idx) + + #IP['ms'] = np.delete(IP['ms'],skip_id,axis=0) + + # skip_id.insert(0,0) + # skip_id_array = np.array(skip_id) + + # all_id = np.array(list(range(len(row)))) + # unclustered_id = np.delete(all_id,skip_id_array) + + clustered_id = [] + for r2 in cluster['row']: #For every entry in row cluster + unclustered_point = False + for ii, r1 in enumerate(IP['row']): #For every entry in row IPu + if r1 == r2: #If r1 is a entry of "row" in the cluster, then save that row for later. 
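+                # Record the IP index of every pole that ended up in the cluster, so
+                # that the complement (the still-unclustered points) can be returned.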
+ clustered_id.append(ii) + + all_id = np.array(list(range(len(IP['row'])))) + + clustered_id = np.array(clustered_id) + if clustered_id.shape[0] > 0: + unclustered_id = np.delete(all_id,clustered_id) + unclustered_id = np.insert(unclustered_id,0,0) + else: + unclustered_id = all_id + + unclustered_IPu = {} + unclustered_IPu['f'] = IP['f'][unclustered_id] + unclustered_IPu['cov_f'] = IP['cov_f'][unclustered_id] + unclustered_IPu['d'] = IP['d'][unclustered_id] + unclustered_IPu['cov_d'] = IP['cov_d'][unclustered_id] + unclustered_IPu['ms'] = IP['ms'][unclustered_id] + unclustered_IPu['row'] = IP['row'][unclustered_id] + unclustered_IPu['col'] = IP['col'][unclustered_id] + + return cluster, unclustered_IPu + +def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: + """ + Cluster based on MAC if multiple poles exist for the model order + + Args: + cluster (dict): Intermediate cluster + IP (dict): Dictionary of data on inital points + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Intermediate cluster + + """ + #Cluster based on MAC if multiple poles exist for the model order + # print("cluster_IPm") + #Extract data + frequencies = IPm['f'] + cov_f = IPm['cov_f'] + damping_ratios = IPm['d'] + cov_d = IPm['cov_d'] + mode_shapes = IPm['ms'] + row = IPm['row'] + col = IPm['col'] + + # if isinstance(cluster['f'],np.ndarray): + # ip_ms = cluster['mode_shapes'][0,:] #Mode shape of the first pole + # else: + # ip_ms = cluster['mode_shapes'] #Mode shape of the first pole + + # Find the model orders with multiple poles + pos = [] + for ii, idd in enumerate(set(row)): + pos.append(np.argwhere(row==idd)) + + skip_id = [] + skip_id_before = None + while skip_id != skip_id_before: + ip_ms = cluster['mode_shapes'] + if isinstance(cluster['f'],np.ndarray): + ip_ms_0 = ip_ms[0,:] #Mode shape of the first pole + else: + ip_ms_0 = ip_ms #Mode shape of the first pole + + i_ms = IPm['ms'][:] #Mode shape of the model orders with mutiple poles + + + skip_id_before = skip_id.copy() + # print("Cluster in IPm",cluster['row']) + + + #Go through all the model orders + for oo, pos_i in enumerate(pos): + MAC = np.zeros(pos_i.shape[0]) + # print("IPm model order",list(set(row))[oo]) + + if oo in skip_id: #Skip these model orders, since they have already been added. 
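+                # Each model order is merged into the cluster at most once per pass
+                # of the surrounding while loop.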
+ continue + + pos_i = pos_i[:,0] + for ii, id_row in enumerate(pos_i): + #print(IPm['row'][id_row],id_row) + #print(ip_ms.shape,i_ms[id_row].shape) + MAC[ii] = calculate_mac(ip_ms_0,i_ms[id_row]) #Calculate MAC between first pole of cluster and a pole in IPm + + #If MAC is not satisfied + if MAC[ii] < Params['tMAC']: #Search for max across all mode shapes in cluster: + #line 3 in algorithm + MAC_list = [] + for ms in ip_ms: + MAC_list.append(calculate_mac(ms,i_ms[id_row])) + MAC[ii] = max(MAC_list) + + #Find the mask for the poles that meets the MAC criteria + mask = MAC > Params['tMAC'] + pos_MAC = np.argwhere(mask==True) #Get indicies + + #Formatting of the indicies + if pos_MAC.shape[0] > 1: #more than one indice + pos_MAC = pos_MAC[:,0] + else: #Only one or zero indice (No MAC match) + if pos_MAC.shape[0] == 1: + pos_MAC = pos_MAC[0] + + # print("MAC",MAC) + # print("MACpos",pos_MAC) + if pos_MAC.shape[0] > 1: #If multiple poles comply with MAC criteria + #ids formatting + ids = pos_i[pos_MAC] + #ids = ids[:,0] + + #Get frequencies of poles + freq = np.zeros(ids.shape[0]) + for jj, idid in enumerate(ids): + freq[jj] = frequencies[idid] + median_f = np.median(cluster['f']) + + #Locate the index of the closest pole + idx = (np.abs(freq - median_f)).argmin() + ll = pos_i[pos_MAC[idx]] + + # print("IPm point mac approved",row[ll],frequencies[ll],MAC) + + #Add this pole to the cluster + cluster['f'] = np.append(cluster['f'],frequencies[ll]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) + cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[idx]]) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) + cluster['row'] = np.append(cluster['row'],row[ll]) + cluster['col'] = np.append(cluster['col'],col[ll]) + + skip_id.append(oo) + + elif pos_MAC.shape[0] == 1: #If only one pole complies with MAC + ll = pos_i[pos_MAC[0]] + + + # print("IPm point mac approved",row[ll],frequencies[ll],MAC) + + + #Add this pole to the cluster + cluster['f'] = np.append(cluster['f'],frequencies[ll]) + cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) + cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) + cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) + cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) + cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[0]]) + cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) + cluster['row'] = np.append(cluster['row'],row[ll]) + cluster['col'] = np.append(cluster['col'],col[ll]) + + skip_id.append(oo) + # else: + # print("Not clustered. MAC not satisfied") + # print("skip",skip_id) + + clustered_id = [] + for r2 in cluster['row']: #For every entry in row cluster + unclustered_point = False + for ii, r1 in enumerate(IPm['row']): #For every entry in row IPm + if r1 == r2: #If r1 is a entry of "row" in the cluster, then save that row for later. 
+ clustered_id.append(ii) + + all_id = np.array(list(range(len(IPm['row'])))) + + clustered_id = np.array(clustered_id) + if clustered_id.shape[0] > 0: + unclustered_id = np.delete(all_id,clustered_id) + else: + unclustered_id = all_id + # print("709,unclustered_id",unclustered_id) + + unclustered_IPm = {} + unclustered_IPm['f'] = IPm['f'][unclustered_id] + unclustered_IPm['cov_f'] = IPm['cov_f'][unclustered_id] + unclustered_IPm['d'] = IPm['d'][unclustered_id] + unclustered_IPm['cov_d'] = IPm['cov_d'][unclustered_id] + unclustered_IPm['ms'] = IPm['ms'][unclustered_id] + unclustered_IPm['row'] = IPm['row'][unclustered_id] + unclustered_IPm['col'] = IPm['col'][unclustered_id] + + # print("unclustered_IPm['row']",unclustered_IPm['row']) + + + return cluster, unclustered_IPm + +def remove_data_from_S(data: dict[str,Any],cluster: dict[str,Any]) -> dict[str,Any]: + """ + Remove cluster from data or S + + Args: + data (dict): OMA points data + cluster (dict): cluster + Returns: + data2 (dict): Filtered OMA points data + + """ + #Copy data + frequencies = data['frequencies'].copy() + damping_ratios = data['damping_ratios'].copy() + cov_freq = data['cov_f'].copy() + cov_damping = data['cov_d'].copy() + mode_shapes = data['mode_shapes'].copy() + row = data['row'].copy() + col = data['col'].copy() + #Make new data dictionary + data2 = {'frequencies':frequencies, + 'damping_ratios':damping_ratios, + 'cov_f':cov_freq, + 'cov_d':cov_damping, + 'mode_shapes':mode_shapes, + 'row':row, + 'col':col} + #Remove data + row = cluster['row'] + col = cluster['col'] + for ii, r in enumerate(row): + c = col[ii] + data2['frequencies'][r,c] = np.nan + data2['damping_ratios'][r,c] = np.nan + data2['cov_f'][r,c] = np.nan + data2['cov_d'][r,c] = np.nan + data2['mode_shapes'][r,c,:] = np.nan + + return data2 + +def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[str,Any], oma_results) -> dict[str,Any]: + """ + Expand cluster based on minima and maxima bound + + Args: + cluster (dict): Intermediate cluster + data (dict): OMA points data + Params (dict): Dictionary of algorithm parameters + Returns: + cluster (dict): Expanded cluster + + """ + #print("\nExpansion") + unClustered_frequencies = data['frequencies'] + unClustered_damping = data['damping_ratios'] + + freq_c = cluster['f'] + cov_f = cluster['cov_f'] + damp_c = cluster['d'] + cov_d = cluster['cov_d'] + row = cluster['row'] + + bound_multiplier = Params['bound_multiplier'] + + #Find min-max bounds of cluster + f_lower_bound = np.min(freq_c - bound_multiplier * np.sqrt(cov_f)) # Minimum of all points for frequencies + f_upper_bound = np.max(freq_c + bound_multiplier * np.sqrt(cov_f)) # Maximum of all points for frequencies + d_lower_bound = np.min(damp_c - bound_multiplier * np.sqrt(cov_d)) # Minimum of all points for damping + d_upper_bound = np.max(damp_c + bound_multiplier * np.sqrt(cov_d)) # Maximum of all points for damping + + #Mask of possible expanded poles + condition_mask = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= d_lower_bound) & (unClustered_damping <= d_upper_bound) + # Get indices satisfying the condition + expanded_indices = np.argwhere(condition_mask) + + #Initiate cluster_points for cluster creation + cluster_points = {} + cluster_points['f'] = data['frequencies'][condition_mask] + cluster_points['cov_f'] = data['cov_f'][condition_mask] + cluster_points['d'] = data['damping_ratios'][condition_mask] + cluster_points['cov_d'] = 
data['cov_d'][condition_mask] + cluster_points['ms'] = data['mode_shapes'][condition_mask,:] + cluster_points['row'] = expanded_indices[:,0] + cluster_points['col'] = expanded_indices[:,1] + + #print(cluster_points['f']) + #print(cluster_points['row']) + + #Make the first ip from cluster be the previous first point in cluster_points + if isinstance(cluster['f'],np.ndarray): + index_f = np.argwhere(cluster_points['f'] == cluster['f'][0]) + else: + index_f = np.argwhere(cluster_points['f'] == cluster['f']) + if len(index_f[:,0]) > 1: + index_row = np.argwhere(cluster_points['row'][index_f[:,0]] == cluster['row'][0]) + ip_id = int(index_f[index_row[:,0]][:,0]) + else: + ip_id = int(index_f[:,0]) + indecies = list(range(len(cluster_points['f']))) + poped_id = indecies.pop(ip_id) + indecies.insert(0,poped_id) + indecies = np.array(indecies) + + cluster_points['f'] = cluster_points['f'][indecies] + cluster_points['cov_f'] = cluster_points['cov_f'][indecies] + cluster_points['d'] = cluster_points['d'][indecies] + cluster_points['cov_d'] = cluster_points['cov_d'][indecies] + cluster_points['ms'] = cluster_points['ms'][indecies,:] + cluster_points['row'] = cluster_points['row'][indecies] + cluster_points['col'] = cluster_points['col'][indecies] + + # print("row_before",cluster_points['row']) + #print("exp1",cluster_points['f']) + + #Check if these values can be clustered + cluster = cluster_creation(cluster_points,Params) + if isinstance(cluster['f'],np.ndarray): + if len(cluster['row']) != len(set(cluster['row'])): + print("row_before",cluster_points['row']) + print("row_after",cluster['row']) + print("exp2",cluster['f']) + print("double orders",cluster['row']) + + breakpoint() + + # print("row_before",cluster_points['row']) + #print("exp1",cluster_points['f']) + # print("row_after",cluster['row']) + # print("exp2",cluster['f']) + + return cluster + +def sort_cluster(cluster: dict[str,Any]) -> dict[str,Any]: + """ + Sort cluster based on row/model order + + Args: + cluster (dict): Cluster + Returns: + cluster (dict): Sorted cluster + + """ + sort_id = np.argsort(cluster['row']) + + cluster['f'] = cluster['f'][sort_id] + cluster['cov_f'] = cluster['cov_f'][sort_id] + cluster['d'] = cluster['d'][sort_id] + cluster['cov_d'] = cluster['cov_d'][sort_id] + cluster['mode_shapes'] = cluster['mode_shapes'][sort_id,:] + cluster['MAC'] = cluster['MAC'][sort_id] + cluster['model_order'] = cluster['model_order'][sort_id] + cluster['row'] = cluster['row'][sort_id] + cluster['col'] = cluster['col'][sort_id] + + return cluster + +def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,dict]: + """ + Alignment/merging of clusters + + Args: + cluster_dict (dict): Dictionary of multiple clusters + Params (dict): Dictionary of algorithm parameters + Returns: + cluster_dict (dict): Dictionary of aligned clusters + + """ + #print("\nCluster alignment") + median_f = [] + for key in cluster_dict.keys(): #Find the median of each cluster + cluster = cluster_dict[key] + median_f.append(np.median(cluster['f'])) + median_f = np.array(median_f) + + deleted_cluster_id = [] + for ii, m_f in enumerate(median_f): #Go through all medians + if ii in deleted_cluster_id: #If cluster is deleted pass on + #print(deleted_cluster_id) + continue + # Calculate absolute difference of selected median and all medians + diff = abs(median_f-m_f) + # If this difference is above 0 (not itself) and inside the bounds: + # Bounds are the minimum of either median_f * allignment_factor_0 or Sampling frequency / 2 * 
allignment_factor_1
+        # For lower median frequencies the bound is determined by the size of the median frequency.
+        # For higher median frequencies the bound is determined by the sampling frequency.
+
+        mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1]))
+        indices = np.argwhere(mask)  # Indices of clusters that are closely located in frequency
+
+        if indices.shape[0] > 0:  # If one or more clusters are found
+            ids = indices[:,0]
+            for id in ids:  # Go through all clusters that are closely located
+                if id in deleted_cluster_id:
+                    continue
+
+                cluster1 = cluster_dict[str(ii)]  # Parent cluster
+                cluster2 = cluster_dict[str(id)]  # Co-located cluster
+
+                # Proposed method (currently disabled):
+                # for r in cluster2['model_order']:
+                #     if r in cluster1['model_order']:  # If the two clusters have poles at the same model order, skip the alignment
+                #         print("Clusters have the same MO",cluster2['model_order'],cluster1['model_order'])
+                #         break_loop = 1
+                # if break_loop == 1:
+                #     break
+
+                MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0])  # Compare the mode shape of the first pole in each cluster
+                if MAC >= Params['tMAC']:  # If the MAC meets the criterion, join the two clusters
+                    cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                    cluster_dict[str(ii)] = cluster  # Save the new, larger cluster
+                    if len(cluster_remaining) == 0:  # If the remaining cluster is empty
+                        cluster_dict.pop(str(id), None)  # Remove the co-located cluster
+                        deleted_cluster_id.append(int(id))  # Record the deleted cluster id
+                    else:
+                        cluster_dict[str(id)] = cluster_remaining  # Save the remaining cluster
+
+                else:  # Check whether the mode shapes of any pair of poles meet the MAC criterion
+
+                    MAC = np.zeros((cluster1['mode_shapes'].shape[0],cluster2['mode_shapes'].shape[0]))
+                    for jj, ms1 in enumerate(cluster1['mode_shapes']):
+                        for kk, ms2 in enumerate(cluster2['mode_shapes']):
+                            MAC[jj,kk] = calculate_mac(ms1,ms2)
+                    if MAC.max() >= Params['tMAC']:  # If the MAC criterion is met, join the clusters
+                        cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                        cluster_dict[str(ii)] = cluster  # Save the new, larger cluster
+                        if len(cluster_remaining) == 0:  # If the remaining cluster is empty
+                            cluster_dict.pop(str(id), None)  # Remove the co-located cluster
+                            deleted_cluster_id.append(int(id))  # Record the deleted cluster id
+                        else:
+                            cluster_dict[str(id)] = cluster_remaining  # Save the remaining cluster
+
+    cluster_dict_aligned = cluster_dict
+    return cluster_dict_aligned
+
+def join_clusters(cluster_1: dict[str,Any], cluster_2: dict[str,Any], Params: dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any]]:
+    """
+    Join two clusters
+
+    Args:
+        cluster_1 (dict): Cluster
+        cluster_2 (dict): Cluster
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Joined cluster
+        cluster_remaining (dict): The poles that remain outside the joined cluster
+
+    """
+    cluster = {}
+    cluster_remaining = {}
+    row1 = cluster_1['row']
+    row2 = cluster_2['row']
+
+    # Should the dominant cluster instead be the one that reaches the highest model order?
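+    # Current behaviour (below): the cluster with more poles is dominant, and its
+    # median frequency decides, per model order, which of two competing poles is kept.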
+        if row1.shape[0] >= row2.shape[0]: #Let the largest cluster be the dominant one
+            cluster1 = cluster_1
+            cluster2 = cluster_2
+            row1 = cluster_1['row']
+            row2 = cluster_2['row']
+        else:
+            cluster1 = cluster_2
+            cluster2 = cluster_1
+            row1 = cluster_2['row']
+            row2 = cluster_1['row']
+
+        median_f1 = np.median(cluster1['f'])
+
+        for MO in range(Params['model_order']): #Go through all poles in a cluster
+            jj = np.argwhere(row1 == MO)
+            id = np.argwhere(row2 == MO)
+            if MO in row1: #If a pole in the largest cluster exists for this model order
+                r1 = MO
+                if MO in row2: #If a pole also exists in the same model order
+                    #Get frequencies of the poles
+                    f1 = cluster1['f'][jj[:,0]]
+                    f2 = cluster2['f'][id[:,0]]
+                    if abs(median_f1-f2) >= abs(median_f1-f1): #If the pole in cluster 1 is closer to the median of cluster 1
+                        cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+                        cluster_remaining = append_cluster_data(cluster_remaining,cluster2,id[:,0])
+                    else: #If the pole in cluster 2 is closer to the median of cluster 1
+                        cluster = append_cluster_data(cluster,cluster2,id[:,0])
+                        cluster_remaining = append_cluster_data(cluster_remaining,cluster1,jj[:,0])
+                else: #If only one pole exists in the largest cluster
+                    cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+            elif MO in row2: #If a pole in the smallest cluster exists for the model order
+                cluster = append_cluster_data(cluster,cluster2,id[:,0])
+
+    return cluster, cluster_remaining
+
+def append_cluster_data(cluster: dict[str,Any], cluster2: dict[str,Any], id: Any) -> dict[str,Any]:
+    """
+    Add cluster data to an existing cluster
+
+    Args:
+        cluster (dict): Existing cluster
+        cluster2 (dict): Cluster to take the data from
+        id (int or np.ndarray): Index or indices of the data to append
+    Returns:
+        cluster (dict): Updated cluster
+
+    """
+    if len(cluster) == 0: #If it is the first pole
+        cluster['f'] = cluster2['f'][id]
+        cluster['cov_f'] = cluster2['cov_f'][id]
+        cluster['d'] = cluster2['d'][id]
+        cluster['cov_d'] = cluster2['cov_d'][id]
+        cluster['mode_shapes'] = cluster2['mode_shapes'][id,:]
+        cluster['MAC'] = cluster2['MAC'][id]
+        cluster['model_order'] = cluster2['model_order'][id]
+        cluster['row'] = cluster2['row'][id]
+        cluster['col'] = cluster2['col'][id]
+    else:
+        cluster['f'] = np.append(cluster['f'],cluster2['f'][id])
+        cluster['cov_f'] = np.append(cluster['cov_f'],cluster2['cov_f'][id])
+        cluster['d'] = np.append(cluster['d'],cluster2['d'][id])
+        cluster['cov_d'] = np.append(cluster['cov_d'],cluster2['cov_d'][id])
+        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],cluster2['mode_shapes'][id,:]))
+        cluster['MAC'] = np.append(cluster['MAC'],cluster2['MAC'][id])
+        cluster['model_order'] = np.append(cluster['model_order'],cluster2['model_order'][id])
+        cluster['row'] = np.append(cluster['row'],cluster2['row'][id])
+        cluster['col'] = np.append(cluster['col'],cluster2['col'][id])
+    return cluster
+
+def median_filter(cluster_dict: dict[str,dict]) -> dict[str,dict]:
+    """
+    Apply a median filter to each cluster
+
+    Args:
+        cluster_dict (dict): Dictionary of multiple clusters
+    Returns:
+        cluster_dict3 (dict): Dictionary of median-filtered clusters
+
+    """
+    print("\nMedian filter")
+    cluster_dict3 = {}
+    for key in cluster_dict.keys():
+        cluster = cluster_dict[key]
+        #print(cluster['mode_shapes'])
+        median_f = np.median(cluster['f']) #Calculate median
+
+        cluster_new = {}
+        for ii, f in enumerate(cluster['f']): #Go through all cluster poles
+            lower_bound = f - np.sqrt(cluster['cov_f'][ii]) * 2
+            upper_bound = f + np.sqrt(cluster['cov_f'][ii]) * 2
+            if (median_f > lower_bound) & (median_f < upper_bound): #Check if the cluster confidence interval wraps the median
+                cluster_new = append_cluster_data(cluster_new,cluster,ii)
+            # else:
+            #     print("not",cluster['model_order'][ii])
+
+        cluster_dict3[key] = cluster_new
+
+    return cluster_dict3
+
+
+def remove_complex_conjugates(oma_results):
+    """
+    Remove complex conjugates
+
+    Args:
+        oma_results (Dict[str, Any]): Results from PyOMA-2
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    OMA = oma_results.copy()
+    # OMA results as numpy array
+    frequencies = OMA['Fn_poles'].copy()
+    cov_freq = OMA['Fn_poles_cov'].copy()
+    damping_ratios = OMA['Xi_poles'].copy()
+    cov_damping = OMA['Xi_poles_cov'].copy()
+    mode_shapes = OMA['Phi_poles'].copy()
+
+    # Remove the complex conjugate entries
+    frequencies = frequencies[::2]  # This is 'S' as per algorithm
+    damping_ratios = damping_ratios[::2]  # This is 'S' as per algorithm
+    mode_shapes = mode_shapes[::2, :, :]
+    cov_freq = cov_freq[::2]
+    cov_damping = cov_damping[::2]
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
+
+def transform_oma_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_):
+    """
+    Transform OMA results
+
+    Args:
+        frequencies_ (np.ndarray): Frequencies (mean)
+        cov_freq_ (np.ndarray): Covariance of frequency
+        damping_ratios_ (np.ndarray): Damping ratios (mean)
+        cov_damping_ (np.ndarray): Covariance of damping ratio
+        mode_shapes_ (np.ndarray): Mode shapes
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    # Transpose, flip and sort the arrays, such that they map directly to the stabilization diagram.
+    # This means that the frequency array maps directly to the plot:
+    # MO.
+    # 5.| x   x
+    # 4.| x
+    # 3.| x
+    # 2.|     x
+    # 1.|
+    # 0.|
+    #  -1----4------- Frequency
+    # The frequency array will then have the shape (6,3): initially (6,6), but the complex conjugates have been removed, so 6 is halved to 3.
+    # 6 rows, one for each model order including 0, and 3 columns for the maximum number of poles in a model order.
+    # The frequency array will then become:
+    #    _0___1_
+    # 5| 1   4
+    # 4| 1   Nan
+    # 3| 1   Nan
+    # 2| Nan 4
+    # 1| Nan Nan
+    # 0| Nan Nan
+
+    #Transformation of data
+    frequencies = np.transpose(frequencies_)
+    frequencies = np.flip(frequencies, 0)
+    sort_indices = np.argsort(frequencies,axis=1)
+    frequencies = np.take_along_axis(frequencies, sort_indices, axis=1)
+    cov_freq = np.transpose(cov_freq_)
+    cov_freq = np.flip(cov_freq, 0)
+    cov_freq = np.take_along_axis(cov_freq, sort_indices, axis=1)
+    damping_ratios = np.transpose(damping_ratios_)
+    damping_ratios = np.flip(damping_ratios, 0)
+    damping_ratios = np.take_along_axis(damping_ratios, sort_indices, axis=1)
+    cov_damping = np.transpose(cov_damping_)
+    cov_damping = np.flip(cov_damping, 0)
+    cov_damping = np.take_along_axis(cov_damping, sort_indices, axis=1)
+    mode_shapes = np.moveaxis(mode_shapes_, [0, 1, 2], [1, 0, 2])
+
+    mode_shapes2 = np.zeros(mode_shapes.shape,dtype=np.complex128)
+    for ii, indices in enumerate(sort_indices):
+        mode_shapes2[ii,:,:] = mode_shapes[(sort_indices.shape[0]-ii-1),indices,:]
+
+    # Array of model orders
+    model_order = np.arange(sort_indices.shape[0])
+    model_orders = np.stack((model_order,) * sort_indices.shape[1], axis=1)
+    model_orders = np.flip(model_orders)
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders
+
+def remove_highly_uncertain_points(oma_results,oma_params):
+    """
+    Remove highly uncertain points
+
+    Args:
+        oma_results (Dict[str, Any]): Results from PyOMA-2
+        oma_params (Dict[str, Any]): Parameters
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_complex_conjugates(oma_results)
+
+    # =================== Removing highly uncertain poles =======================
+    freq_variance_treshold = oma_params.get('freq_variance_treshold', 0.1)
+    damp_variance_treshold = oma_params.get('damp_variance_treshold', 10**6)
+    frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies
+    damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios
+    indices_frequency = frequency_coefficient_variation > freq_variance_treshold
+    indices_damping = damping_coefficient_variation > damp_variance_treshold
+    above_nyquist = frequencies > oma_params['Fs']/2
+    combined_indices = np.logical_or(np.logical_or(indices_frequency,indices_damping),above_nyquist)
+    frequencies[combined_indices] = np.nan
+    damping_ratios[combined_indices] = np.nan
+    cov_freq[combined_indices] = np.nan
+    cov_damping[combined_indices] = np.nan
+    mask = np.broadcast_to(np.expand_dims(combined_indices, axis=2), mode_shapes.shape)
+    mode_shapes[mask] = np.nan
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
\ No newline at end of file
diff --git a/src/methods/packages/mode_track.py b/src/methods/packages/mode_track.py
deleted file mode 100644
index fe68c91..0000000
--- a/src/methods/packages/mode_track.py
+++ /dev/null
@@ -1,944 +0,0 @@
-"This file is taken from the DTaaS-platform"
-import matplotlib.pyplot as plt
-import matplotlib.tri as mtri
-import numpy as np
-import numpy.ma as ma
-import copy
-
-# plt.close('all')
-# Clustering function
-def cluster_frequencies(frequencies, damping_ratios, mode_shapes,
-
frequencies_max_MO, cov_freq_max_MO, - damping_ratios_max_MO, cov_damping_max_MO, - mode_shapes_max_MO, tMAC, bound_multiplier=2): - """ - - - Parameters - ---------- - frequencies : TYPE - DESCRIPTION. - damping_ratios : TYPE - DESCRIPTION. - mode_shapes : TYPE - DESCRIPTION. - frequencies_max_MO : TYPE - DESCRIPTION. - cov_freq_max_MO : TYPE - DESCRIPTION. - damping_ratios_max_MO : TYPE - DESCRIPTION. - cov_damping_max_MO : TYPE - DESCRIPTION. - mode_shapes_max_MO : TYPE - DESCRIPTION. - tMAC : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - None. - - """ - - # Modify the index of frequency to sorting - - sorted_indices = np.argsort(frequencies_max_MO) - fn_sorted = frequencies_max_MO[sorted_indices] - damping_ratios_sorted = damping_ratios_max_MO[sorted_indices] - cov_fn_sorted = cov_freq_max_MO[sorted_indices] - cov_damping_sorted = cov_damping_max_MO[sorted_indices] - mode_shape_sorted = mode_shapes_max_MO[sorted_indices] - - fn_unique, unique_indices = np.unique(fn_sorted, return_index=True) - cov_fn_unique = cov_fn_sorted[unique_indices] - damping_ratios_unique = damping_ratios_sorted[unique_indices] - cov_damping_unique = cov_damping_sorted[unique_indices] - mode_shape_unique = mode_shape_sorted[unique_indices] - - # print(f'unsorted frequencies: {frequencies_max_MO}') - # print(f'unique frequencies: {fn_unique}') - # print(f'unsorted covariance: {cov_freq_max_MO}') - # print(f'unique covariance: {cov_fn_unique}') - - # frequencies = frequencies[::2] # This is 'S' as per algorithm - # mode_shapes = mode_shapes[::2, :, :] - - # print(f'Shape of frequencies: {frequencies.shape}') - - C_cluster = [] - Ip = [] - - # Mask to track ungrouped elements (initially all elements are ungrouped) - ungrouped_mask = np.ones_like(frequencies, dtype=bool) - - # Check each limit and save indices - for ip, (f_MxMO, fcov_MxMO, z_MxMO, zcov_MxMO) in enumerate(zip(fn_unique, - cov_fn_unique, damping_ratios_unique, cov_damping_unique)): - if np.isnan(f_MxMO): - continue - - # Confidence interval using the mean±2*standard_deviation - f_lower_bound = f_MxMO - bound_multiplier * np.sqrt(fcov_MxMO) - f_upper_bound = f_MxMO + bound_multiplier * np.sqrt(fcov_MxMO) - z_lower_bound = z_MxMO - bound_multiplier * np.sqrt(zcov_MxMO) - z_upper_bound = z_MxMO + bound_multiplier * np.sqrt(zcov_MxMO) - - # Find elements within the current limit that are still ungrouped - condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound) & ungrouped_mask - indices = np.argwhere(condition_mask) # Get indices satisfying the condition - - # Initialization of Ip - Ip.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - # for Ip_item in Ip: - # print(f'Ip values: {Ip_item["f_values"]}') - - - # declared for appending - updated_indices = np.empty((0, 2), dtype=int) - f_updated_values = [] - z_updated_values = [] - # print(f'ip : {ip}') - - - # Find duplicates and their indices - # print(f'Indices : {Ip[ip]["indices"]}') - model_order_id = Ip[ip]["indices"][:,1] - # print(f'model order id: {model_order_id}') - unique, counts = np.unique(model_order_id, return_counts=True) - duplicates = unique[counts > 1] # model order number with duplicate modes - # print(f'Duplicate : {duplicates}') - # Create a 
boolean mask for duplicate rows - is_duplicate_row = np.isin(model_order_id, duplicates) - # Filter indices with duplicate values - indices_Ipm = Ip[ip]["indices"][is_duplicate_row] # Rows with duplicates - # print(f'Ipm indices: {indices_Ipm}') - indices_Ipu = Ip[ip]["indices"][~is_duplicate_row] - # print(f'Ipu indices: {indices_Ipu}') - # Check if indices_Ipu is empty - if indices_Ipu.size > 0: - ip_for_Ipu = indices_Ipu[np.argmax(indices_Ipu[:, 1])] - # print(f'ip for Ipu : {ip_for_Ipu}') - else: - print("No unique mode issue in this step.") - - - if duplicates.size == 0: - print("All values are unique.") - if len(indices)>1: - - for ii in indices: - target_mode_shape = mode_shapes[ii[0], ii[1], :] # Extract mode shape from the 3D array - reference_mode_shape = mode_shape_unique[ip] - - # print(f'print target_mode_shape: {target_mode_shape}') - # print(f'print reference_mode_shape: {reference_mode_shape}') - - # Calculate MAC with the reference mode shape - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - # print(f'MAC value: {mac_value}') - # print(f'ip : {ip}') - # print(f'MAC : {mac_value}') - # Check the MAC value to include in C. Algorithm 2: step 2 - if mac_value > tMAC: - # print(f'updated indices: {updated_indices}') - # print(f'new indices to be added: {ii}') - updated_indices = np.vstack([updated_indices,ii]) - f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)]) - z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)]) - # print(f'updated values: {updated_values}') - # Check if the cluster already exists - existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None) - if existing_cluster: - # Update existing cluster - existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii]) - existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)]) - existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)]) - else: - # Create a new cluster - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": np.copy(updated_indices), - "f_values": np.copy(f_updated_values), - "z_values":np.copy(z_updated_values) - }) - - else: - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound,f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - # Handle the duplicate model order for single mode - else: - if len(indices_Ipu)>1: - for ii in indices_Ipu: - target_mode_shape = mode_shapes[ii[0], ii[1], :] # Extract mode shape from the 3D array - reference_mode_shape = mode_shapes[ip_for_Ipu[0], ip_for_Ipu[1], :] - - # print(f'print target_mode_shape: {target_mode_shape}') - # print(f'print reference_mode_shape: {reference_mode_shape}') - - # Calculate MAC with the reference mode shape - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - # print(f'MAC value: {mac_value}') - # print(f'ip : {ip}') - # print(f'MAC : {mac_value}') - # Check the MAC value to include in C. 
Algorithm 2: step 2 - if mac_value > tMAC: - # print(f'updated indices: {updated_indices}') - # print(f'new indices to be added: {ii}') - updated_indices = np.vstack([updated_indices,ii]) - f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)]) - z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)]) - # print(f'updated values: {updated_values}') - # Check if the cluster already exists - existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None) - if existing_cluster: - # Update existing cluster - existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii]) - existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)]) - existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)]) - else: - # print(f'Ipu indices: {indices_Ipu} and frequencies: {f_updated_values}') - # Create a new cluster - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": np.copy(updated_indices), - "f_values": np.copy(f_updated_values), - "z_values":np.copy(z_updated_values) - }) - - else: - C_cluster.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound,f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - - - - - - # for Ip_item in C_cluster: - # print(f'C_cluster values: {Ip_item["f_values"]}') - - - - Ip_C_cluster = [] - # algorith 2: setp 3 [condition check] - for item1 in C_cluster: - # print(f'C_cluster item: {item1}') - # print(f'C_cluster value: {item1["values"]}') - - for item2 in Ip: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - if len(item1['f_values']) == len(item2['f_values']): - # print('For C and Ip - values have the same length. 
Proceeding to compare the values.') - - # Compare the values - if np.all(item1['f_values'] != item2['f_values']): - # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}') - continue - else: - print('Values are the same between C_cluster and Ip') - - else: - # print('Values have different lengths between C_cluster and Ip.') - updated_indices2 = np.empty((0, 2), dtype=int) # Reset to empty 2D array - f_updated_values2 = [] - z_updated_values2 = [] - for pp in item1['indices']: - for kk in item2['indices']: - reference_mode_shape = mode_shapes[pp[0], pp[1], :] - target_mode_shape = mode_shapes[kk[0], kk[1], :] - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - if mac_value > tMAC: - updated_indices2 = np.vstack([updated_indices2,kk]) - f_updated_values2 = np.append(f_updated_values2, frequencies[tuple(kk.T)]) - z_updated_values2 = np.append(z_updated_values2, damping_ratios[tuple(kk.T)]) - # print(f'newly added indices: {kk}') - # print(f'newly added values: {frequencies[tuple(kk.T)]}') - Ip_C_cluster.append({ - "ip_index": item1['ip_index'], - "indices" : updated_indices2, - "f_values" : f_updated_values2, - "z_values" : z_updated_values2 - }) - - # for Ip_item in Ip_C_cluster: - # print(f'Ip_C_cluster values: {Ip_item["f_values"]}') - # print(f'Ip_C_cluster indices: {Ip_item["indices"]}') - - # Initialize C_cluster_finale as a deep copy of C_cluster - C_cluster_finale = copy.deepcopy(C_cluster) - - # Add the points from Ip_C_cluster if they satisfy MAC conditions - # algorith 2: setp 3 [addition of point] - for item1 in C_cluster: - for item2 in Ip_C_cluster: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - # Combine values from both clusters - f_merged_values = np.concatenate((item1['f_values'], item2['f_values'])) - z_merged_values = np.concatenate((item1['z_values'], item2['z_values'])) - # Combine indices from both clusters - merged_indices = np.concatenate((item1['indices'], item2['indices'])) - - # Find the corresponding cluster in C_cluster_finale - for finale_item in C_cluster_finale: - if finale_item['ip_index'] == item1['ip_index']: - # Update values and indices - finale_item['f_values'] = f_merged_values - finale_item['z_values'] = z_merged_values - finale_item['indices'] = merged_indices - break # Exit the loop once the match is found - - - # for C_item in C_cluster_finale: - # print(f'C_cluster values end: {C_item["values"]}') - - # algorith 2: step 4 - Ip_indices = np.vstack([item['indices'] for item in C_cluster]) - # Make a copy of frequencies to represent unclustered frequencies - unclustered_frequencies = frequencies.copy() - unclustered_damping = damping_ratios.copy() - # Update the copied matrix to NaN at collected indices - for idx in Ip_indices: - unclustered_frequencies[tuple(idx)] = np.nan # Set to NaN - unclustered_damping[tuple(idx)] = np.nan - - # print(f'Unclustred frequencies: {unclustered_frequencies}') - - # Find all indices in the frequencies matrix - all_indices = np.array(np.meshgrid(np.arange(frequencies.shape[0]), np.arange(frequencies.shape[1]))).T.reshape(-1, 2) - - # Identify unclustered indices: exclude NaN and indices in clustered_indices - unclustered_indices = [] - for idx in all_indices: - if not np.isnan(frequencies[tuple(idx)]) and not any((idx == Ip_indices).all(axis=1)): - unclustered_indices.append(idx) - - unclustered_indices = np.array(unclustered_indices) - # print(f'Unclustred indices: {unclustered_indices}') - - 
return C_cluster_finale, unclustered_frequencies, unclustered_damping, unclustered_indices - -# MAC calculation function -def calculate_mac(reference_mode, mode_shape): - """ - - - Parameters - ---------- - reference_mode : TYPE - DESCRIPTION. - mode_shape : TYPE - DESCRIPTION. - - Returns - ------- - TYPE - DESCRIPTION. - - """ - numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2 - denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape) - return np.real(numerator / denominator) - -def clusterexpansion(C_clusters, unClustered_frequencies, unClustered_damping, cov_freq, cov_damping, mode_shapes, unClustered_indices, tMAC, bound_multiplier=2): - """ - - - Parameters - ---------- - C_clusters : TYPE - DESCRIPTION. - unClustered_frequencies : TYPE - DESCRIPTION. - unClustered_damping : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - cov_damping : TYPE - DESCRIPTION. - mode_shapes : TYPE - DESCRIPTION. - unClustered_indices : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Raises - ------ - a - DESCRIPTION. - - Returns - ------- - C_cluster_finale : TYPE - DESCRIPTION. - unclustered_frequencies_expanded : TYPE - DESCRIPTION. - unclustered_damping_expanded : TYPE - DESCRIPTION. - unclustered_indices_expnaded : TYPE - DESCRIPTION. - - """ - - # cov_freq = cov_freq[::2] - # mode_shapes = mode_shapes[::2, :, :] - - # import pprint - # for cluster in C_clusters: - # pprint.pprint(cluster) - - Ip_plus = [] - - for cluster in C_clusters: - - f_values = cluster['f_values'] - z_values = cluster['z_values'] - indices = cluster['indices'] - - # **Skip if the cluster is empty** - if len(f_values) == 0: - print("Skipping empty cluster...") - continue # Move to the next cluster - - # print("Covariance Array:", np.sqrt(cov_freq[tuple(indices.T)])) - # Calculate the lower and upper bounds for the current cluster - # print(f'f_values: {f_values}') - # print(f'cov_freq[tuple(indices.T): {cov_freq[tuple(indices.T)]}') - f_lower_bound = np.min(f_values - bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)])) # Minimum of all points for frequencies - f_upper_bound = np.max(f_values + bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)])) # Maximum of all points for frequencies - z_lower_bound = np.min(z_values - bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)])) # Minimum of all points for damping - z_upper_bound = np.max(z_values + bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)])) # Maximum of all points for damping - - # print(f'Print cluster lower bound: {lower_bound}') - # print(f'Print cluster upper bound: {upper_bound}') - - # Find elements within the current limit that are still ungrouped - condition_mask2 = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= z_lower_bound) & (unClustered_damping <= z_upper_bound) - # Get indices satisfying the condition - expanded_indices = np.argwhere(condition_mask2) - - # Initialize lists to store updated indices and values - updated_indices3 = [] - f_updated_values3 = [] - z_updated_values3 = [] - - # Loop through the unclustered indices and append matching values to the cluster - for idx in expanded_indices: - freq_value = unClustered_frequencies[tuple(idx)] # Get the frequency value at this index - damp_value = unClustered_damping[tuple(idx)] # Get the damping value at this index - updated_indices3.append(idx) # Append the index - f_updated_values3.append(freq_value) # 
Append the frequency value - z_updated_values3.append(damp_value) # Append the damping value - - # Create a new cluster and append it to Ip_plus_cluster - Ip_plus.append({ - "ip_index": cluster['ip_index'], # Use the ip_index from the original cluster - "indices": np.array(updated_indices3), # Updated indices - "f_values": np.array(f_updated_values3), # Updated frequency values - "z_values": np.array(z_updated_values3) # Updated damping values - }) - - - Ip_plus_C = [] - # algorith 2: setp 3 [condition check] - for item1 in C_clusters: - # print(f'C_cluster item: {item1}') - # print(f'C_cluster value: {item1["values"]}') - - for item2 in Ip_plus: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - if len(item1['f_values']) == len(item2['f_values']): - # print('For C and Ip - values have the same length. Proceeding to compare the values.') - - # Compare the values - if np.all(item1['f_values'] != item2['f_values']): - # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}') - continue - else: - print(f'Values are the same between C_cluster and Ip_plus: {item1["f_values"]}') - - else: - # print('Values have different lengths between C_cluster and Ip.') - updated_indices4 = np.empty((0, 2), dtype=int) # Reset to empty 2D array - f_updated_values4 = [] - z_updated_values4 = [] - for pp in item1['indices']: - for kk in item2['indices']: - reference_mode_shape = mode_shapes[pp[0], pp[1], :] - target_mode_shape = mode_shapes[kk[0], kk[1], :] - mac_value = calculate_mac(reference_mode_shape, target_mode_shape) - if mac_value > tMAC: - updated_indices4 = np.vstack([updated_indices4,kk]) - f_updated_values4 = np.append(f_updated_values4, unClustered_frequencies[tuple(kk.T)]) - z_updated_values4 = np.append(z_updated_values4, unClustered_damping[tuple(kk.T)]) - # print(f'newly added indices: {kk}') - # print(f'newly added values: {frequencies[tuple(kk.T)]}') - Ip_plus_C.append({ - "ip_index": item1['ip_index'], - "indices" : updated_indices4, - "f_values" : f_updated_values4, - "z_values" : z_updated_values4 - }) - - - # Initialize C_cluster_finale as a deep copy of C_cluster - C_cluster_finale = copy.deepcopy(C_clusters) - - # Add the points from Ip_C_cluster if they satisfy MAC conditions - # algorith 2: setp 3 [addition of point] - for item1 in C_clusters: - for item2 in Ip_plus_C: - if item1['ip_index'] != item2['ip_index']: - continue # Skip the comparison if ip_index is not the same - - # Combine values from both clusters - f_merged_values2 = np.concatenate((item1['f_values'], item2['f_values'])) # concatenate frequencies - z_merged_values2 = np.concatenate((item1['z_values'], item2['z_values'])) # concatenate damping - # Combine indices from both clusters - merged_indices2 = np.concatenate((item1['indices'], item2['indices'])) - - # Find the corresponding cluster in C_cluster_finale - for finale_item in C_cluster_finale: - if finale_item['ip_index'] == item1['ip_index']: - # Update values and indices - finale_item['f_values'] = f_merged_values2 - finale_item['z_values'] = z_merged_values2 - finale_item['indices'] = merged_indices2 - break # Exit the loop once the match is found - - - # algorith 2: step 4 - # Filter out empty 'indices' arrays and check if there are any non-empty ones - valid_indices = [item['indices'] for item in C_clusters if item['indices'].size > 0] - - if valid_indices: - # If there are valid indices, proceed with stacking - Ip_plus_indices = np.vstack(valid_indices) - else: - 
# If there are no valid indices, handle accordingly (e.g., set to empty or raise a warning) - # print("No valid indices to stack.") - Ip_plus_indices = np.array([]) # Or choose another fallback behavior - # Make a copy of frequencies to represent unclustered frequencies - unclustered_frequencies_expanded = unClustered_frequencies.copy() - unclustered_damping_expanded = unClustered_damping.copy() - # Update the copied matrix to NaN at collected indices - for idx in Ip_plus_indices: - unclustered_frequencies_expanded[tuple(idx)] = np.nan # Set to NaN - unclustered_damping_expanded[tuple(idx)] = np.nan # Set to NaN - - # print(f'Unclustred frequencies: {unclustered_frequencies}') - - # Find all indices in the frequencies matrix - all_indices = np.array(np.meshgrid(np.arange(unClustered_frequencies.shape[0]), np.arange(unClustered_frequencies.shape[1]))).T.reshape(-1, 2) - - # Identify unclustered indices: exclude NaN and indices in clustered_indices - unclustered_indices_expnaded = [] - for idx in all_indices: - # if not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)): - if Ip_plus_indices.size > 0 and not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)): - unclustered_indices_expnaded.append(idx) - - unclustered_indices_expnaded = np.array(unclustered_indices_expnaded) - # print(f'Unclustred indices expanded: {unclustered_indices_expnaded}') - - return C_cluster_finale, unclustered_frequencies_expanded, unclustered_damping_expanded, unclustered_indices_expnaded - - -def visualize_clusters(clusters, cov_freq, bounds): - """ - - - Parameters - ---------- - clusters : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - bounds : TYPE - DESCRIPTION. - - Returns - ------- - None. 
- - """ - # Sort clusters by their median if available, otherwise keep original order - clusters.sort(key=lambda cluster: np.median(cluster["values"]) if "values" in cluster and len(cluster["values"]) > 0 else float('inf')) - - # Create subplots (one for each cluster) - num_clusters = len(clusters) - fig, axs = plt.subplots(num_clusters, 1, figsize=(10, 5 * num_clusters), tight_layout=True) - - - if num_clusters == 1: - axs = [axs] # Ensure axs is always iterable - - for idx, (cluster, ax) in enumerate(zip(clusters, axs)): - cluster_values = cluster["f_values"] - cluster_indices = cluster["indices"] - cluster_cov = cov_freq[tuple(np.array(cluster_indices).T)] # Covariance for original cluster - - # Extract the second part of the cluster indices for plotting - model_orders = cluster_indices[:, 1] - - # Scatter plot the cluster values against model orders - ax.scatter(cluster_values, model_orders, label="Cluster Data") - - # Plot the cluster values with covariance as error bars - ax.errorbar( - cluster_values, - model_orders, # Use the second index for the vertical axis - xerr=(np.sqrt(cluster_cov)*bounds), # Error bars for x-values based on covariance - fmt='o', capsize=5, ecolor='red', label="± 2σ" - ) - - # Check if the 'median' key exists in the cluster dictionary - if 'median' in cluster: - median_value = cluster["median"] - if not np.isnan(median_value): # If median is not NaN, plot the vertical line - ax.axvline(median_value, color='blue', linestyle='--', label='Median') - - ax.set_title(f"Cluster {idx + 1}") - ax.set_xlabel("Frequency [Hz]") - ax.set_ylabel("Model Order") - ax.set_ylim(0, 21) - ax.legend() - ax.grid() - - plt.show() - - -def clean_clusters_by_median(clusters, cov_freq, bound_multiplier=2): - """ - - - Parameters - ---------- - clusters : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - cleaned_clusters : TYPE - DESCRIPTION. 
- - """ - cleaned_clusters = [] - - for cluster_idx, cluster in enumerate(clusters): - # Extract values and indices from the cluster - f_cluster_values = np.array(cluster["f_values"]) - z_cluster_values = np.array(cluster["z_values"]) - cluster_indices = np.array(cluster["indices"]) - - # Extract covariance for each cluster element - f_cluster_cov = cov_freq[tuple(cluster_indices.T)] # Extract covariance for the given indices - - # Remove duplicates by using unique values and their indices - f_unique_values, unique_indices = np.unique(f_cluster_values, return_index=True) - f_unique_cov = f_cluster_cov[unique_indices] - z_unique = z_cluster_values[unique_indices] - unique_indices_2D = cluster_indices[unique_indices] - - # Update the original cluster with unique values and indices - cluster["f_values"] = f_unique_values - cluster["z_values"] = z_unique - cluster["indices"] = unique_indices_2D - - # Calculate the median of the unique values - median_value = np.nanmedian(f_unique_values) - - # Define bounds for filtering based on the bound_multiplier and covariance - lower_bound = f_unique_values - bound_multiplier * np.sqrt(f_unique_cov) - upper_bound = f_unique_values + bound_multiplier * np.sqrt(f_unique_cov) - - # Keep elements where the median lies within the bounds - mask = (median_value >= lower_bound) & (median_value <= upper_bound) - f_cleaned_values = f_unique_values[mask] - z_cleaned_values = z_unique[mask] - cleaned_indices = unique_indices_2D[mask] - - # Append the cleaned cluster to the result if there are enough values - if len(f_cleaned_values) > 1: # Keep clusters with more than one cleaned value - cleaned_clusters.append({ - "original_cluster": cluster, # Store the original cluster (now updated with unique values) - "f_values": f_cleaned_values, - "z_values": z_cleaned_values, - "indices": cleaned_indices, - "median": median_value, - "bound_multiplier": bound_multiplier, # Store the bound multiplier used - }) - - return cleaned_clusters - - -def mode_allingment(ssi_mode_track_res, mstab, tMAC): - print("DEBUG: oma_output inside mode_allingment:", type(ssi_mode_track_res), ssi_mode_track_res) - - # extract results - frequencies = ssi_mode_track_res['Fn_poles'] - cov_freq = ssi_mode_track_res['Fn_poles_cov'] - damping_ratios = ssi_mode_track_res['Xi_poles'] - cov_damping = ssi_mode_track_res['Xi_poles_cov'] - mode_shapes = ssi_mode_track_res['Phi_poles'] - bounds = 2 # standard deviation multiplier - - frequencies_max_MO = frequencies[:,-1] - cov_freq_max_MO = cov_freq[:,-1] - damping_ratios_max_MO = damping_ratios[:,-1] - cov_damping_max_MO = cov_damping[:,-1] - mode_shapes_max_MO = mode_shapes[:,-1,:] - - frequencies_copy = frequencies.copy() - - # Remove the complex conjugate entries - frequencies = frequencies[::2] # This is 'S' as per algorithm - damping_ratios = damping_ratios[::2] # This is 'S' as per algorithm - mode_shapes = mode_shapes[::2, :, :] - cov_freq = cov_freq[::2] - cov_damping = cov_damping[::2] - - frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies - damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios - indices_frequency = frequency_coefficient_variation > 0.05 - indices_damping = damping_coefficient_variation > 0.5 - combined_indices = indices_frequency & indices_damping - frequencies[combined_indices] = np.nan - damping_ratios[combined_indices] = np.nan - cov_freq[combined_indices] = np.nan - cov_damping[combined_indices] = np.nan - - - # Initial clustering - C_clusters, unClustd_frequencies, unClustd_damping, unClustd_indices 
= cluster_frequencies(frequencies, damping_ratios, - mode_shapes, frequencies_max_MO, cov_freq_max_MO, - damping_ratios_max_MO, cov_damping_max_MO, - mode_shapes_max_MO, tMAC, bound_multiplier=bounds) - - # Expansion step - C_expanded, unClustd_frequencies_expanded, unClustd_damping_expanded, unClustd_indices_expanded = clusterexpansion(C_clusters, unClustd_frequencies, unClustd_damping, - cov_freq, cov_damping, mode_shapes, - unClustd_indices, tMAC, bound_multiplier=bounds) - - - last_ip_index = max(cluster['ip_index'] for cluster in C_expanded) - - count = 0 - - # Loop until unClustd_indices contains only one index - while True: - # print(f"unClustd indices expanded size before: {unClustd_indices_expanded.size}") - # print(f'Loop counter: {count}') - count += 1 - # Check the termination condition - if unClustd_indices_expanded.size <= 2: # Stop if there are fewer than 2 indices - print("No more unclustered indices to process. Exiting ...") - break - - # Get the highest column index from unClustd_indices - highest_column = np.max(unClustd_indices_expanded[:, 1]) # Assuming column index is in the second column - - # Create a mask for the unclustered indices - mask1 = np.full(frequencies.shape, False) # Initialize a boolean mask - mask1[tuple(unClustd_indices_expanded.T)] = True # Set only unclustered indices to True - unClustd_frequencies = frequencies.copy() - unClustd_damping = damping_ratios.copy() - unClustd_frequencies[~mask1] = np.nan - unClustd_damping[~mask1] = np.nan - unClustd_cov_freq = cov_freq.copy() - unClustd_cov_damp = cov_damping.copy() - unClustd_cov_freq[~mask1] = np.nan # Unclustered frequency variance matrix - unClustd_cov_damp[~mask1] = np.nan # Unclustered damping variance matrix - unClustd_mode_shapes = mode_shapes.copy() - - for ii in range(unClustd_mode_shapes.shape[2]): - slice_2d = unClustd_mode_shapes[:, :, ii] - slice_2d[~mask1] = np.nan - unClustd_mode_shapes[:, :, ii] = slice_2d # Unclustered mode shape matrix - - # Filter the data for the highest column - frequencies_max_MO = unClustd_frequencies_expanded[:, highest_column] - # print(f'Maximum model order: {highest_column}') - # print(f'MO frequencies: {frequencies_max_MO}') - damping_ratios_max_MO = unClustd_damping_expanded[:, highest_column] - # print(f'frequencies initization: {frequencies_max_MO}') - cov_freq_max_MO = unClustd_cov_freq[:, highest_column] - cov_damp_max_MO = unClustd_cov_damp[:, highest_column] - mode_shapes_max_MO = unClustd_mode_shapes[:, highest_column, :] - - # Call the cluster_frequencies function with updated parameters - C_cluster_loop, unClustd_frequencies_loop, unClustd_damping_loop, unClustd_indices_loop = cluster_frequencies( - unClustd_frequencies, - unClustd_damping, - unClustd_mode_shapes, - frequencies_max_MO, - cov_freq_max_MO, - damping_ratios_max_MO, - cov_damping_max_MO, - mode_shapes_max_MO, - tMAC, - bound_multiplier=bounds - ) - print("Initial clustering done.") - - # import pprint - # for cluster in C_clusters: - # pprint.pprint(cluster) - - if unClustd_indices_loop.size == 0: - print("No unclustered indices left. 
Exiting ...") - # Update the clusters with new 'ip_index' values - for cluster in C_cluster_loop: - # Update the ip_index for the new clusters (starting from last_ip_index + 1) - new_ip_index = last_ip_index + 1 - cluster["ip_index"] = new_ip_index - - # Append the updated cluster to the final list - C_expanded.append(cluster) - - # Update last_ip_index to the newly assigned ip_index for the next iteration - last_ip_index = new_ip_index - - # print('before break') - break - # print('after break') - - print("Expansion started in loop.") - # Expansion step for each initial clusters - C_expanded_loop, unClustd_frequencies_expanded_loop, unClustd_damping_expanded_loop, unClustd_indices_expanded_loop = clusterexpansion( - C_cluster_loop, - unClustd_frequencies_loop, - unClustd_damping_loop, - cov_freq, - cov_damping, - mode_shapes, - unClustd_indices_loop, - tMAC, - bound_multiplier=bounds - ) - print("Expansion clustering done.") - - # Update the clusters with new 'ip_index' values - for cluster in C_expanded_loop: - # Update the ip_index for the new clusters (starting from last_ip_index + 1) - new_ip_index = last_ip_index + 1 - cluster["ip_index"] = new_ip_index - - # Append the updated cluster to the final list - C_expanded.append(cluster) - - # Update last_ip_index to the newly assigned ip_index for the next iteration - last_ip_index = new_ip_index - - # print("Expansion added to clustering.") - - if unClustd_indices_expanded_loop.size == 0: - print("No unclustered indices left. Exiting ...") - break - # Update the unClustd_indices for the next iteration - unClustd_indices_expanded = unClustd_indices_expanded_loop[ - unClustd_indices_expanded_loop[:, 1] != highest_column - ] - - # Check if the size of unClustd_indices_expanded has become less than or equal to 2 - if unClustd_indices_expanded.size <= 2: - print("Unclustered indices size <= 2. 
Stopping ...")
-            break
-
-    # Removing repeatation during merge
-    for cluster in C_expanded:
-        # Get the current values
-        f_values = cluster['f_values']
-        indices = cluster['indices']
-        z_values = cluster['z_values']
-        # Find unique f_values and their indices
-        unique_f_values, unique_indices = np.unique(f_values, return_index=True)
-        cluster['f_values'] = unique_f_values
-        cluster['indices'] = indices[unique_indices]
-        cluster['z_values'] = z_values[unique_indices]
-
-
-    # # Visualize the initial clusters
-    # visualize_clusters(C_expanded, cov_freq, bounds)
-
-    # # import pprint
-    # for cluster in C_expanded:
-    #     print(f"ip_index: {cluster['ip_index']}, f_values length: {len(cluster['f_values'])}")
-    #     print(f"Cluster confidence interval: {cluster['confidence_interval'][0:2]}")
-    #     print(f"Cluster shape: {len(cluster['f_values'])}")
-    # # pprint.pprint(cluster)
-    # # print(f"ip_index: {cluster['ip_index']}")
-    # # print(f"indices shape: {cluster['indices'].shape}")
-    # # print(f"f_values shape: {len(cluster['f_values'])}")
-
-
-    print('Cluster filter started')
-    # Filter clusters with less than 'mstab' elements
-    C_expanded_filtered = [cluster for cluster in C_expanded if cluster['indices'].shape[0] > mstab]
-    # Sort clusters by the lower bound of their confidence_interval (the first value in the tuple)
-    C_expanded_filtered.sort(key=lambda cluster: cluster['confidence_interval'][0])
-    print('Cluster filter finished')
-
-    # # Visualize the cluster filter by element numbers
-    # visualize_clusters(C_expanded_filtered, cov_freq, bounds)
-
-    # Cluster cleaning based on median
-    cleaned_clusters = clean_clusters_by_median(C_expanded_filtered, cov_freq, bound_multiplier=bounds)
-
-    # remove repeatative clusters
-    seen = set()
-    uq_clusters = []
-    for d in cleaned_clusters:
-        f_values_tuple = tuple(d['f_values'])
-        if f_values_tuple not in seen:
-            seen.add(f_values_tuple)
-            uq_clusters.append(d)
-
-    for cluster in uq_clusters:
-        indices = cluster['indices']
-        mode_shapes_list = []
-
-        for idx in indices:
-            # Extract mode shapes using indices
-            mode_shape = mode_shapes[idx[0], idx[1], :]
-            mode_shapes_list.append(mode_shape)
-
-        # Add mode shapes to the dictionary
-        cluster['mode_shapes'] = np.array(mode_shapes_list)
-
-    uq_clusters_sorted = sorted(uq_clusters, key=lambda cluster: cluster["median"])
-
-    return uq_clusters_sorted
diff --git a/src/methods/packages/mode_tracking.py b/src/methods/packages/mode_tracking.py
new file mode 100644
index 0000000..5ca9858
--- /dev/null
+++ b/src/methods/packages/mode_tracking.py
@@ -0,0 +1,355 @@
+from typing import Any
+import numpy as np
+from methods.packages.clustering import calculate_mac
+
+# JVM 14/10/2025
+
+def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any],Params: dict[str,Any]=None) -> dict[str,Any]:
+    """
+    Tracking of modes across experiments
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+
+    Returns:
+        tracked_clusters (dict): Updated dictionary of tracked clusters
+
+    """
+    print("Cluster tracking")
+    if Params is None:
+        Params = {'phi_cri':0.8,
+                  'freq_cri':0.2}
+
+    m_f = []
+    for key in cluster_dict.keys():
+        cluster = cluster_dict[key]
+        m_f.append(cluster['median_f'])
+
+    t_list = []
+    t_length = []
+    for key in tracked_clusters: #Go through all tracked clusters. They are identified by keys which are integers from 0 up to the total number of clusters
+        if key == 'iteration':
+            pass
+        else:
+            tracked_cluster_list = tracked_clusters[key] #Accessing all clusters in a tracked cluster group
+            t_length.append(len(tracked_cluster_list))
+            tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group
+            #median freq of last cluster in tracked cluster group
+            t_list.append(tracked_cluster['median_f'])
+
+    # No tracked clusters yet?
+    if not tracked_clusters:
+        first_track = 1
+    else:
+        first_track = 0
+
+    if first_track == 1:
+        for id, key in enumerate(cluster_dict.keys()):
+            cluster = cluster_dict[key]
+            cluster['id'] = 0
+
+            tracked_clusters['iteration'] = 0
+            tracked_clusters[str(id)] = [cluster]
+    else:
+        iter = tracked_clusters['iteration'] + 1
+        tracked_clusters['iteration'] = iter
+
+        result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params) #Match clusters to tracked clusters
+
+        result_int = []
+        for val in result.values(): #Get all non-"new" results
+            if isinstance(val, int):
+                result_int.append(val)
+
+        if len(result_int) == len(set(result_int)): #If all clusters match with a unique tracked cluster
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in result dict
+                cluster['id'] = iter
+                if pos == "new": #Add cluster as a new tracked cluster
+                    new_key = len(tracked_clusters)-1 #-1 for "iteration", +1 for next cluster and -1 for starting at 0 = -1
+                    #print(f"new key: {new_key}")
+                    tracked_clusters[str(new_key)] = [cluster]
+                else: #Add cluster to an existing tracked cluster
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+        else: #If some clusters match with the same tracked cluster.
+            kk = 0
+            skip_tracked_cluster = []
+            skip_cluster = []
+            while len(result_int) != len(set(result_int)):
+                kk += 1
+                if kk > 10:
+                    #Debug info:
+                    unique_match_debug_info(result,cluster_dict,t_list)
+                    print("Unresolved mode tracking")
+                    breakpoint()
+
+                for possible_match_id in set(result.values()): #Go through all unique values
+                    if possible_match_id == "new": #Do nothing if "new"
+                        pass
+                    else:
+                        test_if_str = np.argwhere(np.array(list(result.values())) == "new") #Test if "new" is present. If so, then we must match with str instead of int.
+                        if len(test_if_str) > 0:
+                            itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the indices of the clusters with this match
+                        else:
+                            itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the indices of the clusters with this match
+                        print(possible_match_id,np.array(list(result.values())),itemindex, len(itemindex))
+
+                        if len(itemindex) > 1: #If multiple clusters match to the same tracked cluster
+                            pos, result, cluster_index = resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters)
+                            skip_tracked_cluster.append(str(result[str(cluster_index[pos])])) #Skip the tracked cluster that is already optimally matched with a cluster.
+                            skip_cluster.append(cluster_index[pos]) #Skip the cluster that is already optimally matched with a tracked cluster.
+
+                result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params,result,skip_cluster,skip_tracked_cluster) #Match with tracked clusters, but skip the already matched.
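+                #The intent of this loop (as implemented above): each pass lets
+                #resolve_unique_matches pin the best cluster for each contested
+                #tracked cluster, re-labels the displaced candidates "new", and
+                #re-matches the rest, so the matches in result converge toward
+                #unique tracked-cluster indices; the kk > 10 guard above stops
+                #a non-converging loop.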
+
+                #Debug info:
+                unique_match_debug_info(result,cluster_dict,t_list)
+
+                result_int = []
+                for val in result.values():
+                    if isinstance(val, int):
+                        result_int.append(val)
+
+            #Add the clusters to tracked clusters
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in result dict
+                cluster['id'] = iter
+                if pos == "new":
+                    new_key = len(tracked_clusters)-1 #-1 for "iteration", +1 for next cluster and -1 for starting at 0 = -1
+                    tracked_clusters[str(new_key)] = [cluster]
+                else:
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+
+
+    return tracked_clusters
+
+def match_cluster_to_tracked_cluster(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], Params: dict[str,Any], result_prev: dict[str,Any] = {},skip_cluster: list = [], skip_tracked_cluster: list = []) -> dict[str,Any]:
+    """
+    Match clusters to tracked clusters
+
+    The result dictionary consists of keys (cluster indices) and values (indices of the tracked clusters to match with).
+    Example:
+        Cluster 1 matches with tracked cluster 2
+        Cluster 2 matches with tracked cluster 1
+        Cluster 3 matches with tracked cluster 1
+        Cluster 4 matches with "new", i.e. could not be matched with an existing tracked cluster
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+        result_prev (dict): Dictionary of previous match results
+        skip_cluster (list): List of clusters that have proven to be an optimal match with a tracked cluster
+        skip_tracked_cluster (list): List of tracked clusters that have an optimal match with a cluster
+
+    Returns:
+        result (dict): Dictionary of matches
+
+    """
+    result = {}
+    for id, key in enumerate(cluster_dict): #Go through all clusters
+        if id in skip_cluster: #If this cluster is already matched, skip it
+            result[str(id)] = result_prev[str(id)]
+            continue
+
+        #Get mode shapes
+        cluster = cluster_dict[key]
+        omega = cluster['median_f']
+        phi = cluster['mode_shapes'][0]
+        phi_all = cluster['mode_shapes']
+
+        Xres = []
+        MAC_list = []
+        D_freq = []
+        omega_t_list = []
+        MAC_max_list = []
+        MAC_avg_list = []
+        for key in tracked_clusters: #Go through all tracked clusters. They are identified by keys which are integers from 0 up to the total number of clusters
+            if key == 'iteration':
+                pass
+            else:
+                tracked_cluster_list = tracked_clusters[key] #Accessing all clusters in a tracked cluster group
+                tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group
+                omega_t = tracked_cluster['median_f'] #median freq of last cluster in tracked cluster group
+                omega_t_list.append(omega_t)
+                phi_t_all = tracked_cluster['mode_shapes'] #phi of last cluster in tracked cluster group
+                phi_t = phi_t_all[0]
+
+                MAC_list.append(float(calculate_mac(phi_t, phi)))
+
+                MACs = np.zeros((phi_all.shape[0],phi_t_all.shape[0]))
+                for ii, phi in enumerate(phi_all):
+                    for jj, phi_t in enumerate(phi_t_all):
+                        MAC = float(calculate_mac(phi_t, phi))
+                        MACs[ii,jj] = MAC #Compare the cluster with all tracked clusters
+
+                if key in skip_tracked_cluster:
+                    MAC_avg = np.mean(0)
+                    MAC_max = np.max(0)
+                    MAC_max_list.append(0)
+                    MAC_avg_list.append(0)
+                    D_freq.append(10**6)
+                else:
+                    MAC_avg = np.mean(MACs)
+                    MAC_max = np.max(MACs)
+                    MAC_max_list.append(MAC_max)
+                    MAC_avg_list.append(MAC_avg)
+                    D_freq.append(abs(omega_t-omega)/omega)
+
+        itemindex1 = np.argwhere(np.array(MAC_max_list) > Params['phi_cri']) #Find where the cluster matches the tracked cluster regarding the MAC criterion
+        itemindex = np.argwhere(np.array(D_freq)[itemindex1[:,0]] < Params['freq_cri']) #Find where the cluster matches the tracked cluster regarding the MAC and frequency criteria
+        indicies = itemindex1[itemindex[:,0]]
+        if len(indicies) > 1: #If two or more tracked clusters comply with the mode shape criterion
+            Xres = []
+            Xres_f = []
+            Xres_MAC = []
+            for nn in indicies:
+                pos = nn[0]
+                X = D_freq[pos]/MAC_max_list[pos] #Objective function
+                Xres.append(X)
+                Xres_f.append(D_freq[pos])
+                Xres_MAC.append(MAC_max_list[pos])
+
+            if Xres != []: #One or more clusters comply with the frequency criterion
+                pos1 = Xres.index(min(Xres)) #Find the cluster that is most likely
+                pos2 = Xres_MAC.index(max(Xres_MAC)) #Find the largest MAC
+                pos3 = Xres_f.index(min(Xres_f)) #Find the smallest frequency difference
+
+                if len(Xres) > 1: #If more than one cluster complies with the criteria
+                    Xres_left = Xres.copy()
+                    del Xres_left[pos1]
+                    if isinstance(Xres_left, np.float64):
+                        Xres_left = [Xres_left]
+
+                    Xres_MAC_left = Xres_MAC.copy()
+                    del Xres_MAC_left[pos1]
+                    if isinstance(Xres_MAC_left, np.float64):
+                        Xres_MAC_left = [Xres_MAC_left]
+
+                    Xres_f_left = Xres_f.copy()
+                    del Xres_f_left[pos1]
+                    if isinstance(Xres_f_left, np.float64):
+                        Xres_f_left = [Xres_f_left]
+
+                    pos1_2 = Xres_left.index(min(Xres_left)) #Find the cluster that is most likely
+                    pos2_2 = Xres_MAC_left.index(max(Xres_MAC_left)) #Find the cluster that is most likely based on MAC
+                    pos3_2 = Xres_f_left.index(min(Xres_f_left)) #Find the cluster that is most likely based on Freq
+
+                    if (pos1 == pos2) and (pos1 == pos3): #If one match on all three parameters: objective function, max MAC and frequency difference
+                        pos = int(indicies[pos1][0])
+                        result[str(id)] = pos #group to a tracked cluster
+
+                    #Make different: abs(min(Xres_left)/min(Xres)) < Params['obj_cri'] = 2
+                    elif abs(min(Xres_left)-min(Xres)) < Params['obj_cri']: #If the objective function results are close
+                        if (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): #If both frequency differences are close to the target cluster
+                            pos = int(indicies[pos2_2][0]) #Match with best MAC
+                            result[str(id)] = pos #group to a tracked cluster
+                        elif (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) > Params['freq_cri']): #If only Xres_f is smaller than the threshold
+                            pos = int(indicies[pos3][0]) #Match with lowest frequency difference
+                            result[str(id)] = pos #group to a tracked cluster
+                        elif (min(Xres_f) > Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']):
+                            pos = int(indicies[pos3_2][0]) #Match with lowest frequency difference
+                            result[str(id)] = pos #group to a tracked cluster
+                        else: #If none of the above, choose the one with highest MAC
+                            pos = int(indicies[pos2_2][0])
+                            result[str(id)] = pos #group to a tracked cluster
+                    else: #If none of the above, choose the one with lowest objective function
+                        pos = int(indicies[pos1][0])
+                        result[str(id)] = pos #group to a tracked cluster
+
+            else: #No cluster complies with the frequency criterion, so a new cluster is saved
+                result[str(id)] = "new"
+
+        elif len(indicies) == 1: #If one cluster complies with the mode shape criterion
+            pos = int(indicies[0][0])
+            result[str(id)] = pos #group to a tracked cluster
+
+        else: #Does not comply with the mode shape criterion
+            result[str(id)] = "new"
+
+    return result
+
+def resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters):
+    """
+    Resolve the case where two clusters match with the same tracked cluster, and determine which match is optimal.
+    Clusters that do not have an optimal match are given the match result "new".
+
+    Example:
+        Cluster 2 matches with tracked cluster 1
+        Cluster 3 matches with tracked cluster 1
+
+    Args:
+        possible_match_id (int): The index of the tracked cluster
+        itemindex (np.ndarray): The indices of clusters that have the same match
+        result (dict): Dictionary of suggested matches
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+
+    Returns:
+        pos (int): Value of the cluster that has the optimal match.
+        result (dict): Dictionary of re-done matches
+        cluster_index: The indices of clusters that have the same match
+
+    """
+    mean_MAC = []
+    keys = [str(y[0]) for y in itemindex.tolist()] #Make keys for dictionary based on indices in itemindex
+    for nn in itemindex: #Go through the indices of possible cluster matches
+        cluster = cluster_dict[int(nn[0])]
+        phi_all = cluster["mode_shapes"] #Find mode shapes in cluster
+        tracked_cluster_list = tracked_clusters[str(possible_match_id)] #Accessing all clusters in a tracked cluster group
+        tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group
+        phi_t_all = tracked_cluster['mode_shapes'] #Find mode shapes in tracked cluster
+
+        #Make the lists of mode shapes the same length, i.e. the same number of poles
+        if len(phi_all) > len(phi_t_all):
+            phi_all = phi_all[0:len(phi_t_all)]
+        elif len(phi_all) < len(phi_t_all):
+            phi_t_all = phi_t_all[0:len(phi_all)]
+        else: #Equal length
+            pass
+        MAC_matrix = np.zeros((len(phi_all),len(phi_all))) #Initialize a matrix of MAC values
+        for ii, phi in enumerate(phi_all):
+            for jj, phi_t in enumerate(phi_t_all):
+                MAC_matrix[ii,jj] = calculate_mac(phi,phi_t) #MAC
+
+        mean_MAC.append(np.mean(MAC_matrix)) #Save the mean MAC of this cluster compared to the matched tracked cluster
+    pos = mean_MAC.index(max(mean_MAC)) #Find the index with the highest mean MAC, i.e. the cluster that matches best with the tracked cluster.
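+    #Illustration with hypothetical values: if itemindex selects clusters 2 and 3
+    #and mean_MAC = [0.62, 0.91], then pos = 1, so cluster 3 keeps the match with
+    #the tracked cluster and cluster 2 is re-labelled "new" below.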
+
+    cluster_index = itemindex[:,0]
+
+    for key in keys:
+        if keys[pos] == key: #Let the best cluster match stay
+            pass
+        else: #Add the clusters with the worse match as a new cluster
+            result[key] = "new"
+    return pos, result, cluster_index
+
+def unique_match_debug_info(result,cluster_dict,t_list):
+    """
+    Debug info
+
+    Args:
+        result (dict): Dictionary of matches
+        cluster_dict (dict): Dictionary of clusters
+        t_list (list): List of median frequencies of the last tracked clusters
+
+    Returns:
+
+    """
+    print('\n')
+    for ii, key in enumerate(cluster_dict.keys()):
+        cluster = cluster_dict[key]
+        pos = result[str(ii)] #Find pos in result dict
+        if pos == "new":
+            print(cluster_dict[key]['median_f'],str(ii),pos)
+        else:
+            print(cluster_dict[key]['median_f'],str(ii),pos,t_list[pos])
\ No newline at end of file
diff --git a/src/methods/packages/pyoma/ssiWrapper.py b/src/methods/packages/pyoma/ssiWrapper.py
index eadd783..db560e9 100644
--- a/src/methods/packages/pyoma/ssiWrapper.py
+++ b/src/methods/packages/pyoma/ssiWrapper.py
@@ -1,6 +1,8 @@
 import typing
 import logging
+import numpy as np
+
 from pyoma2.algorithms.data.result import SSIResult
 from pyoma2.algorithms.data.run_params import SSIRunParams
 from pyoma2.algorithms.base import BaseAlgorithm
@@ -106,20 +108,33 @@ def run(self) -> SSIResult:
         Fns, Xis, Phis, Fn_cov, Xi_cov, Phi_cov = gen.applymask(
             lista, mask7, Phis.shape[2]
         )
-
+
+        #Mask out model orders below ordmin by setting them to NaN
+        for ii in range(ordmin):
+            id = ii
+            nan_Matrix = np.empty(Fns.shape[0])
+            nan_Matrix[:] = np.nan
+            Fns[:,id] = nan_Matrix
+            Xis[:,id] = nan_Matrix
+            Fn_cov[:,id] = nan_Matrix
+            Xi_cov[:,id] = nan_Matrix
+            nan_Matrix = np.empty((Phis.shape[0],Phis.shape[2]))
+            nan_Matrix[:,:] = np.nan
+            Phis[:,id,:] = nan_Matrix
+            Phi_cov[:,id,:] = nan_Matrix
-
-        # Get the labels of the poles
-        Lab = gen.SC_apply(
-            Fns,
-            Xis,
-            Phis,
-            ordmin,
-            ordmax,
-            step,
-            sc["err_fn"],
-            sc["err_xi"],
-            sc["err_phi"],
-        )
+        # # Get the labels of the poles
+        # Lab = gen.SC_apply(
+        #     Fns,
+        #     Xis,
+        #     Phis,
+        #     ordmin,
+        #     ordmax,
+        #     step,
+        #     sc["err_fn"],
+        #     sc["err_xi"],
+        #     sc["err_phi"],
+        # )
 
         return SSIResult(
             Obs=Obs,
@@ -130,7 +145,7 @@
             Fn_poles=Fns,
             Xi_poles=Xis,
             Phi_poles=Phis,
-            Lab=Lab,
+            # Lab=Lab,
             Fn_poles_cov=Fn_cov,
             Xi_poles_cov=Xi_cov,
             Phi_poles_cov=Phi_cov,
diff --git a/src/methods/sys_id.py b/src/methods/sysid_module.py
similarity index 88%
rename from src/methods/sys_id.py
rename to src/methods/sysid_module.py
index ffd2b1e..6b1986b 100644
--- a/src/methods/sys_id.py
+++ b/src/methods/sysid_module.py
@@ -9,7 +9,7 @@
 from data.comm.mqtt import setup_mqtt_client
 from data.accel.hbk.aligner import Aligner
 from methods.packages.pyoma.ssiWrapper import SSIcov
-from methods.constants import MODEL_ORDER, BLOCK_SHIFT, DEFAULT_FS
+from methods.constants import DEFAULT_FS, PARAMS
@@ -40,6 +40,7 @@ def sysid(data, params):
         name="SSIcovmm_mt",
         method='cov_mm',
         br=params['block_shift'],
+        ordmin=params['model_order_min'],
         ordmax=params['model_order'],
         calc_unc=True
     )
@@ -54,7 +55,6 @@ def sysid(data, params):
         'Xi_poles': output['Xi_poles'],
         'Xi_poles_cov': output['Xi_poles_cov'],
         'Phi_poles': output['Phi_poles'],
-        'Lab': output['Lab']
     }
@@ -96,11 +96,6 @@
     Returns:
         A tuple (OMA_output, timestamp) if successful, or None if data is not ready.
""" - oma_params = { - "Fs": fs, - "block_shift": BLOCK_SHIFT, - "model_order": MODEL_ORDER - } number_of_samples = int(sampling_period * 60 * fs) data, timestamp = aligner.extract(number_of_samples) @@ -109,7 +104,7 @@ def get_oma_results( return None, None try: - oma_output = sysid(data, oma_params) + oma_output = sysid(data, PARAMS) return oma_output, timestamp except Exception as e: print(f"sysID failed: {e}") @@ -129,14 +124,19 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_topic: The MQTT topic to publish results to. fs: Sampling frequency. """ + t1 = time.time() + loop = True while True: try: - time.sleep(0.5) + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") oma_output, timestamp = get_oma_results(sampling_period, aligner, fs) - print(f"OMA result: {oma_output}") - print(f"Timestamp: {timestamp}") + if oma_output: + print(f"Timestamp: {timestamp}") payload = { "timestamp": timestamp.isoformat(), "OMA_output": convert_numpy_to_list(oma_output) @@ -150,15 +150,19 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_client.publish(publish_topic, message, qos=1) print(f"[{timestamp.isoformat()}] Published OMA result to {publish_topic}") + loop = True break - except Exception as e: - print(f"Failed to publish OMA result: {e}") + print(f"\nFailed to publish OMA result: {e}") + except KeyboardInterrupt: - print("Shutting down gracefully") - aligner.client.loop_stop() - aligner.client.disconnect() + print("\nShutting down gracefully") + aligner.mqtt_client.loop_stop() + aligner.mqtt_client.disconnect() publish_client.disconnect() + loop = False break except Exception as e: - print(f"Unexpected error: {e}") + print(f"\nUnexpected error: {e}") + + return loop diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py index c047e0b..f74ba04 100644 --- a/tests/integration/methods/test_sys_id.py +++ b/tests/integration/methods/test_sys_id.py @@ -3,7 +3,7 @@ from datetime import datetime from unittest.mock import MagicMock -from methods import sys_id +from methods import sysid_module def test_sysid(): # Define OMA parameters @@ -17,7 +17,7 @@ def test_sysid(): data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T # Perform system identification - sysid_output = sys_id.sysid(data, oma_params) + sysid_output = sysid_module.sysid(data, oma_params) # Extract results using dictionary keys frequencies = sysid_output['Fn_poles'] @@ -59,7 +59,7 @@ def test_sysid_full_flow_success(): "model_order": 20 } - oma_result = sys_id.sysid(data, oma_params) + oma_result = sysid_module.sysid(data, oma_params) # Check output structure assert isinstance(oma_result, dict) @@ -68,7 +68,7 @@ def test_sysid_full_flow_success(): assert isinstance(oma_result[key], list) or isinstance(oma_result[key], np.ndarray) # Convert to JSON-safe structure - converted = sys_id.convert_numpy_to_list(oma_result) + converted = sysid_module.convert_numpy_to_list(oma_result) assert isinstance(converted, dict) assert isinstance(converted["Fn_poles"], list) @@ -76,7 +76,7 @@ def test_sysid_full_flow_success(): def test_get_oma_results_integration(mocker): from datetime import datetime import numpy as np - from methods import sys_id + from methods import sysid_module fs = 100 # sampling frequency mock_aligner = MagicMock() @@ -88,7 +88,7 @@ def test_get_oma_results_integration(mocker): mock_aligner.extract.return_value = (mock_data, mock_timestamp) - oma_output, timestamp = 
sys_id.get_oma_results(number_of_minutes, mock_aligner, fs) + oma_output, timestamp = sysid_module.get_oma_results(number_of_minutes, mock_aligner, fs) assert isinstance(oma_output, dict) assert "Fn_poles" in oma_output @@ -108,4 +108,4 @@ def test_sysid_raises_on_empty_data(): } with pytest.raises(Exception): - sys_id.sysid(data, oma_params) + sysid_module.sysid(data, oma_params) diff --git a/tests/unit/methods/test_sys_id_unit.py b/tests/unit/methods/test_sys_id_unit.py index 22efc4a..9799841 100644 --- a/tests/unit/methods/test_sys_id_unit.py +++ b/tests/unit/methods/test_sys_id_unit.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch from datetime import datetime import json -from methods.sys_id import ( +from methods.sysid_module import ( sysid, get_oma_results, publish_oma_results, From 62bddcd1058a558a4bc54b32f0b7a2de2f1f6bc2 Mon Sep 17 00:00:00 2001 From: au650680 Date: Tue, 28 Oct 2025 12:19:54 +0100 Subject: [PATCH 3/6] Edits to last pull Splitting function blocks into smaller functions. Other small changes. --- src/data/accel/hbk/accelerometer.py | 2 +- src/data/accel/metadata.py | 1 - src/examples/example.py | 51 +- .../{clustering.py => run_mode_clustering.py} | 30 +- ...{mode_tracking.py => run_mode_tracking.py} | 23 +- ...ting_parameters.py => run_model_update.py} | 16 +- src/examples/{run_pyoma.py => run_sysid.py} | 44 +- src/functions/calculate_mac.py | 16 + src/functions/clean_sysid_output.py | 133 ++ src/functions/plot_clusters.py | 210 +++ src/functions/plot_mode_tracking.py | 2 +- src/functions/plot_sysid.py | 142 ++ src/functions/sysid_plot.py | 455 ------- src/methods/clustering_tracking_module.py | 317 ----- src/methods/mode_clustering.py | 184 +++ .../mode_clustering_functions/__init__.py | 0 .../align_clusters.py | 181 +++ .../mode_clustering_functions/clustering.py | 226 +++ .../create_cluster.py | 398 ++++++ .../expand_cluster.py | 83 ++ .../initialize_Ip.py | 46 + src/methods/mode_tracking.py | 85 ++ .../mode_tracking_functions/__init__.py | 0 .../match_to_tracked_cluster.py | 149 ++ .../mode_tracking_functions/mode_tracking.py | 155 +++ .../resolve_nonunique_matches.py | 58 + src/methods/mode_update_functions/__init__.py | 0 .../mode_pairs.py | 0 .../model_update.py | 2 +- ...model_update_module.py => model_update.py} | 2 +- src/methods/packages/clustering.py | 1206 ----------------- src/methods/packages/mode_tracking.py | 355 ----- src/methods/packages/pyoma/ssiWrapper.py | 14 - src/methods/{sysid_module.py => sysid.py} | 26 +- tests/integration/methods/test_sys_id.py | 14 +- tests/unit/methods/test_sys_id_unit.py | 2 +- 36 files changed, 2180 insertions(+), 2448 deletions(-) rename src/examples/{clustering.py => run_mode_clustering.py} (53%) rename src/examples/{mode_tracking.py => run_mode_tracking.py} (64%) rename src/examples/{updating_parameters.py => run_model_update.py} (86%) rename src/examples/{run_pyoma.py => run_sysid.py} (68%) create mode 100644 src/functions/calculate_mac.py create mode 100644 src/functions/clean_sysid_output.py create mode 100644 src/functions/plot_clusters.py create mode 100644 src/functions/plot_sysid.py delete mode 100644 src/functions/sysid_plot.py delete mode 100644 src/methods/clustering_tracking_module.py create mode 100644 src/methods/mode_clustering.py create mode 100644 src/methods/mode_clustering_functions/__init__.py create mode 100644 src/methods/mode_clustering_functions/align_clusters.py create mode 100644 src/methods/mode_clustering_functions/clustering.py create mode 100644 
src/methods/mode_clustering_functions/create_cluster.py create mode 100644 src/methods/mode_clustering_functions/expand_cluster.py create mode 100644 src/methods/mode_clustering_functions/initialize_Ip.py create mode 100644 src/methods/mode_tracking.py create mode 100644 src/methods/mode_tracking_functions/__init__.py create mode 100644 src/methods/mode_tracking_functions/match_to_tracked_cluster.py create mode 100644 src/methods/mode_tracking_functions/mode_tracking.py create mode 100644 src/methods/mode_tracking_functions/resolve_nonunique_matches.py create mode 100644 src/methods/mode_update_functions/__init__.py rename src/methods/{packages => mode_update_functions}/mode_pairs.py (100%) rename src/methods/{packages => mode_update_functions}/model_update.py (97%) rename src/methods/{model_update_module.py => model_update.py} (98%) delete mode 100644 src/methods/packages/clustering.py delete mode 100644 src/methods/packages/mode_tracking.py rename src/methods/{sysid_module.py => sysid.py} (87%) diff --git a/src/data/accel/hbk/accelerometer.py b/src/data/accel/hbk/accelerometer.py index 421034d..94feef9 100644 --- a/src/data/accel/hbk/accelerometer.py +++ b/src/data/accel/hbk/accelerometer.py @@ -86,7 +86,7 @@ def process_message(self, msg: mqtt.MQTTMessage) -> None: if not oldest_deque: # Remove the key/deque from the map if it's empty del self.data_map[oldest_key] total_samples = sum(len(dq) for dq in self.data_map.values()) - print(f" Channel: {self.topic} Key: {samples_from_daq_start}, Samples: {num_samples}") + #print(f" Channel: {self.topic} Key: {samples_from_daq_start}, Samples: {num_samples}") except Exception as e: print(f"Error processing message: {e}") diff --git a/src/data/accel/metadata.py b/src/data/accel/metadata.py index 592f3c9..0e54027 100644 --- a/src/data/accel/metadata.py +++ b/src/data/accel/metadata.py @@ -11,7 +11,6 @@ def extract_fs_from_metadata(mqtt_config: Dict[str, Any]) -> int: def _on_metadata(client: MQTTClient, userdata, message) -> None: try: payload = json.loads(message.payload.decode("utf-8")) - print("Metadata",payload) fs_candidate = payload["Analysis chain"][0]["Sampling"] if fs_candidate: fs_result["fs"] = fs_candidate diff --git a/src/examples/example.py b/src/examples/example.py index 937b1df..1b79b2e 100644 --- a/src/examples/example.py +++ b/src/examples/example.py @@ -1,24 +1,24 @@ # pylint: disable=E1120 import click -from examples.acceleration_readings import read_accelerometers +from examples.acceleration_readings import (read_accelerometers,live_read_accelerometers) from examples.aligning_readings import align_acceleration_readings -from examples.run_pyoma import ( - run_oma_and_plot, - run_oma_and_publish, - run_oma_and_print, - run_oma_and_publish_loop, +from examples.run_sysid import ( + run_sysid_and_plot, + run_sysid_and_publish, + run_sysid_and_print, + live_sysid_and_publish, ) -from examples.clustering import ( - run_clustering_with_local_sysid, - run_clustering_with_remote_sysid, - run_live_clustering_with_remote_sysid, +from examples.run_mode_clustering import ( + run_mode_clustering_with_local_sysid, + run_mode_clustering_with_remote_sysid, + run_live_mode_clustering_with_remote_sysid, ) -from examples.mode_tracking import ( +from examples.run_mode_tracking import ( run_mode_tracking_with_local_sysid, run_mode_tracking_with_remote_sysid, run_live_mode_tracking_with_remote_sysid, ) -from examples.updating_parameters import ( +from examples.run_model_update import ( run_model_update_local_sysid, run_model_update_remote_sysid ) @@ 
-36,6 +36,11 @@ def cli(ctx, config): def accelerometers(ctx): read_accelerometers(ctx.obj["CONFIG"]) +@cli.command() +@click.pass_context +def live_accelerometers(ctx): + live_read_accelerometers(ctx.obj["CONFIG"]) + @cli.command() @click.pass_context def align_readings(ctx): @@ -44,38 +49,38 @@ def align_readings(ctx): @cli.command() @click.pass_context -def oma_and_publish(ctx): - run_oma_and_publish(ctx.obj["CONFIG"]) +def sysid_and_publish(ctx): + run_sysid_and_publish(ctx.obj["CONFIG"]) @cli.command() @click.pass_context -def oma_and_publish_looping(ctx): - run_oma_and_publish_loop(ctx.obj["CONFIG"]) +def live_sysid_publish(ctx): + live_sysid_and_publish(ctx.obj["CONFIG"]) @cli.command() @click.pass_context -def oma_and_plot(ctx): - run_oma_and_plot(ctx.obj["CONFIG"]) +def sysid_and_plot(ctx): + run_sysid_and_plot(ctx.obj["CONFIG"]) @cli.command() @click.pass_context -def oma_and_print(ctx): - run_oma_and_print(ctx.obj["CONFIG"]) +def sysid_and_print(ctx): + run_sysid_and_print(ctx.obj["CONFIG"]) @cli.command() @click.pass_context def clustering_with_local_sysid(ctx): - run_clustering_with_local_sysid(ctx.obj["CONFIG"]) + run_mode_clustering_with_local_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context def clustering_with_remote_sysid(ctx): - run_clustering_with_remote_sysid(ctx.obj["CONFIG"]) + run_mode_clustering_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context def live_clustering_with_remote_sysid(ctx): - run_live_clustering_with_remote_sysid(ctx.obj["CONFIG"]) + run_live_mode_clustering_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context diff --git a/src/examples/clustering.py b/src/examples/run_mode_clustering.py similarity index 53% rename from src/examples/clustering.py rename to src/examples/run_mode_clustering.py index 707bb8f..fceea97 100644 --- a/src/examples/clustering.py +++ b/src/examples/run_mode_clustering.py @@ -3,13 +3,13 @@ import matplotlib.pyplot as plt from data.comm.mqtt import load_config from data.accel.hbk.aligner import Aligner -from methods import sysid_module as sysID -from methods import clustering_tracking_module as MT +from methods import sysid as sysID +from methods import mode_clustering as MC from methods.constants import PARAMS -from functions.sysid_plot import plot_clusters +from functions.plot_clusters import plot_clusters # pylint: disable=R0914 -def run_clustering_with_local_sysid(config_path): +def run_mode_clustering_with_local_sysid(config_path): number_of_minutes = 1 config = load_config(config_path) mqtt_config = config["MQTT"] @@ -29,25 +29,25 @@ def run_clustering_with_local_sysid(config_path): t2 = time.time() t_text = f"Waiting for data for {round(t2-t1,1)} seconds" print(t_text,end="\r") - oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) + sysid_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs) data_client.disconnect() # Mode Tracks - dictionary_of_clusters, median_frequencies = MT.run_mode_clustering( - oma_output,PARAMS) + dictionary_of_clusters, median_frequencies = MC.cluster_sysid( + sysid_output,PARAMS) # Print frequencies print("\nMedian frequencies:", median_frequencies) - fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None) - plt.show(block=False) + fig_ax = plot_clusters(dictionary_of_clusters, sysid_output, PARAMS, fig_ax = None) + plt.show(block=True) sys.stdout.flush() -def run_clustering_with_remote_sysid(config_path): - oma_output, dictionary_of_clusters = 
MT.subscribe_and_cluster(config_path,PARAMS)
-    fig_ax = plot_clusters(dictionary_of_clusters, oma_output, PARAMS, fig_ax = None)
-    plt.show(block=False)
+def run_mode_clustering_with_remote_sysid(config_path):
+    sysid_output, dictionary_of_clusters, median_frequencies = MC.subscribe_and_cluster(config_path,PARAMS)
+    fig_ax = plot_clusters(dictionary_of_clusters, sysid_output, PARAMS, fig_ax = None)
+    plt.show(block=True)
     sys.stdout.flush()
 
-def run_live_clustering_with_remote_sysid(config_path):
-    MT.subscribe_cluster_looping(config_path,topic_index=0,plot=[1,1])
+def run_live_mode_clustering_with_remote_sysid(config_path):
+    MC.live_mode_clustering(config_path,topic_index=0,plot=[1,1])
diff --git a/src/examples/mode_tracking.py b/src/examples/run_mode_tracking.py
similarity index 64%
rename from src/examples/mode_tracking.py
rename to src/examples/run_mode_tracking.py
index 1c52c49..2dc1bc4 100644
--- a/src/examples/mode_tracking.py
+++ b/src/examples/run_mode_tracking.py
@@ -1,9 +1,11 @@
 import sys
+import time
 import matplotlib.pyplot as plt
 from data.comm.mqtt import load_config
 from data.accel.hbk.aligner import Aligner
-from methods import sysid_module as sysID
-from methods import clustering_tracking_module as MT
+from methods import sysid as sysID
+from methods import mode_clustering as MC
+from methods import mode_tracking as MT
 from methods.constants import PARAMS
 from functions.plot_mode_tracking import plot_tracked_modes
@@ -22,29 +24,34 @@ def run_mode_tracking_with_local_sysid(config_path):
     aligner = Aligner(data_client, topics=selected_topics)
 
     aligner_time = None
+    t1 = time.time()
     while aligner_time is None:
-        oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
+        time.sleep(0.1)
+        t2 = time.time()
+        t_text = f"Waiting for data for {round(t2-t1,1)} seconds"
+        print(t_text,end="\r")
+        sysid_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
     data_client.disconnect()
 
     # Mode Tracks
-    dictionary_of_clusters, median_frequencies = MT.run_mode_clustering(
-        oma_output,PARAMS)
+    dictionary_of_clusters, median_frequencies = MC.cluster_sysid(
+        sysid_output,PARAMS)
 
     # Print frequencies
     print("\nMedian frequencies:", median_frequencies)
 
     tracked_clusters = {}
-    tracked_clusters = MT.run_mode_tracking(dictionary_of_clusters,tracked_clusters,PARAMS)
+    tracked_clusters = MT.track_clusters(dictionary_of_clusters,tracked_clusters,PARAMS)
 
     fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None)
     plt.show(block=True)
     sys.stdout.flush()
 
 def run_mode_tracking_with_remote_sysid(config_path):
-    oma_output, clusters, tracked_clusters = MT.subscribe_and_get_clusters(config_path)
+    sysid_output, clusters, tracked_clusters = MT.subscribe_and_track_clusters(config_path)
     fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None)
     plt.show(block=True)
     sys.stdout.flush()
 
 def run_live_mode_tracking_with_remote_sysid(config_path):
-    MT.subscribe_cluster_and_tracking_looping(config_path,topic_index=0,plot=[1,1,1])
+    MT.live_mode_tracking(config_path,plot=[1,1])
diff --git a/src/examples/updating_parameters.py b/src/examples/run_model_update.py
similarity index 86%
rename from src/examples/updating_parameters.py
rename to src/examples/run_model_update.py
index 9391e95..3b420d3 100644
--- a/src/examples/updating_parameters.py
+++ b/src/examples/run_model_update.py
@@ -1,9 +1,9 @@
 import time
 from data.comm.mqtt import load_config
 from data.accel.hbk.aligner import Aligner
-from methods import sysid_module as sysID
-from methods import clustering_tracking_module as MT
-from methods import model_update_module as MU
+from methods import sysid as sysID
+from methods import mode_clustering as MC
+from methods import model_update as MU
 from methods.constants import PARAMS
 
 # pylint: disable=R0914, C0103
@@ -24,11 +24,11 @@ def run_model_update_local_sysid(config_path):
     while aligner_time is None:
         print("Not enough aligned yet")
         time.sleep(10)
-        oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
+        oma_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
     data_client.disconnect()
 
     # Mode Track
-    dictionary_clusters, median_frequencies = MT.run_mode_clustering(oma_output,PARAMS)
+    dictionary_clusters, median_frequencies = MC.cluster_sysid(oma_output,PARAMS)
 
     # Run model update
     update_result = MU.run_model_update(dictionary_clusters)
@@ -60,12 +60,12 @@ def run_model_update_local_sysid(config_path):
 
 def run_model_update_remote_sysid(config_path):
     config = load_config(config_path)
 
-    cleaned_values, _, _ = (
-        MT.subscribe_and_get_cleaned_values(config_path)
-    )
+    sysid_output, clusters, median_frequencies = (
+        MC.subscribe_and_cluster(config_path)
+    )
 
     # Run model update
-    update_result = MT.run_model_update(cleaned_values)
+    update_result = MU.run_model_update(clusters)
 
     if update_result is not None:
         optimized_parameters = update_result['optimized_parameters']
diff --git a/src/examples/run_pyoma.py b/src/examples/run_sysid.py
similarity index 68%
rename from src/examples/run_pyoma.py
rename to src/examples/run_sysid.py
index 0a08ba5..7161728 100644
--- a/src/examples/run_pyoma.py
+++ b/src/examples/run_sysid.py
@@ -3,14 +3,14 @@
 import matplotlib.pyplot as plt
 from data.comm.mqtt import load_config
 from data.accel.hbk.aligner import Aligner
-from functions.sysid_plot import plot_stabilization_diagram
-from methods import sysid_module as sysID
+from functions.plot_sysid import (plot_stabilization_diagram, plot_pre_stabilization_diagram)
+from methods import sysid as sysID
 from methods.constants import PARAMS
 
 
-def setup_oma(config_path, data_topic_indexes):
+def setup_sysid(config_path, data_topic_indexes):
     """
-    Helper function to set up OMA (Operational Modal Analysis).
+    Helper function to set up system identification (sysid) for Operational Modal Analysis.
 
     Parameters:
         config_path (str): Path to the configuration file.
@@ -32,11 +32,12 @@ def setup_oma(config_path, data_topic_indexes): return aligner, data_client, fs -def run_oma_and_plot(config_path): +def run_sysid_and_plot(config_path): number_of_minutes = 1 data_topic_indexes = [0, 2, 3, 4] - aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes) + fig_ax1 = None fig_ax = None aligner_time = None t1 = time.time() @@ -45,18 +46,19 @@ def run_oma_and_plot(config_path): t2 = time.time() t_text = f"Waiting for data for {round(t2-t1,1)} seconds" print(t_text,end="\r") - results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) + results, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs) data_client.disconnect() print(aligner_time) + fig_ax1 = plot_pre_stabilization_diagram(results, PARAMS, fig_ax=fig_ax1) fig_ax = plot_stabilization_diagram(results, PARAMS, fig_ax=fig_ax) plt.show(block=True) sys.stdout.flush() -def run_oma_and_print(config_path): +def run_sysid_and_print(config_path): number_of_minutes = 0.2 data_topic_indexes = [0, 2, 3, 4] - aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes) aligner_time = None t1 = time.time() @@ -65,7 +67,7 @@ def run_oma_and_print(config_path): t2 = time.time() t_text = f"Waiting for data for {round(t2-t1,1)} seconds" print(t_text,end="\r") - results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs) + results, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs) data_client.disconnect() sys.stdout.flush() @@ -75,16 +77,16 @@ def run_oma_and_print(config_path): print(f"\n cov_damping \n{results['Xi_poles_cov']}") -def run_oma_and_publish(config_path): +def run_sysid_and_publish(config_path): number_of_minutes = 1 data_topic_indexes = [0, 2, 3, 4] - aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes) publish_config = load_config(config_path)["sysID"] - # Setting up the client for publishing OMA results + # Setting up the client for publishing sysid results publish_client, _ = sysID.setup_client(publish_config) # fs not needed here - publish_result = sysID.publish_oma_results( + publish_result = sysID.publish_sysid_results( number_of_minutes, aligner, publish_client, @@ -98,23 +100,23 @@ def run_oma_and_publish(config_path): sys.stdout.flush() -def run_oma_and_publish_loop(config_path): +def live_sysid_and_publish(config_path): number_of_minutes = 1 data_topic_indexes = [0, 2, 3, 4] - aligner, data_client, fs = setup_oma(config_path, data_topic_indexes) + aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes) publish_config = load_config(config_path)["sysID"] - # Setting up the client for publishing OMA results + # Setting up the client for publishing sysid results publish_client, _ = sysID.setup_client(publish_config) # fs not needed here - loop = True - while loop: - loop = sysID.publish_oma_results( + publish_result = True + while publish_result: + publish_result = sysID.publish_sysid_results( number_of_minutes, aligner, publish_client, publish_config["TopicsToSubscribe"][0], fs ) - if loop is True: + if publish_result is True: print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}") diff --git a/src/functions/calculate_mac.py b/src/functions/calculate_mac.py new file mode 100644 index 0000000..f6b73f1 --- /dev/null +++ 
b/src/functions/calculate_mac.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+def calculate_mac(reference_mode_shape: np.array, second_mode_shape: np.array) -> float:
+    """
+    Calculate the Modal Assurance Criterion (MAC):
+
+        MAC(phi_r, phi_s) = |phi_r^H phi_s|^2 / ((phi_r^H phi_r) * (phi_s^H phi_s))
+
+    Args:
+        reference_mode_shape (np.array): Mode shape to compare to
+        second_mode_shape (np.array): Mode shape to compare
+    Returns:
+        MAC (float): Modal Assurance Criterion
+
+    """
+    numerator = np.abs(np.dot(reference_mode_shape.conj().T, second_mode_shape)) ** 2
+    denominator = np.dot(reference_mode_shape.conj().T, reference_mode_shape) * np.dot(second_mode_shape.conj().T, second_mode_shape)
+    return np.real(numerator / denominator)
\ No newline at end of file
diff --git a/src/functions/clean_sysid_output.py b/src/functions/clean_sysid_output.py
new file mode 100644
index 0000000..396fd6a
--- /dev/null
+++ b/src/functions/clean_sysid_output.py
@@ -0,0 +1,133 @@
+import numpy as np
+
+def remove_complex_conjugates(sysid_output):
+    """
+    Remove complex conjugates
+
+    Args:
+        sysid_output (Dict[str, Any]): Results from PyOMA-2
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    sysid = sysid_output.copy()
+    # sysid results as numpy array
+    frequencies = sysid['Fn_poles'].copy()
+    cov_freq = sysid['Fn_poles_cov'].copy()
+    damping_ratios = sysid['Xi_poles'].copy()
+    cov_damping = sysid['Xi_poles_cov'].copy()
+    mode_shapes = sysid['Phi_poles'].copy()
+
+    # Remove the complex conjugate entries
+    frequencies = frequencies[::2]  # This is 'S' as per algorithm
+    damping_ratios = damping_ratios[::2]  # This is 'S' as per algorithm
+    mode_shapes = mode_shapes[::2, :, :]
+    cov_freq = cov_freq[::2]
+    cov_damping = cov_damping[::2]
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
+
+def transform_sysid_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_):
+    """
+    Transform sysid results
+
+    Args:
+        frequencies_ (np.ndarray): Frequencies (mean)
+        cov_freq_ (np.ndarray): Covariance of frequency
+        damping_ratios_ (np.ndarray): Damping ratios (mean)
+        cov_damping_ (np.ndarray): Covariance of damping ratio
+        mode_shapes_ (np.ndarray): Mode shapes
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    # Transpose, flip and sort the arrays so that they map directly to the stabilization diagram.
+    # This means the frequency array maps directly to the plot:
+    #  MO.
+    #  5.| x    x
+    #  4.| x
+    #  3.| x
+    #  2.|      x
+    #  1.|
+    #  0.|
+    #    -1----4------- Frequency
+    # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3.
+    # That is, 6 rows, one per model order (including order 0), and 3 columns for the maximum number of poles per model order.
+    # The frequency array will then become:
+    #    _0___1_
+    #  5| 1    4
+    #  4| 1   Nan
+    #  3| 1   Nan
+    #  2| Nan  4
+    #  1| Nan Nan
+    #  0| Nan Nan
+
+    # Transformation of data
+    frequencies = np.transpose(frequencies_)
+    frequencies = np.flip(frequencies, 0)
+    sort_indices = np.argsort(frequencies,axis=1)
+    frequencies = np.take_along_axis(frequencies, sort_indices, axis=1)
+    cov_freq = np.transpose(cov_freq_)
+    cov_freq = np.flip(cov_freq, 0)
+    cov_freq = np.take_along_axis(cov_freq, sort_indices, axis=1)
+    damping_ratios = np.transpose(damping_ratios_)
+    damping_ratios = np.flip(damping_ratios, 0)
+    damping_ratios = np.take_along_axis(damping_ratios, sort_indices, axis=1)
+    cov_damping = np.transpose(cov_damping_)
+    cov_damping = np.flip(cov_damping, 0)
+    cov_damping = np.take_along_axis(cov_damping, sort_indices, axis=1)
+    mode_shapes = np.moveaxis(mode_shapes_, [0, 1, 2], [1, 0, 2])
+
+    mode_shapes2 = np.zeros(mode_shapes.shape,dtype=np.complex128)
+    for ii, indices in enumerate(sort_indices):
+        mode_shapes2[ii,:,:] = mode_shapes[(sort_indices.shape[0]-ii-1),indices,:]
+
+    # Array of model orders
+    model_order = np.arange(sort_indices.shape[0])
+    model_orders = np.stack((model_order,) * sort_indices.shape[1], axis=1)
+    model_orders = np.flip(model_orders)
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders
+
+def remove_highly_uncertain_points(sysid_output,sysid_params):
+    """
+    Remove highly uncertain points
+
+    Args:
+        sysid_output (Dict[str, Any]): Results from PyOMA-2
+        sysid_params (Dict[str, Any]): Parameters
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_complex_conjugates(sysid_output)
+
+    # =================== Removing highly uncertain poles =======================
+    freq_variance_treshold = sysid_params.get('freq_variance_treshold', 0.1)
+    damp_variance_treshold = sysid_params.get('damp_variance_treshold', 10**6)
+    frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies
+    damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios
+    indices_frequency = frequency_coefficient_variation > freq_variance_treshold
+    indices_damping = damping_coefficient_variation > damp_variance_treshold
+    above_nyquist = frequencies > sysid_params['Fs']/2
+    combined_indices = np.logical_or(np.logical_or(indices_frequency,indices_damping),above_nyquist)
+    frequencies[combined_indices] = np.nan
+    damping_ratios[combined_indices] = np.nan
+    cov_freq[combined_indices] = np.nan
+    cov_damping[combined_indices] = np.nan
+    mask = np.broadcast_to(np.expand_dims(combined_indices, axis=2), mode_shapes.shape)
+    mode_shapes[mask] = np.nan
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
\ No newline at end of file
diff --git a/src/functions/plot_clusters.py b/src/functions/plot_clusters.py
new file mode 100644
index 0000000..2d90f54
--- /dev/null
+++ b/src/functions/plot_clusters.py
@@ -0,0 +1,210 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+from functions.clean_sysid_output import remove_highly_uncertain_points
+from functions.plot_sysid import (add_scatter_data,add_plot_standard_flair,add_plot_annotation)
+plt.rcParams['font.family'] = 'Times New Roman'
+
+
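+# Example usage, as in src/examples/run_mode_clustering.py (clusters come from
+# mode_clustering.cluster_sysid, sysid_output from sysid.get_sysid_results):
+#     fig_ax = plot_clusters(clusters, sysid_output, PARAMS, fig_ax=None)    # first draw
+#     fig_ax = plot_clusters(clusters, sysid_output, PARAMS, fig_ax=fig_ax)  # redraw in place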
+def plot_clusters(clusters: Dict[str,dict],
+                  sysid_results: Dict[str, Any],
+                  sysid_params: Dict[str, Any],
+                  fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]:
+    """
+    Plot stabilization of clusters
+
+    Args:
+        clusters (dict): Dictionary of clusters
+        sysid_results (dict): PyOMA results
+        sysid_params (dict): sysid parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True)
+        title_number = 0
+    else:
+        fig, (ax1,ax2) = fig_ax
+        title = fig.axes[0].get_title()
+        ax1.clear()
+        ax2.clear()
+
+        iteration_number = title.split(' ')[-1]
+        title_number = int(iteration_number) + 1
+
+    # Pre-clean
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(sysid_results,sysid_params)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,None,error_dir="h",mark="^",lab='Non clustered',size=20)
+
+    for i, key in enumerate(clusters.keys()):
+        cluster = clusters[key]
+        MO = cluster['model_order']
+        ax1, col = add_scatter_cluster(ax1,cluster['f'],MO,cluster['cov_f'],i,error_dir="h")
+        ax1.vlines(np.median(cluster['f']),min(MO),
+                   max(MO),color=col)
+
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+    ax1.legend(prop={'size': 20})
+    ax1.set_title(f"Data set: {title_number}")
+
+    # # # ............................................................................
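+    # Second panel: the same pre-cleaned poles plotted as damping ratio vs. frequency,
+    # with the clustered poles overlaid and each point annotated with its model order.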
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,None,error_dir="v", mark="^",size=20)
+
+    for i, key in enumerate(clusters.keys()):
+        cluster = clusters[key]
+        ax2, col = add_scatter_cluster(ax2,cluster['f'],cluster['d'],cluster['cov_d'],i,error_dir="v")
+
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    if y[~np.isnan(y)].shape[0] > 1:
+        ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1))
+    else:
+        ax2.set_ylim(0, 0.1)
+
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def add_scatter_cluster(ax,x,y,cov,i,error_dir="h"):
+    sc = ax.scatter(x, y, marker="o", s=60, label=f'Cluster {i}')
+    col = sc.get_facecolors().tolist()
+    if cov is not None:
+        xerr = np.sqrt(cov) * 2
+        if error_dir == "h":
+            ax.errorbar(x, y, xerr=xerr, fmt="None", capsize=5, ecolor="gray", zorder=200)
+        else:
+            ax.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray", zorder=200)
+    return ax, col
\ No newline at end of file
diff --git a/src/functions/plot_mode_tracking.py b/src/functions/plot_mode_tracking.py
index bac9357..c07e4d3 100644
--- a/src/functions/plot_mode_tracking.py
+++ b/src/functions/plot_mode_tracking.py
@@ -64,4 +64,4 @@ def plot_tracked_modes(
     fig.canvas.draw()
     fig.canvas.flush_events()
 
-    return fig, (ax1)
+    return fig, ax1
diff --git a/src/functions/plot_sysid.py b/src/functions/plot_sysid.py
new file mode 100644
index 0000000..d4df361
--- /dev/null
+++ b/src/functions/plot_sysid.py
@@ -0,0 +1,142 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+from functions.clean_sysid_output import (remove_complex_conjugates,remove_highly_uncertain_points)
+plt.rcParams['font.family'] = 'Times New Roman'
+
+
+def plot_pre_stabilization_diagram(
+        sysid_results: Dict[str, Any],
+        sysid_params: Dict[str, Any],
+        fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]:
+
+    """
+    Plot stabilization of raw sysid data before pre-cleaning
+
+    Args:
+        sysid_results (dict): PyOMA results
+        sysid_params (dict): sysid parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+
+    """
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    # Note: remove_complex_conjugates returns (frequencies, cov_freq, damping_ratios,
+    # cov_damping, mode_shapes); the unpacking below follows that order.
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_complex_conjugates(sysid_results)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,cov_freq,error_dir="h")
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+
+    # # # ............................................................................
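+    # Second panel: damping ratio vs. frequency for the same raw poles, with
+    # 2*sqrt(cov) error bars and a model-order annotation on each point.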
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,cov_damping,error_dir="v")
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+    ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005)
+
+    fig.tight_layout()
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def plot_stabilization_diagram(
+        sysid_results: Dict[str, Any],
+        sysid_params: Dict[str, Any],
+        fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]:
+    """
+    Plot stabilization of sysid data after pre-cleaning
+
+    Args:
+        sysid_results (dict): PyOMA results
+        sysid_params (dict): sysid parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    # Pre-clean
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(sysid_results,sysid_params)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,cov_freq,error_dir="h")
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+
+    # # # ............................................................................
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,cov_damping,error_dir="v")
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+    ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005)
+
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def add_scatter_data(ax,x,y,cov,error_dir,mark="o",lab='Non clustered',size=50):
+    ax.scatter(x, y, marker=mark, s=size, c="r", label = lab)
+    if cov is not None:
+        xerr = np.sqrt(cov) * 2
+        xerr = xerr.flatten(order="f")
+        if error_dir == "h":
+            ax.errorbar(x, y, xerr=xerr, fmt="None", capsize=5, ecolor="gray")
+        else:
+            ax.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray")
+    return ax
+
+def add_plot_standard_flair(ax,sysid_params):
+    ax.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black')
+    ax.tick_params(axis='both', which='major', labelsize=17)
+
+    ax.set_xlim(0, sysid_params['Fs']/2)
+
+    # Add major and minor grid lines
+    ax.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    return ax
+
+def add_plot_annotation(ax,x,y,y_model_order):
+    for i, txt in enumerate(y_model_order):
+        ax.annotate(str(txt), (x[i], y[i]))
+    return ax
diff --git a/src/functions/sysid_plot.py b/src/functions/sysid_plot.py
deleted file mode 100644
index ea6f87c..0000000
--- a/src/functions/sysid_plot.py
+++ /dev/null
@@ -1,455 +0,0 @@
-from typing import Tuple, Dict, Any
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib.figure
-from methods.packages.clustering import (remove_complex_conjugates,remove_highly_uncertain_points)
-plt.rcParams['font.family'] = 'Times New Roman'
-
-
-def plot_pre_stabilization_diagram(
-        oma_results: Dict[str, Any],
-        oma_params: Dict[str, Any],
-        fig_ax)-> 
Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: - - """ - Plot stabilization of raw OMA data before pre-cleaning - - Args: - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters - fix_ax (tuple): fig and ax of plot to redraw - Returns: - fig_ax (tuple): fig and ax of plot - - """ - - - if fig_ax is None: - plt.ion() - fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) - else: - fig, (ax1,ax2) = fig_ax - ax1.clear() - ax2.clear() - - frequencies, damping_ratios, _, cov_freq, cov_damping = remove_complex_conjugates(oma_results) - - ax1.set_ylabel("Model order", fontsize=20, color = 'black') - ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax1.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 - - ax1.scatter(x, y_model_order, marker="o", s=50, c="r") - if cov_freq is not None: - xerr = 2*np.sqrt(cov_freq) - xerr = xerr.flatten(order="f") - ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") - - ax1.set_ylim(0, oma_params['model_order'] + 1) - ax1.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - # # # ............................................................................ - - ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') - ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax2.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y = damping_ratios.flatten(order="f") - - ax2.scatter(x, y, marker="o", s=50, c="r") - if cov_freq is not None: - xerr = np.sqrt(cov_damping) * 2 - xerr = xerr.flatten(order="f") - ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") - - ax2.set_ylim(0, 0.1+0.005) - ax2.set_xlim(0, oma_params['Fs']/2) - - for i, txt in enumerate(y_model_order): - ax2.annotate(str(txt), (x[i], y[i])) - - # Add major and minor grid lines - ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - fig.tight_layout() - fig.canvas.draw() - fig.canvas.flush_events() - - return fig, (ax1,ax2) - -def plot_stabilization_diagram( - oma_results: Dict[str, Any], - oma_params: Dict[str, Any], - fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: - """ - Plot stabilization of OMA data before after pre-cleaning - - Args: - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters - Returns: - fig_ax (tuple): fig and ax of plot - - """ - - if fig_ax is None: - plt.ion() - fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) - else: - fig, (ax1,ax2) = fig_ax - ax1.clear() - ax2.clear() - - #Pre-clean - frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params) - - ax1.set_ylabel("Model order", fontsize=20, color = 'black') - ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax1.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 - - ax1.scatter(x, y_model_order, marker="o", s=50, c="r") - - if cov_freq is not None: - xerr = 2*np.sqrt(cov_freq) - xerr = xerr.flatten(order="f") - ax1.errorbar(x, y_model_order, xerr=xerr, 
fmt="None", capsize=5, ecolor="gray") - - ax1.set_ylim(0, oma_params['model_order'] + 1) - ax1.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - # # # ............................................................................ - - ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') - ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax2.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y = damping_ratios.flatten(order="f") - - ax2.scatter(x, y, marker="o", s=50, c="r") - - if cov_freq is not None: - xerr = np.sqrt(cov_damping) * 2 - xerr = xerr.flatten(order="f") - ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") - - for i, txt in enumerate(y_model_order): - ax2.annotate(str(txt), (x[i], y[i])) - - ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) - ax2.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - fig.tight_layout() - fig.canvas.draw() - fig.canvas.flush_events() - - return fig, (ax1,ax2) - -def plot_clusters(clusters: Dict[str,dict], - oma_results: Dict[str, Any], - oma_params: Dict[str, Any], - fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: - """ - Plot stabilization of clusters - - Args: - clsuters (dict): Dictionary of clusters - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters - fix_ax (tuple): fig and ax of plot to redraw - Returns: - fig_ax (tuple): fig and ax of plot - - """ - - if fig_ax is None: - plt.ion() - #fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True) - fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True) - title_number = 0 - else: - fig, (ax1,ax2) = fig_ax - title = fig.axes[0].get_title() - ax1.clear() - ax2.clear() - - iteration_number = title.split(' ')[-1] - #print(iteration_number) - title_number = int(iteration_number) + 1 - - #Pre-clean - frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(oma_results,oma_params) - - ax1.set_ylabel("Model order", fontsize=20, color = 'black') - ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax1.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 - - ax1.scatter(x, y_model_order, marker="^", s=20, c="r", zorder=0, label='Non clustered') - - if cov_freq is not None: - xerr = 2*np.sqrt(cov_freq) - xerr = xerr.flatten(order="f") - ax1.errorbar( - x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="r", zorder=1 - ) - - idx = 0 - for i, key in enumerate(clusters.keys()): - cluster = clusters[key] - MO = cluster['model_order'] - freq_cluster = cluster['f'] - freq_cov_cluster = cluster['cov_f'] - - sc = ax1.scatter(freq_cluster, MO, marker="o", s=40, label=f'Cluster {i}') - col = sc.get_facecolors().tolist() - ax1.vlines(np.median(freq_cluster),min(cluster['model_order']), - max(cluster['model_order']),color=col) - - xerr_cluster = np.sqrt(freq_cov_cluster) * 2 - ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, - fmt="None", capsize=5, ecolor="gray",zorder=200) - idx += 1 - - ax1.set_ylim(0, oma_params['model_order'] + 1) - ax1.set_xlim(0, 
oma_params['Fs']/2) - # Add major and minor grid lines - ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - ax1.legend(prop={'size': 20}) #bbox_to_anchor=(0.1, 1.1) - ax1.set_title(f"Data set: {title_number}") - - # # # ............................................................................ - - ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') - ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax2.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y = damping_ratios.flatten(order="f") - - sc = ax2.scatter(x, y, marker="^", s=20, c="r", zorder=0, label='Non clustered') - if cov_freq is not None: - xerr = np.sqrt(cov_damping) * 2 - xerr = xerr.flatten(order="f") - ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") - - for i, key in enumerate(clusters.keys()): - cluster = clusters[key] - freq_cluster = cluster['f'] - damp_cluster = cluster['d'] - damp_cov_cluster = cluster['cov_d'] - - ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3) - xerr_cluster = np.sqrt(damp_cov_cluster) * 2 - ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, - fmt="None", capsize=5, ecolor="gray") - - for i, txt in enumerate(y_model_order): - ax2.annotate(str(txt), (x[i], y[i])) - - if y[~np.isnan(y)].shape[0] > 1: - ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1)) - else: - ax2.set_ylim(0, 0.1) - ax2.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - fig.tight_layout() - fig.canvas.draw() - fig.canvas.flush_events() - - return fig, (ax1,ax2) - -def plot_stabilization_diagram_for_paper( - oma_results: Dict[str, Any], - oma_params: Dict[str, Any], - fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: - """ - Plot stabilization of OMA data before after pre-cleaning for paper - - Args: - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters - Returns: - fig_ax (tuple): fig and ax of plot - - """ - if fig_ax is None: - plt.ion() - fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) - else: - fig, (ax1) = fig_ax - ax1.clear() - - #Pre-clean - frequencies, cov_freq, damping_ratios, cov_damping,_ = remove_highly_uncertain_points(oma_results,oma_params) - - ax1.set_ylabel("Model order", fontsize=20, color = 'black') - ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax1.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 - - ax1.scatter(x, y_model_order, marker="o", s=50, c="r") - - if cov_freq is not None: - xerr = 2*np.sqrt(cov_freq) - xerr = xerr.flatten(order="f") - ax1.errorbar(x, y_model_order, xerr=xerr, fmt="None", capsize=5, ecolor="gray") - - ax1.set_ylim(0, oma_params['model_order'] + 1) - ax1.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - # # # ............................................................................ 
- - if fig_ax is None: - plt.ion() - fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) - else: - fig, (ax2) = fig_ax - ax2.clear() - ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') - ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax2.tick_params(axis='both', which='major', labelsize=17) - - x = frequencies.flatten(order="f") - y = damping_ratios.flatten(order="f") - - ax2.scatter(x, y, marker="o", s=50, c="r") - - if cov_freq is not None: - xerr = np.sqrt(cov_damping) * 2 - xerr = xerr.flatten(order="f") - ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") - - ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005) - ax2.set_xlim(0, oma_params['Fs']/2) - - # Add major and minor grid lines - ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - fig.tight_layout() - fig.canvas.draw() - fig.canvas.flush_events() - - return fig, (ax1,ax2) - -def plot_clusters_for_paper(clusters: Dict[str,dict], - oma_params: Dict[str, Any], - fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: - """ - Plot stabilization of clusters for paper - - Args: - clsuters (dict): Dictionary of clusters - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters - fix_ax (tuple): fig and ax of plot to redraw - Returns: - fig_ax (tuple): fig and ax of plot - - """ - if fig_ax is None: - plt.ion() - fig, (ax1) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) - else: - fig, (ax1) = fig_ax - ax1.clear() - - ax1.set_ylabel("Model order", fontsize=20, color = 'black') - ax1.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax1.tick_params(axis='both', which='major', labelsize=17) - - idx = 0 - for i, key in enumerate(clusters.keys()): - - cluster = clusters[key] - MO = cluster['model_order'] - freq_cluster = cluster['f'] - freq_cov_cluster = cluster['cov_f'] - - ax1.scatter(freq_cluster, MO, marker="o", s=50, label=f'Cluster {i+1}') - - xerr_cluster = np.sqrt(freq_cov_cluster) * 2 - ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, - fmt="None", capsize=5, ecolor="gray",zorder=200) - idx += 1 - - ax1.set_ylim(0, oma_params['model_order'] + 1) - ax1.set_xlim(0, oma_params['Fs']/2) - # Add major and minor grid lines - ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - ax1.legend(prop={'size': 20}) #bbox_to_anchor=(0.1, 1.1) - - # # # ............................................................................ 
- - if fig_ax is None: - plt.ion() - fig, (ax2) = plt.subplots(1,1,figsize=(8, 6), tight_layout=True) - - else: - fig, (ax2) = fig_ax - ax2.clear() - - ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') - ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') - ax2.tick_params(axis='both', which='major', labelsize=17) - - for i, key in enumerate(clusters.keys()): - cluster = clusters[key] - freq_cluster = cluster['f'] - damp_cluster = cluster['d'] - damp_cov_cluster = cluster['cov_d'] - xerr = np.sqrt(damp_cov_cluster) * 2 - xerr = xerr.flatten(order="f") - - ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3,label=f'Cluster {i+1}') - xerr_cluster = np.sqrt(damp_cov_cluster) * 2 - ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, - fmt="None", capsize=5, ecolor="gray") - - ax2.set_ylim(0, max(damp_cluster)+0.005) - ax2.set_xlim(0, oma_params['Fs']/2) - ax2.legend(prop={'size': 20}) - - # Add major and minor grid lines - ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) - ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - - fig.tight_layout() - fig.canvas.draw() - fig.canvas.flush_events() - - return fig, (ax1,ax2) diff --git a/src/methods/clustering_tracking_module.py b/src/methods/clustering_tracking_module.py deleted file mode 100644 index 3ffdf88..0000000 --- a/src/methods/clustering_tracking_module.py +++ /dev/null @@ -1,317 +0,0 @@ -import json -import sys -import threading -from typing import Any, List, Dict, Tuple -import numpy as np -import matplotlib.pyplot as plt -import paho.mqtt.client as mqtt -from methods.constants import PARAMS -from methods.packages.clustering import cluster_func -from methods.packages.mode_tracking import cluster_tracking -from functions.sysid_plot import (plot_clusters,plot_stabilization_diagram) -from functions.plot_mode_tracking import plot_tracked_modes -from data.comm.mqtt import load_config, setup_mqtt_client -# pylint: disable=C0103, W0603 - -# Global threading event to wait for OMA data -result_ready = threading.Event() -oma_output_global = None # will store received OMA data inside callback - -def _convert_oma_output(obj: Any) -> Any: - """Recursively convert JSON structure into complex numbers and numpy arrays.""" - if isinstance(obj, dict): - if "real" in obj and "imag" in obj: - return complex(obj["real"], obj["imag"]) - return {k: _convert_oma_output(v) for k, v in obj.items()} - - if isinstance(obj, list): - try: - return np.array([_convert_oma_output(item) for item in obj]) - except Exception: - return [_convert_oma_output(item) for item in obj] - - return obj - - -def _on_connect(client: mqtt.Client, userdata: dict, flags: dict, reason_code: int, properties: mqtt.Properties) -> None: - """Callback when MQTT client connects.""" - if reason_code == 0: - print("Connected to MQTT broker.") - client.subscribe(userdata["topic"], qos=userdata["qos"]) - print(f"Subscribed to topic: {userdata['topic']}") - else: - print(f"Failed to connect to MQTT broker. 
Code: {reason_code}") - - -def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) -> None: - """Callback when a message is received.""" - global oma_output_global - print(f"Message received on topic: {msg.topic}") - try: - raw = json.loads(msg.payload.decode("utf-8")) - oma_output = _convert_oma_output(raw["OMA_output"]) - timestamp = raw["timestamp"] - print(f"Received OMA data at timestamp: {timestamp}") - oma_output_global = oma_output - result_ready.set() - except Exception as e: - print(f"Error processing OMA message: {e}") - - -def run_mode_clustering(oma_output: Any, params: dict[str,Any]) -> Tuple[dict[str,Any], np.ndarray]: - """ - Runs the mode clustering algorithm. - - Args: - oma_output (Any): OMA output from subscription or elsewhere. - Returns: - cluster_dict (dict[str,Any]), - median_frequencies (np.ndarray), - """ - dictionary_clusters = cluster_func(oma_output, params) - - median_frequencies = np.array([dictionary_clusters[key]["median_f"] - for key in dictionary_clusters.keys()]) - return dictionary_clusters, median_frequencies - - -def run_mode_tracking(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], - params: dict[str,Any]) -> dict[str,Any]: - """ - Runs the mode tracking algorithm. - - Args: - cluster_dict (dict[str,Any]): Clusters from OMA - Returns: - tracked_clusters (dict[str,Any]): Tracked clusters - """ - tracked_clusters = cluster_tracking(cluster_dict, tracked_clusters, params) - return tracked_clusters - - -def subscribe_and_cluster(config_path: str, params: Dict[str,Any] - ) -> Tuple[Dict[str,Any], Dict[str,Any]]: - """ - Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. - - Args: - config_path (str): Path to config JSON. - - Returns: - oma_output_global (Dict[str,Any]): OMA output - clusters (Dict[str,Any]]): Clusters - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - print("Waiting for OMA data...") - try: - result_ready.wait() # Wait until message arrives - mqtt_client.loop_stop() - mqtt_client.disconnect() - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. Running mode clustering and tracking...") - clusters, median_frequencies = run_mode_clustering(oma_output_global,params) - print("Clustered frequencies", median_frequencies) - - except KeyboardInterrupt: - print("Shutting down gracefully") - mqtt_client.loop_stop() - mqtt_client.disconnect() - except Exception as e: - print(f"Unexpected error: {e}") - - return oma_output_global, clusters - - -def subscribe_and_get_clusters(config_path: str) -> Tuple[List[Dict], np.ndarray, np.ndarray]: - """ - Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. - - Args: - config_path (str): Path to config JSON. 
- - Returns: - oma_output_global (Dict[str,Any]): OMA output - clusters (Dict[str,Any]]): Clusters - tracked_clusters (Dict[str,Any]]): Tracked clusters - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - tracked_clusters = {} - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - print("Waiting for OMA data...") - try: - result_ready.wait() # Wait until message arrives - mqtt_client.loop_stop() - mqtt_client.disconnect() - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. Running mode clustering and tracking...") - clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) - print("Clustered frequencies", median_frequencies) - tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) - - except KeyboardInterrupt: - print("Shutting down gracefully") - mqtt_client.loop_stop() - mqtt_client.disconnect() - except Exception as e: - print(f"Unexpected error: {e}") - - return oma_output_global, clusters, tracked_clusters - - -def subscribe_cluster_looping(config_path: str, topic_index: int = 0, - plot: np.ndarray[bool] = np.array([1,1]) - ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: - """ - Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. - - Args: - config_path (str): Path to config JSON. - topic_index (int): Topic to subscribe - plot (np.ndarray[bool]): Array describing what plots to show - - Returns: - oma_output_global (Dict[str,Any]): OMA output - clusters (Dict[str,Any]]): Clusters - tracked_clusters (Dict[str,Any]]): Tracked clusters - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - - fig_ax1 = None - fig_ax2 = None - while True: - # try: - print("Waiting for OMA data...") - result_ready.wait() # Wait until message arrives - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. 
Running mode clustering and tracking...") - result_ready.clear() - - if plot[0] == 1: - fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) - plt.show(block=False) - - clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) - print("Clustered frequencies", median_frequencies) - - if plot[1] == 1: - fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) - plt.show(block=False) - - sys.stdout.flush() - # except KeyboardInterrupt: - # print("Shutting down gracefully") - # mqtt_client.loop_stop() - # mqtt_client.disconnect() - # break - # except Exception as e: - # print(f"Unexpected error: {e}") - -def subscribe_cluster_and_tracking_looping(config_path: str, topic_index: int = 0, - plot: np.ndarray[bool] = np.array([1,1,1]) - ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: - """ - Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. - - Args: - config_path (str): Path to config JSON. - topic_index (int): Topic to subscribe - plot (np.ndarray[bool]): Array describing what plots to show - - Returns: - - Plots: - Stabilization diagram - Cluster plot - Tracked clusters plot - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - tracked_clusters = {} - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - - fig_ax1 = None - fig_ax2 = None - fig_ax3 = None - while True: - try: - print("Waiting for OMA data...") - result_ready.wait() # Wait until message arrives - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. 
Running mode clustering and tracking...") - result_ready.clear() - - if plot[0] == 1: - fig_ax1 = plot_stabilization_diagram(oma_output_global,PARAMS,fig_ax=fig_ax1) - plt.show(block=False) - - clusters, median_frequencies = run_mode_clustering(oma_output_global,PARAMS) - print("Clustered frequencies", median_frequencies) - tracked_clusters = run_mode_tracking(clusters, tracked_clusters,PARAMS) - - if plot[1] == 1: - fig_ax2 = plot_clusters(clusters,oma_output_global,PARAMS,fig_ax=fig_ax2) - plt.show(block=False) - if plot[2] == 1: - fig_ax3 = plot_tracked_modes(tracked_clusters,PARAMS,fig_ax=fig_ax3,x_length=None) - plt.show(block=False) - sys.stdout.flush() - except KeyboardInterrupt: - print("Shutting down gracefully") - mqtt_client.loop_stop() - mqtt_client.disconnect() - break - except Exception as e: - print(f"Unexpected error: {e}") diff --git a/src/methods/mode_clustering.py b/src/methods/mode_clustering.py new file mode 100644 index 0000000..fc1715f --- /dev/null +++ b/src/methods/mode_clustering.py @@ -0,0 +1,184 @@ +import json +import sys +import threading +from typing import Any, List, Dict, Tuple +import numpy as np +import matplotlib.pyplot as plt +import paho.mqtt.client as mqtt +from methods.constants import PARAMS +from methods.mode_clustering_functions.clustering import cluster_func +from functions.plot_sysid import plot_stabilization_diagram +from functions.plot_clusters import plot_clusters +from data.comm.mqtt import load_config, setup_mqtt_client +# pylint: disable=C0103, W0603 + +# Global threading event to wait for sysid data +result_ready = threading.Event() +sysid_output_global = None # will store received sysid data inside callback + +def _convert_sysid_output(obj: Any) -> Any: + """Recursively convert JSON structure into complex numbers and numpy arrays.""" + if isinstance(obj, dict): + if "real" in obj and "imag" in obj: + return complex(obj["real"], obj["imag"]) + return {k: _convert_sysid_output(v) for k, v in obj.items()} + + if isinstance(obj, list): + try: + return np.array([_convert_sysid_output(item) for item in obj]) + except Exception: + return [_convert_sysid_output(item) for item in obj] + + return obj + + +def _on_connect(client: mqtt.Client, userdata: dict, flags: dict, reason_code: int, properties: mqtt.Properties) -> None: + """Callback when MQTT client connects.""" + if reason_code == 0: + print("Connected to MQTT broker.") + client.subscribe(userdata["topic"], qos=userdata["qos"]) + print(f"Subscribed to topic: {userdata['topic']}") + else: + print(f"Failed to connect to MQTT broker. Code: {reason_code}") + + +def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) -> None: + """Callback when a message is received.""" + global sysid_output_global + print(f"Message received on topic: {msg.topic}") + try: + raw = json.loads(msg.payload.decode("utf-8")) + sysid_output = _convert_sysid_output(raw["sysid_output"]) + timestamp = raw["timestamp"] + print(f"Received sysid data at timestamp: {timestamp}") + sysid_output_global = sysid_output + result_ready.set() + except Exception as e: + print(f"Error processing sysid message: {e}") + + +def cluster_sysid(sysid_output: Any, params: dict[str,Any]) -> Tuple[dict[str,Any], np.ndarray]: + """ + Runs the mode clustering algorithm. + + Args: + sysid_output (Any): sysid output from subscription or elsewhere. 
+    Returns:
+        cluster_dict (dict[str,Any]): Dictionary of clusters
+        median_frequencies (np.ndarray): Median frequency of each cluster
+    """
+    dictionary_clusters = cluster_func(sysid_output, params)
+
+    median_frequencies = np.array([dictionary_clusters[key]["median_f"]
+                                   for key in dictionary_clusters.keys()])
+    return dictionary_clusters, median_frequencies
+
+def subscribe_and_cluster(config_path: str, params: Dict[str,Any]
+                          ) -> Tuple[Dict[str,Any], Dict[str,Any], np.ndarray]:
+    """
+    Subscribes to the MQTT broker, receives one sysid message, runs mode clustering,
+    and returns the results.
+
+    Args:
+        config_path (str): Path to config JSON.
+        params (Dict[str,Any]): Clustering parameters
+
+    Returns:
+        sysid_output_global (Dict[str,Any]): sysid output
+        clusters (Dict[str,Any]): Clusters
+        median_frequencies (np.ndarray): Median frequency of each cluster
+    """
+    global sysid_output_global
+    sysid_output_global = None  # Reset in case old data is present
+    result_ready.clear()
+    clusters, median_frequencies = {}, np.array([])  # Defaults in case reception fails
+
+    config = load_config(config_path)
+    mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0)
+
+    mqtt_client.user_data_set({"topic": selected_topic, "qos": 0})
+    mqtt_client.on_connect = _on_connect
+    mqtt_client.on_message = _on_message
+    mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60)
+    mqtt_client.loop_start()
+    print("Waiting for sysid data...")
+    try:
+        result_ready.wait()  # Wait until message arrives
+        mqtt_client.loop_stop()
+        mqtt_client.disconnect()
+
+        if sysid_output_global is None:
+            raise RuntimeError("Failed to receive sysid data.")
+
+        print("sysid data received. Running mode clustering...")
+        clusters, median_frequencies = cluster_sysid(sysid_output_global, params)
+        print("Clustered frequencies", median_frequencies)
+
+    except KeyboardInterrupt:
+        print("Shutting down gracefully")
+        mqtt_client.loop_stop()
+        mqtt_client.disconnect()
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+
+    return sysid_output_global, clusters, median_frequencies
+
+
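A short usage sketch may help here. It assumes a config file path such as "config/mqtt.json" (hypothetical) and the shared PARAMS dictionary from methods.constants, as used by the examples elsewhere in this patch:

    from methods.constants import PARAMS
    from methods.mode_clustering import subscribe_and_cluster

    # Blocks until one sysid message arrives, then clusters it.
    sysid_output, clusters, median_frequencies = subscribe_and_cluster(
        "config/mqtt.json", PARAMS)  # hypothetical config path
    for key, cluster in clusters.items():
        print(key, cluster["median_f"])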
+def live_mode_clustering(config_path: str, topic_index: int = 0,
+                         plot: np.ndarray = np.array([1,1])) -> None:
+    """
+    Subscribes to the MQTT broker; for every received sysid message, runs mode
+    clustering and plots the results. Continues until stopped.
+
+    Args:
+        config_path (str): Path to config JSON.
+        topic_index (int): Topic to subscribe
+        plot (np.ndarray): Array describing which plots to show
+
+    Returns:
+        None. Runs until interrupted.
+    """
+    global sysid_output_global
+    sysid_output_global = None  # Reset in case old data is present
+    result_ready.clear()
+
+    config = load_config(config_path)
+    mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index)
+
+    mqtt_client.user_data_set({"topic": selected_topic, "qos": 0})
+    mqtt_client.on_connect = _on_connect
+    mqtt_client.on_message = _on_message
+    mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60)
+    mqtt_client.loop_start()
+
+    fig_ax1 = None
+    fig_ax2 = None
+    while True:
+        try:
+            print("Waiting for sysid data...")
+            result_ready.wait()  # Wait until message arrives
+
+            if sysid_output_global is None:
+                raise RuntimeError("Failed to receive sysid data.")
+
+            print("sysid data received. Running mode clustering...")
+            result_ready.clear()
+
+            if plot[0] == 1:
+                fig_ax1 = plot_stabilization_diagram(sysid_output_global,PARAMS,fig_ax=fig_ax1)
+                plt.show(block=False)
+
+            clusters, median_frequencies = cluster_sysid(sysid_output_global,PARAMS)
+            print("Clustered frequencies", median_frequencies)
+
+            if plot[1] == 1:
+                fig_ax2 = plot_clusters(clusters,sysid_output_global,PARAMS,fig_ax=fig_ax2)
+                plt.show(block=False)
+
+            sys.stdout.flush()
+        except KeyboardInterrupt:
+            print("Shutting down gracefully")
+            mqtt_client.loop_stop()
+            mqtt_client.disconnect()
+            break
+        except Exception as e:
+            print(f"Unexpected error: {e}")
\ No newline at end of file
diff --git a/src/methods/mode_clustering_functions/__init__.py b/src/methods/mode_clustering_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/methods/mode_clustering_functions/align_clusters.py b/src/methods/mode_clustering_functions/align_clusters.py
new file mode 100644
index 0000000..b9d3c7f
--- /dev/null
+++ b/src/methods/mode_clustering_functions/align_clusters.py
@@ -0,0 +1,181 @@
+import numpy as np
+from typing import Any
+from functions.calculate_mac import calculate_mac
+
+def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,dict]:
+    """
+    Alignment/merging of clusters
+
+    Args:
+        cluster_dict (dict): Dictionary of multiple clusters
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster_dict (dict): Dictionary of aligned clusters
+
+    """
+    #print("\nCluster alignment")
+    median_f = []
+    for key in cluster_dict.keys():  #Find the median of each cluster
+        cluster = cluster_dict[key]
+        median_f.append(np.median(cluster['f']))
+    median_f = np.array(median_f)
+
+    deleted_cluster_id = []
+    for ii, m_f in enumerate(median_f):  #Go through all medians
+        if ii in deleted_cluster_id:  #Skip clusters that have already been merged away
+            #print(deleted_cluster_id)
+            continue
+        # Calculate the absolute difference between the selected median and all medians
+        diff = abs(median_f-m_f)
+        # If this difference is above 0 (not itself) and inside the bounds:
+        # The bound is the minimum of either median_f * allignment_factor[0] or
+        # (sampling frequency / 2) * allignment_factor[1].
+        # For lower median frequencies the bound is determined by the size of the median frequency.
+        # For higher median frequencies the bound is determined by the sampling frequency.
+
+        mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1]))
+        indices = np.argwhere(mask)  #Indices of clusters that are closely located in frequency
+
+        #print(cluster_dict.keys())
+        if indices.shape[0] > 0:  # If one or more clusters are found
+            ids = indices[:,0]
+            #print("ids",ids)
+            for id in ids:  #Go through all clusters that are closely located
+                if id in deleted_cluster_id:
+                    continue
+
+                #print("id",id)
+                break_loop = 0
+                cluster1 = cluster_dict[str(ii)]  #Parent cluster
+                cluster2 = cluster_dict[str(id)]  #Co-located cluster
+
+                # Proposed method
+                # for r in cluster2['model_order']:
+                #     if r in cluster1['model_order']:  #If the two clusters have poles with the same model order, then skip the alignment
+                #         print("Clusters have the same MO",cluster2['model_order'],cluster1['model_order'])
+                #         break_loop = 1
+                # if break_loop == 1:
+                #     break
+
+                MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0])  # Check the mode shape of the first pole in each cluster
+                if MAC >= Params['tMAC']:  #If the MAC criterion is met, join the two clusters
+                    cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                    cluster_dict[str(ii)] = cluster  #Save the new larger cluster
+                    if len(cluster_remaining) == 0:  #If the remaining cluster is empty
+                        cluster_dict.pop(str(id), None)  #Remove the co-located cluster
+                        deleted_cluster_id.append(int(id))  #Record the deleted cluster id
+                    else:
+                        cluster_dict[str(id)] = cluster_remaining  #Save the remaining cluster
+
+                else:  #Check whether the mode shapes across any of the poles comply with the MAC criterion
+
+                    MAC = np.zeros((cluster1['mode_shapes'].shape[0],cluster2['mode_shapes'].shape[0]))
+                    for jj, ms1 in enumerate(cluster1['mode_shapes']):
+                        for kk, ms2 in enumerate(cluster2['mode_shapes']):
+                            MAC[jj,kk] = calculate_mac(ms1,ms2)
+                    if MAC.max() >= Params['tMAC']:  #If the MAC criterion is met, join the clusters
+                        cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                        cluster_dict[str(ii)] = cluster  #Save the new larger cluster
+                        if len(cluster_remaining) == 0:  #If the remaining cluster is empty
+                            cluster_dict.pop(str(id), None)  #Remove the co-located cluster
+                            deleted_cluster_id.append(int(id))  #Record the deleted cluster id
+                        else:
+                            cluster_dict[str(id)] = cluster_remaining  #Save the remaining cluster
+
+    cluster_dict_aligned = cluster_dict
+    return cluster_dict_aligned
+
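Both the pole-level check and the pairwise matrix above go through calculate_mac from functions.calculate_mac, which is not part of this patch. For reference, the Modal Assurance Criterion it is expected to compute can be sketched as below; this is an editor's illustration under that assumption, not the repository's implementation:

    import numpy as np

    def mac(phi_1, phi_2):
        # MAC = |phi_1^H phi_2|^2 / ((phi_1^H phi_1) * (phi_2^H phi_2))
        numerator = abs(np.vdot(phi_1, phi_2)) ** 2  # np.vdot conjugates its first argument
        denominator = np.vdot(phi_1, phi_1).real * np.vdot(phi_2, phi_2).real
        return float(numerator / denominator)

A MAC of 1 means identical (possibly scaled) mode shapes, so thresholding against Params['tMAC'] keeps only poles whose shapes match the cluster.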
+def join_clusters(cluster_1: dict[str,Any], cluster_2: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Join two clusters together
+
+    Args:
+        cluster_1 (dict): Cluster
+        cluster_2 (dict): Cluster
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Joined cluster
+        cluster_remaining (dict): The cluster that remains
+
+    """
+    #Adding two clusters together
+    cluster = {}
+    cluster_remaining = {}
+    row1 = cluster_1['row']
+    row2 = cluster_2['row']
+
+    #Should the dominant cluster be the one that has the highest model orders?
+    if row1.shape[0] >= row2.shape[0]:  #Let the largest cluster be the dominant one
+        cluster1 = cluster_1
+        cluster2 = cluster_2
+        row1 = cluster_1['row']
+        row2 = cluster_2['row']
+    else:
+        cluster1 = cluster_2
+        cluster2 = cluster_1
+        row1 = cluster_2['row']
+        row2 = cluster_1['row']
+
+    median_f1 = np.median(cluster1['f'])
+
+    for MO in range(Params['model_order']):  #Go through all poles in a cluster
+        jj = np.argwhere(row1 == MO)
+        id = np.argwhere(row2 == MO)
+        if MO in row1:  #If a pole in the largest cluster exists for this model order
+            r1 = MO
+            if MO in row2:  #If a pole also exists at the same model order
+                #Get the frequencies of the poles
+                f1 = cluster1['f'][jj[:,0]]
+                f2 = cluster2['f'][id[:,0]]
+                if abs(median_f1-f2) >= abs(median_f1-f1):  #If the pole in cluster 1 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster2,id[:,0])
+                else:  #If the pole in cluster 2 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster2,id[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster1,jj[:,0])
+            else:  #If only one pole exists, in the largest cluster
+                cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+        elif MO in row2:  #If a pole in the smallest cluster exists for the model order
+            cluster = append_cluster_data(cluster,cluster2,id[:,0])
+
+    return cluster, cluster_remaining
+
+def append_cluster_data(cluster: dict[str,Any], cluster2: dict[str,Any], id: int) -> dict[str,Any]:
+    """
+    Add cluster data to an existing cluster
+
+    Args:
+        cluster (dict): Existing cluster
+        cluster2 (dict): Cluster
+        id (int): Indices of the data to append
+    Returns:
+        cluster (dict): Cluster
+
+    """
+    if len(cluster) == 0:  #If it is the first pole
+        cluster['f'] = cluster2['f'][id]
+        cluster['cov_f'] = cluster2['cov_f'][id]
+        cluster['d'] = cluster2['d'][id]
+        cluster['cov_d'] = cluster2['cov_d'][id]
+        cluster['mode_shapes'] = cluster2['mode_shapes'][id,:]
+        cluster['MAC'] = cluster2['MAC'][id]
+        cluster['model_order'] = cluster2['model_order'][id]
+        cluster['row'] = cluster2['row'][id]
+        cluster['col'] = cluster2['col'][id]
+    else:
+        cluster['f'] = np.append(cluster['f'],cluster2['f'][id])
+        cluster['cov_f'] = np.append(cluster['cov_f'],cluster2['cov_f'][id])
+        cluster['d'] = np.append(cluster['d'],cluster2['d'][id])
+        cluster['cov_d'] = np.append(cluster['cov_d'],cluster2['cov_d'][id])
+        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],cluster2['mode_shapes'][id,:]))
+        cluster['MAC'] = np.append(cluster['MAC'],cluster2['MAC'][id])
+        cluster['model_order'] = np.append(cluster['model_order'],cluster2['model_order'][id])
+        cluster['row'] = np.append(cluster['row'],cluster2['row'][id])
+        cluster['col'] = np.append(cluster['col'],cluster2['col'][id])
+    return cluster
\ No newline at end of file
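The merge test in alignment() combines a relative and an absolute bound. As a worked illustration with invented parameter values (the real allignment_factor and Fs values live in PARAMS in methods/constants.py):

    Fs = 1000.0                        # sampling frequency [Hz]; assumed value
    allignment_factor = (0.05, 0.01)   # (relative, absolute) factors; assumed values

    m_f = 20.0  # median frequency of the parent cluster [Hz]
    bound = min(m_f * allignment_factor[0], Fs / 2 * allignment_factor[1])
    # bound = min(1.0, 5.0) = 1.0 Hz: a neighbouring cluster is considered for
    # merging when its median lies within 1 Hz of m_f, subject to the MAC check.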
diff --git a/src/methods/mode_clustering_functions/clustering.py b/src/methods/mode_clustering_functions/clustering.py
new file mode 100644
index 0000000..28d10ca
--- /dev/null
+++ b/src/methods/mode_clustering_functions/clustering.py
@@ -0,0 +1,226 @@
+from typing import Any
+import numpy as np
+from functions.clean_sysid_output import (remove_highly_uncertain_points,transform_sysid_features)
+from methods.mode_clustering_functions.create_cluster import cluster_creation
+from methods.mode_clustering_functions.expand_cluster import cluster_expansion
+from methods.mode_clustering_functions.initialize_Ip import cluster_initial
+from methods.mode_clustering_functions.align_clusters import alignment
+
+
+# Following the algorithm proposed here: https://doi.org/10.1007/978-3-031-61421-7_56
+# JVM 22/10/2025
+
+def cluster_func(sysid_output: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Clustering of OMA results
+
+    Args:
+        sysid_output (dict): PyOMA results
+        Params (dict): Algorithm parameters
+    Returns:
+        cluster_dict (dict): Dictionary of clusters after clustering, alignment and
+            cardinality check, sorted by ascending median frequency
+
+    """
+
+    #Preliminary cleaning
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_highly_uncertain_points(sysid_output,Params)
+
+    # Transpose, flip and sort the arrays, such that they map directly to the
+    # stabilization diagram. This means that the frequency array maps directly to the plot:
+    # MO.
+    # 5.| x        x
+    # 4.| x
+    # 3.| x
+    # 2.|          x
+    # 1.|
+    # 0.|
+    #   --1--------4------- Frequency
+    # The frequency array will then have the shape (6,3): initially (6,6), but the
+    # complex conjugates have been removed, so 6 is halved to 3. There are 6 rows,
+    # one per model order (including 0), and 3 columns for the maximum number of
+    # poles at a model order. With row 0 corresponding to the highest model order,
+    # the frequency array becomes (the third column is all NaN in this example):
+    #   __0___1___2_
+    # 0|   1   4 NaN
+    # 1|   1 NaN NaN
+    # 2|   1 NaN NaN
+    # 3| NaN   4 NaN
+    # 4| NaN NaN NaN
+    # 5| NaN NaN NaN
+
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_sysid_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes)
+
+    row, col = np.indices(model_orders.shape)
+    row = row.flatten(order="C")
+    col = col.flatten(order="C")
+
+    #Initiate data
+    data1 = {'frequencies':frequencies,
+             'damping_ratios':damping_ratios,
+             'cov_f':cov_freq,
+             'cov_d':cov_damping,
+             'mode_shapes':mode_shapes2,
+             'row':row,
+             'col':col}
+
+    cluster_dict = {}
+    cluster_counter = 0
+    for count, f in enumerate(frequencies.flatten(order="f")):
+        #Extract data
+        frequencies = data1['frequencies']
+        damping_ratios = data1['damping_ratios']
+        cov_freq = data1['cov_f']
+        cov_damping = data1['cov_d']
+
+        #Initial point
+        r = row[count]
+        c = col[count]
+        ip = [frequencies[r,c],cov_freq[r,c],damping_ratios[r,c],cov_damping[r,c]]
+
+        if np.isnan(ip[0]):  #Skip if the pole does not exist
+            pass
+        else:
+            initial_points = cluster_initial(ip,data1)  #Algorithm 1, step 3 - Initialization
+
+            #Creating clusters
+            cluster1 = cluster_creation(initial_points,Params)
+
+            data2 = data1.copy()
+
+            # Cluster expansion
+            expansion = True
+            kk = 0
+            while expansion:
+                kk += 1
+                if kk > 10:
+                    print("Expansion never ends, something is wrong.")
+                    breakpoint()
+                pre_cluster = cluster1
+                cluster2 = cluster_expansion(cluster1,data2,Params)
+                if cluster2['f'].shape == pre_cluster['f'].shape:
+                    if (cluster2['f'] == pre_cluster['f']).all():
+                        expansion = False
+                    else:
+                        cluster1 = cluster2
+                else:
+                    cluster1 = cluster2
+
+            #Sort if more than one pole exists in the cluster
+            if isinstance(cluster2['f'],np.ndarray):
+                cluster2 = sort_cluster(cluster2)
+
+            #Save cluster
+            if isinstance(cluster2['f'],np.ndarray):  #Must have at least two poles
+                #print("Cluster saved", np.median(cluster2['f']))
+                cluster_dict[str(cluster_counter)] = cluster2
+                cluster_counter += 1
+                data1 = remove_data_from_S(data2,cluster2)  #Remove clustered poles from the data
+            else:
+                print("cluster2 too short:",1,"But must be:",Params['mstab'])
+
+    #Alignment or merging of stacked clusters
+    cluster_dict2 = alignment(cluster_dict.copy(),Params)
+
+    #Custom cardinality check
+    cluster_dict3 = {}
+    cluster_counter = 0
+    for ii, key in enumerate(list(cluster_dict2.keys())):  #Iterate over a copy of the keys, since clusters may be popped below
+        cluster = cluster_dict2[key]
+        if isinstance(cluster['f'],np.ndarray):
+            if cluster['f'].shape[0] < Params['mstab']:
+                print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab'])
+            else:
+                print("Cluster saved", np.median(cluster['f']))
+                cluster_dict3[str(ii)] = cluster
+                cluster_counter += 1
+                data1 = remove_data_from_S(data2,cluster)  #Remove clustered poles from the data
+        else:
+            print("cluster too short:",1,"But must be:",Params['mstab'])
+            cluster_dict2.pop(key)
+
+    #Add median and confidence intervals (one-sided) to the cluster data
+    for key in cluster_dict3.keys():
+        cluster = cluster_dict3[key]
+        cluster['median_f'] = np.median(cluster['f'])
+        ci_f = np.sqrt(cluster['cov_f']) * Params['bound_multiplier']
+        ci_d = np.sqrt(cluster['cov_d']) * Params['bound_multiplier']
+        cluster['ci_f'] = ci_f
+        cluster['ci_d'] = ci_d
+
+    #Sort the clusters into ascending order of median frequency
+    median_frequencies = np.zeros(len(cluster_dict3))
+    for ii, key in enumerate(cluster_dict3.keys()):
+        cluster = cluster_dict3[key]
+        median_frequencies[ii] = cluster['median_f']
+
+    indices = np.argsort(median_frequencies)
+    cluster_dict4 = {}
+    for ii, id in enumerate(np.array(list(cluster_dict3.keys()))[indices]):  #Renumber the clusters from 0 to len(cluster_dict3)
+        cluster_dict4[ii] = cluster_dict3[id]  #Insert a cluster into a key
+
+    return cluster_dict4
+
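cluster_func returns a dict keyed 0..N-1 in ascending median frequency; each value is a cluster record of aligned NumPy arrays. A sketch of consuming that output (field names as defined above; sysid_output and PARAMS as used by the callers of this module):

    clusters = cluster_func(sysid_output, PARAMS)
    for key, cluster in clusters.items():
        # 'f', 'd', 'cov_f', 'cov_d', 'model_order', ... are aligned 1-D arrays
        print(f"mode {key}: median {cluster['median_f']:.2f} Hz, "
              f"{cluster['f'].shape[0]} poles, "
              f"one-sided CI up to {cluster['ci_f'].max():.3f} Hz")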
+def remove_data_from_S(data: dict[str,Any],cluster: dict[str,Any]) -> dict[str,Any]:
+    """
+    Remove cluster from data or S
+
+    Args:
+        data (dict): OMA points data
+        cluster (dict): cluster
+    Returns:
+        data2 (dict): Filtered OMA points data
+
+    """
+    #Copy data
+    frequencies = data['frequencies'].copy()
+    damping_ratios = data['damping_ratios'].copy()
+    cov_freq = data['cov_f'].copy()
+    cov_damping = data['cov_d'].copy()
+    mode_shapes = data['mode_shapes'].copy()
+    row = data['row'].copy()
+    col = data['col'].copy()
+    #Make new data dictionary
+    data2 = {'frequencies':frequencies,
+             'damping_ratios':damping_ratios,
+             'cov_f':cov_freq,
+             'cov_d':cov_damping,
+             'mode_shapes':mode_shapes,
+             'row':row,
+             'col':col}
+    #Remove data
+    row = cluster['row']
+    col = cluster['col']
+    for ii, r in enumerate(row):
+        c = col[ii]
+        data2['frequencies'][r,c] = np.nan
+        data2['damping_ratios'][r,c] = np.nan
+        data2['cov_f'][r,c] = np.nan
+        data2['cov_d'][r,c] = np.nan
+        data2['mode_shapes'][r,c,:] = np.nan
+
+    return data2
+
+def sort_cluster(cluster: dict[str,Any]) -> dict[str,Any]:
+    """
+    Sort cluster based on row/model order
+
+    Args:
+        cluster (dict): Cluster
+    Returns:
+        cluster (dict): Sorted cluster
+
+    """
+    sort_id = np.argsort(cluster['row'])
+
+    cluster['f'] = cluster['f'][sort_id]
+    cluster['cov_f'] = cluster['cov_f'][sort_id]
+    cluster['d'] = cluster['d'][sort_id]
+    cluster['cov_d'] = cluster['cov_d'][sort_id]
+    cluster['mode_shapes'] = cluster['mode_shapes'][sort_id,:]
+    cluster['MAC'] = cluster['MAC'][sort_id]
+    cluster['model_order'] = cluster['model_order'][sort_id]
+    cluster['row'] = cluster['row'][sort_id]
+    cluster['col'] = cluster['col'][sort_id]
+
+    return cluster
\ No newline at end of file
diff --git a/src/methods/mode_clustering_functions/create_cluster.py b/src/methods/mode_clustering_functions/create_cluster.py
new file mode 100644
index 0000000..4bcf5b0
--- /dev/null
+++ b/src/methods/mode_clustering_functions/create_cluster.py
@@ -0,0 +1,398 @@
+import numpy as np
+from typing import Any
+from functions.calculate_mac import calculate_mac
+
+def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]:  #Algorithm 2
+    """
+    Create cluster
+
+    Args:
+        IP (dict): Dictionary of data on initial points
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Cluster
+
+    """  #Algorithm 2
+    #Extract data:
+    frequencies = IP['f']
+    cov_f = IP['cov_f']
+    damping_ratios = IP['d']
+    cov_d = IP['cov_d']
+    mode_shapes = IP['ms']
+    row = IP['row']
+    col = IP['col']
+
+    IPu = {}
+    if len(row) != len(set(row)):  #line 5 in algorithm  #If there are multiple points at the same model order
+        for ii, id in enumerate(row):  #Go through all rows/model orders
+            pos = np.argwhere(row==id)  #Locate the indices of one or more poles
+            #line 6 in algorithm
+            if len(pos) == 1:  #If only 1 pole exists at the model order
+                if len(IPu) == 0:  #First pole
+                    IPu['f'] = frequencies[ii]
+                    IPu['cov_f'] = cov_f[ii]
+                    IPu['d'] = damping_ratios[ii]
+                    IPu['cov_d'] = cov_d[ii]
+                    IPu['ms'] = np.array((mode_shapes[ii,:]))
+                    IPu['row'] = row[ii]
+                    IPu['col'] = col[ii]
+                    unique = 1  #To determine whether there is more than one unique pole, for later use. If 1, only one unique pole exists
+                else:
+                    IPu['f'] = np.append(IPu['f'],frequencies[ii])
+                    IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii])
+                    IPu['d'] = np.append(IPu['d'],damping_ratios[ii])
+                    IPu['cov_d'] = np.append(IPu['cov_d'],cov_d[ii])
+                    IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:]))
+                    IPu['row'] = np.append(IPu['row'],row[ii])
+                    IPu['col'] = np.append(IPu['col'],col[ii])
+                    unique = 2  #If 2, more than one unique pole exists
+
+        if len(IPu) > 0:  #If model orders with unique poles exist
+            if unique == 1:  #If only one unique pole exists
+                cluster = {'f':np.array([IPu['f']]),
+                           'cov_f':np.array([IPu['cov_f']]),
+                           'd':np.array([IPu['d']]),
+                           'cov_d':np.array([IPu['cov_d']]),
+                           'mode_shapes':np.array([IPu['ms']]),
+                           'model_order':np.array([Params['model_order']-IPu['row']]),
+                           'row':np.array([IPu['row']]),
+                           'col':np.array([IPu['col']]),
+                           'MAC':np.array([1])}
+
+            else:  #If more unique poles exist
+                cluster = {'f':np.array([IPu['f'][0]]),
+                           'cov_f':np.array([IPu['cov_f'][0]]),
+                           'd':np.array([IPu['d'][0]]),
+                           'cov_d':np.array([IPu['cov_d'][0]]),
+                           'mode_shapes':np.array([IPu['ms'][0,:]]),
+                           'model_order':np.array([Params['model_order']-IPu['row'][0]]),
+                           'row':np.array([IPu['row'][0]]),
+                           'col':np.array([IPu['col'][0]]),
+                           'MAC':np.array([1])}
+
+            cluster, non_clustered_IPu = cluster_from_mac(cluster,IPu,Params)  #Cluster the unique poles
+
+        else:  #If no unique poles exist, go forth with the initial point, ip
+            #Only the initial point is clustered
+            cluster = {'f':np.array([frequencies[0]]),
+                       'cov_f':np.array([cov_f[0]]),
+                       'd':np.array([damping_ratios[0]]),
+                       'cov_d':np.array([cov_d[0]]),
+                       'mode_shapes':np.array([mode_shapes[0,:]]),
+                       'model_order':np.array([Params['model_order']-row[0]]),
+                       'row':np.array([row[0]]),
+                       'col':np.array([col[0]]),
+                       'MAC':np.array([1])}
+
+        #Check if there are multiple points with the same model order as ip
+        ip_ids = np.argwhere(row==row[0])
+        if len(ip_ids[:,0]) > 1:  # Remove all the other points at the same model order
+            #Delete in one call, so earlier deletions do not shift the later indices
+            frequencies = np.delete(frequencies,ip_ids[1:,0])
+            cov_f = np.delete(cov_f,ip_ids[1:,0])
+            damping_ratios = np.delete(damping_ratios,ip_ids[1:,0])
+            cov_d = np.delete(cov_d,ip_ids[1:,0])
+            mode_shapes = np.delete(mode_shapes,ip_ids[1:,0],axis=0)
+            row = np.delete(row,ip_ids[1:,0])
+            col = np.delete(col,ip_ids[1:,0])
+
+        if len(row) != len(set(row)):  #If there still are points at the same model order in IP
+            IPm = {}
+            for ii, id in enumerate(row):  #Go through all rows/model orders
+                pos = np.argwhere(row==id)  #Locate the indices of one or more poles
+                #line 6 in algorithm
+                if len(pos) > 1:  #If more than one pole exists for the model order
+                    if len(IPm) == 0:  #First pole
+                        IPm['f'] = frequencies[ii]
+                        IPm['cov_f'] = cov_f[ii]
+                        IPm['d'] = damping_ratios[ii]
+                        IPm['cov_d'] = cov_d[ii]
+                        IPm['ms'] = np.array((mode_shapes[ii,:]))
+                        IPm['row'] = row[ii]
+                        IPm['col'] = col[ii]
+                    else:
+                        IPm['f'] = np.append(IPm['f'],frequencies[ii])
+                        IPm['cov_f'] = np.append(IPm['cov_f'],cov_f[ii])
+                        IPm['d'] = np.append(IPm['d'],damping_ratios[ii])
+                        IPm['cov_d'] = np.append(IPm['cov_d'],cov_d[ii])
+                        IPm['ms'] = np.vstack((IPm['ms'],np.array(mode_shapes[ii,:])))
+                        IPm['row'] = np.append(IPm['row'],row[ii])
+                        IPm['col'] = np.append(IPm['col'],col[ii])
+            # After the unique poles are clustered, the multiple poles are clustered
+            cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,IPm,Params)
+
+            #Start while loop
+            cluster_len_before = 0
+            while len(cluster['row']) != cluster_len_before:
+                cluster_len_before = len(cluster['row'])
+                try:
+                    if len(non_clustered_IPu['row']) > 0:
+                        cluster, non_clustered_IPu = cluster_from_mac(cluster,non_clustered_IPu,Params)  #Cluster the unique poles again
+                except NameError:  #non_clustered_IPu does not exist if no unique poles were found
+                    pass
+                if len(non_clustered_IPm['row']) > 0:
+                    cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,non_clustered_IPm,Params)  #Cluster the non-unique poles again
+
+    else:  #line 1 in algorithm: only unique poles
+        cluster = {'f':np.array([frequencies[0]]),
+                   'cov_f':np.array([cov_f[0]]),
+                   'd':np.array([damping_ratios[0]]),
+                   'cov_d':np.array([cov_d[0]]),
+                   'mode_shapes':np.array([mode_shapes[0,:]]),
+                   'model_order':np.array([Params['model_order']-row[0]]),
+                   'row':np.array([row[0]]),
+                   'col':np.array([col[0]]),
+                   'MAC':np.array([1])}
+        if IP['f'].shape[0] > 1:
+            cluster, _ = cluster_from_mac(cluster,IP,Params)
+
+    return cluster
+
+def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any]]:
+    """
+    Add points to a cluster based on MAC
+
+    Args:
+        cluster (dict): Intermediate cluster
+        IP (dict): Dictionary of data on initial points
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Intermediate cluster
+        unclustered_IPu (dict): The points that were not clustered
+
+    """
+
+    #Extract data
+    frequencies = IP['f']
+    cov_f = IP['cov_f']
+    damping_ratios = IP['d']
+    cov_d = IP['cov_d']
+    mode_shapes = IP['ms']
+    row = IP['row']
+    col = IP['col']
+
+    ip_ms = IP['ms'][0]
+    i_ms = IP['ms'][1:]
+    f_ip = frequencies[0]
+    f_i = frequencies[1:]
+    row_i = row[1:]
+
+    skip_id = []
+
+    for jj, ms in enumerate(i_ms):  #Go through all the remaining mode shapes
+        idx = jj+1
+        MAC = calculate_mac(ip_ms,ms)  #Does the mode shape match the first pole?
+        if MAC > Params['tMAC']:  #line 2 in algorithm
+            #Add to cluster
+            cluster['f'] = np.append(cluster['f'],frequencies[idx])
+            cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+            cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+            cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+            cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+            cluster['MAC'] = np.append(cluster['MAC'],MAC)
+            cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+            cluster['row'] = np.append(cluster['row'],row[idx])
+            cluster['col'] = np.append(cluster['col'],col[idx])
+
+            skip_id.append(idx)
+
+    #Compare remaining points with newly added cluster points, i.e. points are compared with the full cluster, not just ip
+    if cluster['f'].shape[0] > 1:  #If points have been added to the cluster, proceed
+        if IP['ms'].shape[0] > len(skip_id):  #If there are more points left to compare, proceed
+            unclustered_points = 1
+            while IP['ms'].shape[0] != unclustered_points:  #Run until no points are clustered anymore
+                unclustered_points = IP['ms'].shape[0]
+
+                i_ms = IP['ms'][1:]
+                for jj, ms in enumerate(i_ms):
+                    idx = jj+1
+                    if idx in skip_id:
+                        # print(idx)
+                        continue
+
+                    MAC_list = []
+                    for c_ms in cluster['mode_shapes']:
+                        MAC_list.append(calculate_mac(c_ms,ms))
+
+                    # print("MAC_list",MAC_list)
+                    if max(MAC_list) > Params['tMAC']:  #line 2 in algorithm
+                        #Add to cluster
+                        cluster['f'] = np.append(cluster['f'],frequencies[idx])
+                        cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+                        cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+                        cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+                        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+                        cluster['MAC'] = np.append(cluster['MAC'],max(MAC_list))  #Use the matching MAC value, not the stale MAC from the first loop
+                        cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+                        cluster['row'] = np.append(cluster['row'],row[idx])
+                        cluster['col'] = np.append(cluster['col'],col[idx])
+
+                        skip_id.append(idx)
+
+    clustered_id = []
+    for r2 in cluster['row']:  #For every entry in row of the cluster
+        unclustered_point = False
+        for ii, r1 in enumerate(IP['row']):  #For every entry in row of IPu
+            if r1 == r2:  #If r1 is an entry of "row" in the cluster, then save that row for later
+                clustered_id.append(ii)
+
+    all_id = np.array(list(range(len(IP['row']))))
+
+    clustered_id = np.array(clustered_id)
+    if clustered_id.shape[0] > 0:
+        unclustered_id = np.delete(all_id,clustered_id)
+        unclustered_id = np.insert(unclustered_id,0,0)
+    else:
+        unclustered_id = all_id
+
+    unclustered_IPu = {}
+    unclustered_IPu['f'] = IP['f'][unclustered_id]
+    unclustered_IPu['cov_f'] = IP['cov_f'][unclustered_id]
+    unclustered_IPu['d'] = IP['d'][unclustered_id]
+    unclustered_IPu['cov_d'] = IP['cov_d'][unclustered_id]
+    unclustered_IPu['ms'] = IP['ms'][unclustered_id]
+    unclustered_IPu['row'] = IP['row'][unclustered_id]
+    unclustered_IPu['col'] = IP['col'][unclustered_id]
+
+    return cluster, unclustered_IPu
+
+def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any]]:
+    """
+    Cluster based on MAC when multiple poles exist for a model order
+
+    Args:
+        cluster (dict): Intermediate cluster
+        IPm (dict): Dictionary of data on the initial points with multiple poles per model order
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Intermediate cluster
+        unclustered_IPm (dict): The points that were not clustered
+
+    """
+    #Cluster based on MAC if multiple poles exist for the model order
+    #Extract data
+    frequencies = IPm['f']
+    cov_f = IPm['cov_f']
+    damping_ratios = IPm['d']
+    cov_d = IPm['cov_d']
+    mode_shapes = IPm['ms']
+    row = IPm['row']
+    col = IPm['col']
+
+    # Locate the pole indices at each model order
+    pos = []
+    for ii, idd in enumerate(set(row)):
+        pos.append(np.argwhere(row==idd))
+
+    skip_id = []
+    skip_id_before = None
+    while skip_id != skip_id_before:
+        ip_ms = cluster['mode_shapes']
+        if isinstance(cluster['f'],np.ndarray):
+            ip_ms_0 = ip_ms[0,:]  #Mode shape of the first pole
+        else:
+            ip_ms_0 = ip_ms  #Mode shape of the first pole
+
+        i_ms = IPm['ms'][:]  #Mode shapes of the model orders with multiple poles
+
+        skip_id_before = skip_id.copy()
+        #Go through all the model orders
+        for oo, pos_i in enumerate(pos):
+            MAC = np.zeros(pos_i.shape[0])
+
+            if oo in skip_id:  #Skip these model orders, since they have already been added
+                continue
+
+            pos_i = pos_i[:,0]
+            for ii, id_row in enumerate(pos_i):
+                MAC[ii] = calculate_mac(ip_ms_0,i_ms[id_row])  #Calculate MAC between the first pole of the cluster and a pole in IPm
+                #If MAC is not satisfied
+                if MAC[ii] < Params['tMAC']:  #Search for the max across all mode shapes in the cluster:
+                    #line 3 in algorithm
+                    MAC_list = []
+                    for ms in ip_ms:
+                        MAC_list.append(calculate_mac(ms,i_ms[id_row]))
+                    MAC[ii] = max(MAC_list)
+
+            #Find the mask for the poles that meet the MAC criterion
+            mask = MAC > Params['tMAC']
+            pos_MAC = np.argwhere(mask)  #Get indices
+
+            #Formatting of the indices
+            if pos_MAC.shape[0] > 1:  #More than one index
+                pos_MAC = pos_MAC[:,0]
+            else:  #Only one or zero indices (no MAC match)
+                if pos_MAC.shape[0] == 1:
+                    pos_MAC = pos_MAC[0]
+
+            if pos_MAC.shape[0] > 1:  #If multiple poles comply with the MAC criterion
+                #ids formatting
+                ids = pos_i[pos_MAC]
+
+                #Get the frequencies of the poles
+                freq = np.zeros(ids.shape[0])
+                for jj, idid in enumerate(ids):
+                    freq[jj] = frequencies[idid]
+                median_f = np.median(cluster['f'])
+
+                #Locate the index of the closest pole
+                idx = (np.abs(freq - median_f)).argmin()
+                ll = pos_i[pos_MAC[idx]]
+
+                #Add this pole to the cluster
+                cluster['f'] = np.append(cluster['f'],frequencies[ll])
+                cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll])
+                cluster['d'] = np.append(cluster['d'],damping_ratios[ll])
+                cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll])
+                cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:])))
+                cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[idx]])
+                cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll])
+                cluster['row'] = np.append(cluster['row'],row[ll])
+                cluster['col'] = np.append(cluster['col'],col[ll])
+
+                skip_id.append(oo)
+
+            elif pos_MAC.shape[0] == 1:  #If only one pole complies with MAC
+                ll = pos_i[pos_MAC[0]]
+
+                #Add this pole to the cluster
+                cluster['f'] = np.append(cluster['f'],frequencies[ll])
+                cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll])
+                cluster['d'] = np.append(cluster['d'],damping_ratios[ll])
+                cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll])
+                cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:])))
+                cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[0]])
+                cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll])
+                cluster['row'] = np.append(cluster['row'],row[ll])
+                cluster['col'] = np.append(cluster['col'],col[ll])
+
+                skip_id.append(oo)
+            # else:
+            #     print("Not clustered. MAC not satisfied")
+
+    clustered_id = []
+    for r2 in cluster['row']:  #For every entry in row of the cluster
+        unclustered_point = False
+        for ii, r1 in enumerate(IPm['row']):  #For every entry in row of IPm
+            if r1 == r2:  #If r1 is an entry of "row" in the cluster, then save that row for later
+                clustered_id.append(ii)
+
+    all_id = np.array(list(range(len(IPm['row']))))
+
+    clustered_id = np.array(clustered_id)
+    if clustered_id.shape[0] > 0:
+        unclustered_id = np.delete(all_id,clustered_id)
+    else:
+        unclustered_id = all_id
+
+    unclustered_IPm = {}
+    unclustered_IPm['f'] = IPm['f'][unclustered_id]
+    unclustered_IPm['cov_f'] = IPm['cov_f'][unclustered_id]
+    unclustered_IPm['d'] = IPm['d'][unclustered_id]
+    unclustered_IPm['cov_d'] = IPm['cov_d'][unclustered_id]
+    unclustered_IPm['ms'] = IPm['ms'][unclustered_id]
+    unclustered_IPm['row'] = IPm['row'][unclustered_id]
+    unclustered_IPm['col'] = IPm['col'][unclustered_id]
+
+    return cluster, unclustered_IPm
\ No newline at end of file
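Both cluster_from_mac and cluster_from_mac_IPm ultimately fill a MAC matrix between candidate mode shapes and the shapes already in the cluster, then accept a pole when the maximum entry clears the threshold. A compact restatement of that inner computation (illustrative only; calculate_mac as assumed earlier):

    import numpy as np
    from functions.calculate_mac import calculate_mac

    def mac_matrix(shapes_a, shapes_b):
        """Pairwise MAC between the rows of two (n_a, dof) and (n_b, dof) arrays."""
        out = np.zeros((shapes_a.shape[0], shapes_b.shape[0]))
        for i, ms_a in enumerate(shapes_a):
            for j, ms_b in enumerate(shapes_b):
                out[i, j] = calculate_mac(ms_a, ms_b)
        return out

    # A candidate pole is accepted when mac_matrix(cluster_shapes, candidate).max() > Params['tMAC'].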
diff --git a/src/methods/mode_clustering_functions/expand_cluster.py b/src/methods/mode_clustering_functions/expand_cluster.py
new file mode 100644
index 0000000..5a4efc8
--- /dev/null
+++ b/src/methods/mode_clustering_functions/expand_cluster.py
@@ -0,0 +1,83 @@
+from typing import Any
+import numpy as np
+from methods.mode_clustering_functions.create_cluster import cluster_creation
+
+def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Expand a cluster based on its minimum and maximum bounds
+
+    Args:
+        cluster (dict): Intermediate cluster
+        data (dict): OMA points data
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Expanded cluster
+
+    """
+    #print("\nExpansion")
+    unClustered_frequencies = data['frequencies']
+    unClustered_damping = data['damping_ratios']
+
+    freq_c = cluster['f']
+    cov_f = cluster['cov_f']
+    damp_c = cluster['d']
+    cov_d = cluster['cov_d']
+    row = cluster['row']
+
+    bound_multiplier = Params['bound_multiplier']
+
+    #Find the min-max bounds of the cluster
+    f_lower_bound = np.min(freq_c - bound_multiplier * np.sqrt(cov_f))  # Minimum over all points for the frequencies
+    f_upper_bound = np.max(freq_c + bound_multiplier * np.sqrt(cov_f))  # Maximum over all points for the frequencies
+    d_lower_bound = np.min(damp_c - bound_multiplier * np.sqrt(cov_d))  # Minimum over all points for the damping
+    d_upper_bound = np.max(damp_c + bound_multiplier * np.sqrt(cov_d))  # Maximum over all points for the damping
+
+    #Mask of possible expanded poles
+    condition_mask = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= d_lower_bound) & (unClustered_damping <= d_upper_bound)
+    # Get indices satisfying the condition
+    expanded_indices = np.argwhere(condition_mask)
+
+    #Initiate cluster_points for cluster creation
+    cluster_points = {}
+    cluster_points['f'] = data['frequencies'][condition_mask]
+    cluster_points['cov_f'] = data['cov_f'][condition_mask]
+    cluster_points['d'] = data['damping_ratios'][condition_mask]
+    cluster_points['cov_d'] = data['cov_d'][condition_mask]
+    cluster_points['ms'] = data['mode_shapes'][condition_mask,:]
+    cluster_points['row'] = expanded_indices[:,0]
+    cluster_points['col'] = expanded_indices[:,1]
+
+    #Move the cluster's previous first point to the front of cluster_points, so it stays the initial point
+    if isinstance(cluster['f'],np.ndarray):
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'][0])
+    else:
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'])
+    if len(index_f[:,0]) > 1:
+        index_row = np.argwhere(cluster_points['row'][index_f[:,0]] == cluster['row'][0])
+        ip_id = int(index_f[index_row[:,0]][:,0])
+    else:
+        ip_id = int(index_f[:,0])
+    indices = list(range(len(cluster_points['f'])))
+    popped_id = indices.pop(ip_id)
+    indices.insert(0,popped_id)
+    indices = np.array(indices)
+
+    cluster_points['f'] = cluster_points['f'][indices]
+    cluster_points['cov_f'] = cluster_points['cov_f'][indices]
+    cluster_points['d'] = cluster_points['d'][indices]
+    cluster_points['cov_d'] = cluster_points['cov_d'][indices]
+    cluster_points['ms'] = cluster_points['ms'][indices,:]
+    cluster_points['row'] = cluster_points['row'][indices]
+    cluster_points['col'] = cluster_points['col'][indices]
+
+    #Check if these values can be clustered
+    cluster = cluster_creation(cluster_points,Params)
+    if isinstance(cluster['f'],np.ndarray):
+        if len(cluster['row']) != len(set(cluster['row'])):
+            print("row_before",cluster_points['row'])
+            print("row_after",cluster['row'])
+            print("exp2",cluster['f'])
+            print("double orders",cluster['row'])
+            breakpoint()
+
+    return cluster
diff --git a/src/methods/mode_clustering_functions/initialize_Ip.py b/src/methods/mode_clustering_functions/initialize_Ip.py
new file mode 100644
index 0000000..5dc0dbb
--- /dev/null
+++ b/src/methods/mode_clustering_functions/initialize_Ip.py
@@ -0,0 +1,46 @@
+from typing import Any
+import numpy as np
+
+def cluster_initial(ip: list[float], data: dict[str,Any], bound: float = 2) -> dict[str,Any]:
+    """
+    Find the initial cluster points
+
+    Args:
+        ip (list): Frequency, damping and covariances for the initial point (ip)
+        data (dict): OMA points data
+        bound (float): Multiplier on the standard deviation
+    Returns:
+        initial_points (dict): Initial points to create a cluster from
+
+    """
+    #Extract the data of the initial point
+    ip_f = ip[0]
+    ip_cov_f = ip[1]
+    ip_d = ip[2]
+    ip_cov_d = ip[3]
+
+    # Confidence interval using ±bound*standard_deviation
+    f_lower_bound = ip_f - bound * np.sqrt(ip_cov_f)
+    f_upper_bound = ip_f + bound * np.sqrt(ip_cov_f)
+    z_lower_bound = ip_d - bound * np.sqrt(ip_cov_d)
+    z_upper_bound = ip_d + bound * np.sqrt(ip_cov_d)
+
+    frequencies = data['frequencies']
+    damping_ratios = data['damping_ratios']
+
+    # Find elements within the current limits that are still ungrouped
+    condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound)
+    indices = np.argwhere(condition_mask)  # Get indices satisfying the condition
+
+    #Generate the data for the initial points
+    initial_points = {}
+    initial_points['f'] = data['frequencies'][condition_mask]
+    initial_points['cov_f'] = data['cov_f'][condition_mask]
+    initial_points['d'] = data['damping_ratios'][condition_mask]
+    initial_points['cov_d'] = data['cov_d'][condition_mask]
+    initial_points['ms'] = data['mode_shapes'][condition_mask,:]
+    initial_points['row'] = indices[:,0]
+    initial_points['col'] = indices[:,1]
+
+    return initial_points
\ No newline at end of file
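cluster_initial turns a single pole into a frequency/damping box of ± bound·σ around it. With a hypothetical pole at 12.0 Hz, a frequency variance of 0.01 and the default bound = 2, the box works out as:

    import numpy as np

    ip = [12.0, 0.01, 0.02, 1e-5]  # [f, cov_f, d, cov_d]; illustrative values only
    bound = 2
    f_lo = ip[0] - bound * np.sqrt(ip[1])  # 12.0 - 2*0.1 = 11.8 Hz
    f_hi = ip[0] + bound * np.sqrt(ip[1])  # 12.2 Hz
    d_lo = ip[2] - bound * np.sqrt(ip[3])  # 0.02 - 2*0.00316 ~= 0.0137
    d_hi = ip[2] + bound * np.sqrt(ip[3])  # ~= 0.0263
    # Every unclustered pole falling inside this box seeds the initial point set.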
diff --git a/src/methods/mode_tracking.py b/src/methods/mode_tracking.py
new file mode 100644
index 0000000..ae3f661
--- /dev/null
+++ b/src/methods/mode_tracking.py
@@ -0,0 +1,85 @@
+import sys
+from typing import Any, List, Dict, Tuple
+import numpy as np
+import matplotlib.pyplot as plt
+from methods.constants import PARAMS
+from methods.mode_clustering import (subscribe_and_cluster)
+from methods.mode_tracking_functions.mode_tracking import cluster_tracking
+from functions.plot_mode_tracking import plot_tracked_modes
+from functions.plot_clusters import plot_clusters
+# pylint: disable=C0103, W0603
+
+def track_clusters(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any],
+                   params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Runs the mode tracking algorithm.
+
+    Args:
+        cluster_dict (dict[str,Any]): Clusters from OMA
+        tracked_clusters (dict[str,Any]): Previously tracked clusters
+        params (dict[str,Any]): Tracking parameters
+    Returns:
+        tracked_clusters (dict[str,Any]): Tracked clusters
+    """
+    tracked_clusters = cluster_tracking(cluster_dict, tracked_clusters, params)
+    return tracked_clusters
+
+def subscribe_and_track_clusters(config_path: str) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
+    """
+    Subscribes to the MQTT broker, receives one OMA message, runs mode tracking, and returns the results.
+
+    Args:
+        config_path (str): Path to config JSON.
+
+    Returns:
+        sysid_output (Dict[str,Any]): sysid output
+        clusters (Dict[str,Any]): Clusters
+        tracked_clusters (Dict[str,Any]): Tracked clusters
+    """
+    tracked_clusters = {}
+    sysid_output, clusters, median_frequencies = subscribe_and_cluster(config_path,PARAMS)
+
+    print("Clustered frequencies", median_frequencies)
+    tracked_clusters = track_clusters(clusters, tracked_clusters,PARAMS)
+
+    return sysid_output, clusters, tracked_clusters
+
+def live_mode_tracking(config_path: str,
+                       plot: np.ndarray = np.array([1,1])) -> None:
+    """
+    Subscribes to the MQTT broker; for every received OMA message, runs mode tracking
+    and plots the results. Continues until stopped.
+
+    Args:
+        config_path (str): Path to config JSON.
+        plot (np.ndarray): Array describing which plots to show
+
+    Plots:
+        Cluster plot
+        Tracked clusters plot
+    """
+    tracked_clusters = {}
+    fig_ax1 = None
+    fig_ax2 = None
+
+    while True:
+        try:
+            sysid_output, clusters, median_frequencies = subscribe_and_cluster(config_path,PARAMS)
+
+            print("Clustered frequencies", median_frequencies)
+            tracked_clusters = track_clusters(clusters, tracked_clusters,PARAMS)
+
+            if plot[0] == 1:
+                fig_ax1 = plot_clusters(clusters,sysid_output,PARAMS,fig_ax=fig_ax1)
+                plt.show(block=False)
+            if plot[1] == 1:
+                fig_ax2 = plot_tracked_modes(tracked_clusters,PARAMS,fig_ax=fig_ax2,x_length=None)
+                plt.show(block=False)
+            sys.stdout.flush()
+
+        except KeyboardInterrupt:
+            print("Shutting down gracefully")
+            plt.close()
+            break  #Exit the loop on Ctrl+C instead of resubscribing
+        except Exception as e:
+            print(f"Unexpected error: {e}")
\ No newline at end of file
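The tracked_clusters dictionary that track_clusters threads through successive batches has a small, implicit schema worth spelling out. The sketch below is the editor's reading of how mode_tracking_functions builds it; the field values are placeholders:

    import numpy as np

    # Placeholder cluster records; real ones also carry 'f', 'cov_f', 'd', 'cov_d', 'ci_f', ...
    c0 = {"median_f": 11.9, "mode_shapes": np.ones((2, 4)), "id": 0}
    c1 = {"median_f": 12.1, "mode_shapes": np.ones((2, 4)), "id": 1}

    tracked_clusters = {
        "iteration": 1,   # tracking rounds completed so far
        "0": [c0, c1],    # history of one physical mode, oldest first
    }
    latest = tracked_clusters["0"][-1]  # matching always compares against the last entry
    print(latest["median_f"])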
diff --git a/src/methods/mode_tracking_functions/match_to_tracked_cluster.py b/src/methods/mode_tracking_functions/match_to_tracked_cluster.py
new file mode 100644
index 0000000..5707510
--- /dev/null
+++ b/src/methods/mode_tracking_functions/match_to_tracked_cluster.py
@@ -0,0 +1,149 @@
+from typing import Any
+import numpy as np
+from functions.calculate_mac import calculate_mac
+
+def match_cluster_to_tracked_cluster(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], Params: dict[str,Any], result_prev: dict[str,Any] = {}, skip_cluster: list = [], skip_tracked_cluster: list = []) -> dict[str,Any]:
+    """
+    Match clusters to tracked clusters
+
+    The result dictionary consists of keys (cluster indices) and values (the index of
+    the tracked cluster to match with).
+    Example:
+        Cluster 1 matches with tracked cluster 2
+        Cluster 2 matches with tracked cluster 1
+        Cluster 3 matches with tracked cluster 1
+        Cluster 4 matches with "new", i.e. it could not be matched with an existing tracked cluster
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+        result_prev (dict): Dictionary of the previous match result
+        skip_cluster (list): List of clusters that already have an optimal match with a tracked cluster
+        skip_tracked_cluster (list): List of tracked clusters that already have an optimal match with a cluster
+
+    Returns:
+        result (dict): Dictionary of matches
+
+    """
+    result = {}
+    for id, key in enumerate(cluster_dict):  #Go through all clusters
+        if id in skip_cluster:  #If this cluster is already matched, skip it
+            result[str(id)] = result_prev[str(id)]
+            continue
+
+        #Get mode shapes
+        cluster = cluster_dict[key]
+        omega = cluster['median_f']
+        phi = cluster['mode_shapes'][0]
+        phi_all = cluster['mode_shapes']
+
+        Xres = []
+        MAC_list = []
+        D_freq = []
+        omega_t_list = []
+        MAC_max_list = []
+        MAC_avg_list = []
+        for key in tracked_clusters:  #Go through all tracked clusters. They are identified by keys which are integers from 0 up to the total number of clusters
+            if key == 'iteration':
+                pass
+            else:
+                tracked_cluster_list = tracked_clusters[key]  #Accessing all clusters in a tracked cluster group
+                tracked_cluster = tracked_cluster_list[-1]  #Accessing the last cluster for each tracked cluster group
+                omega_t = tracked_cluster['median_f']  #Median frequency of the last cluster in the tracked cluster group
+                omega_t_list.append(omega_t)
+                phi_t_all = tracked_cluster['mode_shapes']  #Mode shapes of the last cluster in the tracked cluster group
+                phi_t = phi_t_all[0]
+
+                MAC_list.append(float(calculate_mac(phi_t, phi)))
+
+                MACs = np.zeros((phi_all.shape[0],phi_t_all.shape[0]))
+                for ii, phi in enumerate(phi_all):
+                    for jj, phi_t in enumerate(phi_t_all):
+                        MAC = float(calculate_mac(phi_t, phi))
+                        MACs[ii,jj] = MAC  #Compare the cluster with all tracked clusters
+
+                if key in skip_tracked_cluster:
+                    MAC_avg = np.mean(0)
+                    MAC_max = np.max(0)
+                    MAC_max_list.append(0)
+                    MAC_avg_list.append(0)
+                    D_freq.append(10**6)
+                else:
+                    MAC_avg = np.mean(MACs)
+                    MAC_max = np.max(MACs)
+                    MAC_max_list.append(MAC_max)
+                    MAC_avg_list.append(MAC_avg)
+                    D_freq.append(abs(omega_t-omega)/omega)
+
+        itemindex1 = np.argwhere(np.array(MAC_max_list) > Params['phi_cri'])  #Find where the cluster matches a tracked cluster on the MAC criterion
+        itemindex = np.argwhere(np.array(D_freq)[itemindex1[:,0]] < Params['freq_cri'])  #Find where the cluster matches a tracked cluster on both the MAC and frequency criteria
+        indices = itemindex1[itemindex[:,0]]
+        if len(indices) > 1:  #If two or more tracked clusters comply with the mode shape criterion
+            Xres = []
+            Xres_f = []
+            Xres_MAC = []
+            for nn in indices:
+                pos = nn[0]
+                X = D_freq[pos]/MAC_max_list[pos]  #Objective function
+                Xres.append(X)
+                Xres_f.append(D_freq[pos])
+                Xres_MAC.append(MAC_max_list[pos])
+
+            if Xres != []:  #One or more clusters comply with the frequency criterion
+                pos1 = Xres.index(min(Xres))  #Find the most likely cluster
+                pos2 = Xres_MAC.index(max(Xres_MAC))  #Find the largest MAC
+                pos3 = Xres_f.index(min(Xres_f))  #Find the smallest frequency difference
+
+                if len(Xres) > 1:  #If more than one cluster complies with the criteria
+                    Xres_left = Xres.copy()
+                    del Xres_left[pos1]
+                    if type(Xres_left) == np.float64:
+                        Xres_left = [Xres_left]
+
+                    Xres_MAC_left = Xres_MAC.copy()
+                    del Xres_MAC_left[pos1]
+                    if type(Xres_MAC_left) == np.float64:
+                        Xres_MAC_left = [Xres_MAC_left]
+
+                    Xres_f_left = Xres_f.copy()
+                    del Xres_f_left[pos1]
+                    if type(Xres_f_left) == np.float64:
+                        Xres_f_left = [Xres_f_left]
+
+                    pos1_2 = Xres_left.index(min(Xres_left))  #Find the most likely cluster
+                    pos2_2 = Xres_MAC_left.index(max(Xres_MAC_left))  #Find the most likely cluster based on MAC
+                    pos3_2 = Xres_f_left.index(min(Xres_f_left))  #Find the most likely cluster based on frequency
+
+                    if (pos1 == pos2) and (pos1 == pos3):  #If one candidate wins on all three parameters: objective function, max MAC and frequency difference
+                        pos = int(indices[pos1][0])
+                        result[str(id)] = pos  #Group to a tracked cluster
+
+                    #Make different: abs(min(Xres_left)/min(Xres)) < Params['obj_cri'] = 2
+                    elif abs(min(Xres_left)-min(Xres)) < Params['obj_cri']:  #If the objective function results are close
+                        if (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']):  #If both frequency differences are close to the target cluster
+                            pos = int(indices[pos2_2][0])  #Match with the best MAC
+                            result[str(id)] = pos  #Group to a tracked cluster
+                        elif (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) > Params['freq_cri']):  #If only Xres_f is smaller than the threshold
+                            pos = int(indices[pos3][0])  #Match with the lowest frequency difference
+                            result[str(id)] = pos  #Group to a tracked cluster
+                        elif (min(Xres_f) > Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']):
+                            pos = int(indices[pos3_2][0])  #Match with the lowest frequency difference
+                            result[str(id)] = pos  #Group to a tracked cluster
+                        else:  #If none of the above, choose the one with the highest MAC
+                            pos = int(indices[pos2_2][0])
+                            result[str(id)] = pos  #Group to a tracked cluster
+                    else:  #If none of the above, choose the one with the lowest objective function
+                        pos = int(indices[pos1][0])
+                        result[str(id)] = pos  #Group to a tracked cluster
+
+            else:  #No cluster complies with the frequency criterion, so a new cluster is saved
+                result[str(id)] = "new"
+
+        elif len(indices) == 1:  #If exactly one tracked cluster complies with the mode shape criterion
+            pos = int(indices[0][0])
+            result[str(id)] = pos  #Group to a tracked cluster
+
+        else:  #Does not comply with the mode shape criterion
+            result[str(id)] = "new"
+
+    return result
\ No newline at end of file
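The candidate selection above reduces to the objective X = Δf / MAC, with Δf = |f_tracked − f| / f, evaluated only for candidates that pass both thresholds. A worked example with hypothetical criterion values (the real ones come from PARAMS):

    phi_cri, freq_cri = 0.8, 0.2               # hypothetical thresholds
    candidates = [(0.02, 0.95), (0.05, 0.90)]  # (relative freq. difference, max MAC)

    admissible = [(df, mac) for df, mac in candidates
                  if mac > phi_cri and df < freq_cri]
    objective = [df / mac for df, mac in admissible]
    best = objective.index(min(objective))  # 0.02/0.95 ~= 0.021 beats 0.05/0.90 ~= 0.056
    print("matched candidate", best)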
diff --git a/src/methods/mode_tracking_functions/mode_tracking.py b/src/methods/mode_tracking_functions/mode_tracking.py
new file mode 100644
index 0000000..40df7f7
--- /dev/null
+++ b/src/methods/mode_tracking_functions/mode_tracking.py
@@ -0,0 +1,155 @@
+from typing import Any
+import numpy as np
+from methods.mode_tracking_functions.match_to_tracked_cluster import match_cluster_to_tracked_cluster
+from methods.mode_tracking_functions.resolve_nonunique_matches import resolve_nonunique_matches
+# JVM 22/10/2025
+
+def cluster_tracking(cluster_dict: dict[str, Any], tracked_clusters: dict[str, Any], Params: dict[str, Any] = None) -> dict[str, Any]:
+    """
+    Tracking of modes across experiments
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+
+    Returns:
+        tracked_clusters (dict): Updated tracked clusters
+
+    """
+    print("Cluster tracking")
+    if Params is None:
+        Params = {'phi_cri': 0.8,
+                  'freq_cri': 0.2,
+                  'obj_cri': 2} #Default from the obj_cri note in match_cluster_to_tracked_cluster
+
+    m_f = []
+    for key in cluster_dict.keys():
+        cluster = cluster_dict[key]
+        m_f.append(cluster['median_f'])
+
+    t_list = []
+    t_length = []
+    #Go through all tracked clusters. They are identified by keys which are integers from 0 up to the total number of clusters
+    for key in tracked_clusters:
+        if key == 'iteration':
+            continue
+        tracked_cluster_list = tracked_clusters[key] #All clusters in a tracked cluster group
+        t_length.append(len(tracked_cluster_list))
+        tracked_cluster = tracked_cluster_list[-1] #The last cluster of each tracked cluster group
+        #Median frequency of the last cluster in the tracked cluster group
+        t_list.append(tracked_cluster['median_f'])
+
+    # No tracked clusters yet?
+    if not tracked_clusters:
+        first_track = 1
+    else:
+        first_track = 0
+
+    if first_track == 1:
+        for id, key in enumerate(cluster_dict.keys()):
+            cluster = cluster_dict[key]
+            cluster['id'] = 0
+
+            tracked_clusters['iteration'] = 0
+            tracked_clusters[str(id)] = [cluster]
+    else:
+        iteration = tracked_clusters['iteration'] + 1
+        tracked_clusters['iteration'] = iteration
+
+        result = match_cluster_to_tracked_cluster(cluster_dict, tracked_clusters, Params) #Match clusters to tracked clusters
+
+        result_int = []
+        for val in result.values(): #Get all non-"new" results
+            if isinstance(val, int):
+                result_int.append(val)
+
+        if len(result_int) == len(set(result_int)): #If every cluster matches a unique tracked cluster
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in the result dict
+                cluster['id'] = iteration
+                if pos == "new": #Add the cluster as a new tracked cluster
+                    new_key = len(tracked_clusters) - 1 #-1 for "iteration", +1 for the next cluster and -1 for zero-based keys = -1
+                    tracked_clusters[str(new_key)] = [cluster]
+                else: #Add the cluster to an existing tracked cluster
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+        else: #If some clusters match the same tracked cluster
+            kk = 0
+            skip_tracked_cluster = []
+            skip_cluster = []
+            while len(result_int) != len(set(result_int)):
+                kk += 1
+                if kk > 10:
+                    #Debug info:
+                    unique_match_debug_info(result, cluster_dict, t_list)
+                    print("Unresolved mode tracking")
+                    breakpoint()
+
+                for possible_match_id in set(result.values()): #Go through all unique values
+                    if possible_match_id == "new": #Do nothing for "new"
+                        continue
+                    test_if_str = np.argwhere(np.array(list(result.values())) == "new") #Test if "new" is present. If so, we must match with str instead of int.
+                    if len(test_if_str) > 0:
+                        itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the indices of the clusters sharing this match
+                    else:
+                        itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the indices of the clusters sharing this match
+                    print(possible_match_id, np.array(list(result.values())), itemindex, len(itemindex))
+
+                    if len(itemindex) > 1: #If multiple clusters match the same tracked cluster
+                        pos, result, cluster_index = resolve_nonunique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters)
+                        skip_tracked_cluster.append(str(result[str(cluster_index[pos])])) #Skip the tracked cluster that now has its optimal match
+                        skip_cluster.append(cluster_index[pos]) #Skip the cluster that is optimally matched
+
+                result = match_cluster_to_tracked_cluster(cluster_dict, tracked_clusters, Params, result, skip_cluster, skip_tracked_cluster) #Match with tracked clusters, but skip the already matched
+
+                #Debug info:
+                unique_match_debug_info(result, cluster_dict, t_list)
+
+                result_int = []
+                for val in result.values():
+                    if isinstance(val, int):
+                        result_int.append(val)
+
+            #Add the clusters to the tracked clusters
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in the result dict
+                cluster['id'] = iteration
+                if pos == "new":
+                    new_key = len(tracked_clusters) - 1 #-1 for "iteration", +1 for the next cluster and -1 for zero-based keys = -1
+                    tracked_clusters[str(new_key)] = [cluster]
+                else:
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+    return tracked_clusters
+
+
+def unique_match_debug_info(result, cluster_dict, t_list):
+    """
+    Debug info
+
+    Args:
+        result (dict): Dictionary of matches
+        cluster_dict (dict): Dictionary of clusters
+        t_list (list): List of median frequencies of the last tracked clusters
+
+    Returns:
+        None
+
+    """
+    print('\n')
+    for ii, key in enumerate(cluster_dict.keys()):
+        cluster = cluster_dict[key]
+        pos = result[str(ii)] #Find pos in the result dict
+        if pos == "new":
+            print(cluster_dict[key]['median_f'], str(ii), pos)
+        else:
+            print(cluster_dict[key]['median_f'], str(ii), pos, t_list[pos])
\ No newline at end of file
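resolve_nonunique_matches, added next, implements the tie-break used in the while-loop above: when several clusters claim the same tracked cluster, the cluster whose mode shapes have the highest mean MAC against that tracked cluster keeps the match, and the remaining claimants are relabelled "new". A toy illustration of that rule, using hypothetical mean-MAC scores (the real function computes them from the clusters' mode shapes):

    # Two clusters (keys "2" and "3") both claim tracked cluster 1; the one
    # whose mode shapes agree best with it (highest mean MAC) keeps the
    # match, the rest are relabelled "new".
    result = {"2": 1, "3": 1}
    mean_mac = {"2": 0.97, "3": 0.84}   # hypothetical mean-MAC scores

    winner = max(mean_mac, key=mean_mac.get)
    for key in result:
        if key != winner:
            result[key] = "new"
    print(result)                        # -> {'2': 1, '3': 'new'}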
diff --git a/src/methods/mode_tracking_functions/resolve_nonunique_matches.py b/src/methods/mode_tracking_functions/resolve_nonunique_matches.py
new file mode 100644
index 0000000..7cbeea4
--- /dev/null
+++ b/src/methods/mode_tracking_functions/resolve_nonunique_matches.py
@@ -0,0 +1,58 @@
+from typing import Any
+import numpy as np
+from functions.calculate_mac import calculate_mac
+
+def resolve_nonunique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters):
+    """
+    Resolve the case where two clusters match the same tracked cluster by determining which match is optimal.
+    Clusters that do not have an optimal match are given the match result = "new"
+
+    Example:
+        Cluster 2 matches tracked cluster 1
+        Cluster 3 matches tracked cluster 1
+
+    Args:
+        possible_match_id (int): The index of the tracked cluster
+        itemindex (np.ndarray): The indices of the clusters that share the same match
+        result (dict): Dictionary of suggested matches
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+
+    Returns:
+        pos (int): Index of the cluster with the optimal match
+        result (dict): Dictionary of re-done matches
+        cluster_index (np.ndarray): The indices of the clusters that share the same match
+
+    """
+    mean_MAC = []
+    keys = [str(y[0]) for y in itemindex.tolist()] #Make dictionary keys from the indices in itemindex
+    for nn in itemindex: #Go through the indices of the candidate cluster matches
+        cluster = cluster_dict[int(nn[0])]
+        phi_all = cluster["mode_shapes"] #Mode shapes of the cluster
+        tracked_cluster_list = tracked_clusters[str(possible_match_id)] #All clusters in the tracked cluster group
+        tracked_cluster = tracked_cluster_list[-1] #The last cluster of the tracked cluster group
+        phi_t_all = tracked_cluster['mode_shapes'] #Mode shapes of the tracked cluster
+
+        #Truncate the mode shape lists to the same length, i.e. the same number of poles
+        if len(phi_all) > len(phi_t_all):
+            phi_all = phi_all[0:len(phi_t_all)]
+        elif len(phi_all) < len(phi_t_all):
+            phi_t_all = phi_t_all[0:len(phi_all)]
+
+        MAC_matrix = np.zeros((len(phi_all), len(phi_t_all))) #Initiate a matrix of MAC values
+        for ii, phi in enumerate(phi_all):
+            for jj, phi_t in enumerate(phi_t_all):
+                MAC_matrix[ii, jj] = calculate_mac(phi, phi_t) #MAC
+
+        mean_MAC.append(np.mean(MAC_matrix)) #Save the mean MAC of this cluster compared with the matched tracked cluster
+    pos = mean_MAC.index(max(mean_MAC)) #Find the index with the highest mean MAC, i.e. the cluster that matches the tracked cluster best
+
+    cluster_index = itemindex[:, 0]
+
+    for key in keys:
+        if key != keys[pos]: #Let the best cluster match stay; add the worse matches as new clusters
+            result[key] = "new"
+    return pos, result, cluster_index
\ No newline at end of file
diff --git a/src/methods/mode_update_functions/__init__.py b/src/methods/mode_update_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/methods/packages/mode_pairs.py b/src/methods/mode_update_functions/mode_pairs.py
similarity index 100%
rename from src/methods/packages/mode_pairs.py
rename to src/methods/mode_update_functions/mode_pairs.py
diff --git a/src/methods/packages/model_update.py b/src/methods/mode_update_functions/model_update.py
similarity index 97%
rename from src/methods/packages/model_update.py
rename to src/methods/mode_update_functions/model_update.py
index 2e64d45..4926365 100644
--- a/src/methods/packages/model_update.py
+++ b/src/methods/mode_update_functions/model_update.py
@@ -4,7 +4,7 @@
 import os
 from methods.packages import eval_yafem_model as beam_new
 import json
-from methods.packages.mode_pairs import pair_calculate
+from methods.mode_update_functions.mode_pairs import pair_calculate
 
 
 def par_est(x, comb):
diff --git a/src/methods/model_update_module.py b/src/methods/model_update.py
similarity index 98%
rename from src/methods/model_update_module.py
rename to src/methods/model_update.py
index 2342816..b6fa1ef 100644
--- a/src/methods/model_update_module.py
+++ b/src/methods/model_update.py
@@ -6,7 +6,7 @@
 from scipy.optimize import minimize
 from scipy.linalg import eigh
 from methods.packages.eval_yafem_model import eval_yafem_model
-from methods.packages import model_update
+from methods.mode_update_functions import model_update
 from methods.constants import X0, BOUNDS
 
 # pylint: disable=C0103, W0603
diff --git a/src/methods/packages/clustering.py b/src/methods/packages/clustering.py
deleted file mode 100644
index 36642a5..0000000
--- a/src/methods/packages/clustering.py
+++ /dev/null
@@ -1,1206 +0,0 @@
-from typing import Any
-import numpy as np
-
-# Following the algorithm proposed here: https://doi.org/10.1007/978-3-031-61421-7_56
-# JVM 10/10/2025
-
-def cluster_func(oma_results: dict[str,Any], Params : dict[str,Any]) -> tuple[dict[str,Any], dict[str,Any], dict[str,Any]]:
-    """
-    Clustering of OMA results
-
-    Args:
-        oma_results (dict): PyOMA results
-        Params (dict): Algorihm parameters
-    Returns:
-        cluster_dict_1 (dict): Dictionary of clusters after clustering
-        cluster_dict_2 (dict): Dictionary of clusters after alignment
-        cluster_dict_3 (dict): Dictionary of clusters after cardinailty check
-
-    """
-
-    #Preeliminary cleaning
-    frequencies_, cov_freq_, damping_ratios_, cov_damping_, mode_shapes_ = remove_complex_conjugates(oma_results)
-    frequencies, cov_freq, damping_ratios, cov_damping,
mode_shapes = remove_highly_uncertain_points(oma_results,Params) - - # Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. - # This means the the frequency array maps directly to the plot: - # MO. - # 5.| x x - # 4.| x - # 3.| x - # 2.| x - # 1.| - # 0.| - # -1----4------- Frequency - # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. - # 6 for each model order, including 0 and 3 for maximum poles in a modelorder - # The frequency array will then become: - # _0_1_ - # 0| 1 4 - # 1| 1 Nan - # 0| 1 Nan - # 0| Nan 4 - # 0| Nan Nan - # 0| Nan Nan - - frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_oma_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes) - - row, col = np.indices(model_orders.shape) - row = row.flatten(order="C") - col = col.flatten(order="C") - - #Initiate data - data1 = {'frequencies':frequencies, - 'damping_ratios':damping_ratios, - 'cov_f':cov_freq, - 'cov_d':cov_damping, - 'mode_shapes':mode_shapes2, - 'row':row, - 'col':col} - - cluster_dict = {} - cluster_counter = 0 - for count, f in enumerate(frequencies.flatten(order="f")): #np.count_nonzero(~np.isnan(frequencies)) - - #print("\nIteration",count,"Unclustered poles:",np.count_nonzero(~np.isnan(frequencies))) - - #Extract data - frequencies = data1['frequencies'] - damping_ratios = data1['damping_ratios'] - cov_freq = data1['cov_f'] - cov_damping = data1['cov_d'] - - #Inital point - r = row[count] - c = col[count] - ip = [frequencies[r,c],cov_freq[r,c],damping_ratios[r,c],cov_damping[r,c]] - - if np.isnan(ip[0]) == True: #Pass if the pole does not exist. - pass - else: - initial_points = cluster_initial(ip,data1) #Algorithm. 
1 step 3 - Initialization - - #Creating clusters - cluster1 = cluster_creation(initial_points,Params) - - data2 = data1.copy() - - # Cluster expansion - expansion = True - kk = 0 - while expansion: - kk += 1 - if kk > 10: - print("Expansion never ends, something is wrong.") - breakpoint() - pre_cluster = cluster1 - cluster2 = cluster_expansion(cluster1,data2,Params,oma_results) - if cluster2['f'].shape == pre_cluster['f'].shape: - if (cluster2['f'] == pre_cluster['f']).all(): - expansion = False - else: - cluster1 = cluster2 - else: - cluster1 = cluster2 - - #Sort if more than one pole exist in the cluster - if isinstance(cluster2['f'],np.ndarray): - cluster2 = sort_cluster(cluster2) - - #Save cluster - if isinstance(cluster2['f'],np.ndarray): #Must atleast have two poles - #print("Cluster saved", np.median(cluster2['f'])) - cluster_dict[str(cluster_counter)] = cluster2 - cluster_counter += 1 - data1 = remove_data_from_S(data2,cluster2) #Remove clustered poles from data - else: - print("cluster2 too short:",1,"But must be:",Params['mstab']) - - - #Allignment or merging of stacked clusters - cluster_dict2 = alignment(cluster_dict.copy(),Params) - #Median filter - #cluster_dict3 = median_filter(cluster_dict2.copy()) - - #Custom cardinality check - cluster_dict3 = {} - cluster_counter = 0 - for ii, key in enumerate(cluster_dict2.keys()): - cluster = cluster_dict2[key] - if isinstance(cluster['f'],np.ndarray): - if cluster['f'].shape[0] < Params['mstab']: - print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) - else: - print("Cluster saved", np.median(cluster['f'])) - cluster_dict3[str(ii)] = cluster - cluster_counter += 1 - data1 = remove_data_from_S(data2,cluster) #Remove clustered poles from data - else: - print("cluster too short:",1,"But must be:",Params['mstab']) - cluster_dict2.pop(key) - - #Add median and confidence intervals (one sided) to cluster data - for key in cluster_dict3.keys(): - cluster = cluster_dict3[key] - cluster['median_f'] = np.median(cluster['f']) - # ci_f_upper = [] - # ci_f_lower = [] - # ci_d_upper = [] - # ci_d_lower = [] - # for ii, cov_f in enumerate(cluster['cov_f']): - # ci_f_upper.append(np.sqrt(cov_f) * Params['bound_multiplier']) - # ci_f_lower.append(np.sqrt(cov_f) * Params['bound_multiplier']) - # ci_d_upper.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) - # ci_d_lower.append(np.sqrt(cluster['cov_d'][ii]) * Params['bound_multiplier']) - ci_f = np.sqrt(cluster['cov_f']) * Params['bound_multiplier'] - ci_d = np.sqrt(cluster['cov_d']) * Params['bound_multiplier'] - cluster['ci_f'] = ci_f - cluster['ci_d'] = ci_d - - #Sort the clusters into accending order of median frequency - median_frequencies = np.zeros(len(cluster_dict3)) - for ii, key in enumerate(cluster_dict3.keys()): - cluster = cluster_dict3[key] - median_frequencies[ii] = cluster['median_f'] - - indices = np.argsort(median_frequencies) - cluster_dict4 = {} - for ii, id in enumerate(np.array(list(cluster_dict3.keys()))[indices]): #Rename all cluster dict from 0 to len(cluster_dict2) - cluster_dict4[ii] = cluster_dict3[id] #Insert a cluster into a key - - return cluster_dict4 - -def calculate_mac(reference_mode: np.array, mode_shape: np.array) -> float: - """ - Calculate Modal Assurance Criterion (MAC) - - Args: - reference_mode (np.array): Mode shape to compare to - mode_shape (np.array): Mode shape to compare - Returns: - MAC (float): Modal Assurance Criterion - - """ - numerator = np.abs(np.dot(reference_mode.conj().T, 
mode_shape)) ** 2 - denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape) - return np.real(numerator / denominator) - -def cluster_initial(ip: list[float], data: dict[str,Any], bound: float = 2) -> dict[str,Any]: - """ - Find the initial cluster points - - Args: - ip (list): Frequency, damping and covariance for the inital point (ip) - data (dict): OMA points data - bound (float): Multiplier on standard deviation - Returns: - initial_points (float): Initial points to create cluster from - - """ - #Extract data of initial point - ip_f = ip[0] - ip_cov_f = ip[1] - ip_d = ip[2] - ip_cov_d = ip[3] - - # Confidence interval using the ±2*standard_deviation - f_lower_bound = ip_f - bound * np.sqrt(ip_cov_f) - f_upper_bound = ip_f + bound * np.sqrt(ip_cov_f) - z_lower_bound = ip_d - bound * np.sqrt(ip_cov_d) - z_upper_bound = ip_d + bound * np.sqrt(ip_cov_d) - - - frequencies = data['frequencies'] - damping_ratios = data['damping_ratios'] - - # Find elements within the current limit that are still ungrouped - condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound)# & ungrouped_mask - indices = np.argwhere(condition_mask) # Get indices satisfying the condition - - #Generate the data for inital points - initial_points = {} - initial_points['f'] = data['frequencies'][condition_mask] - initial_points['cov_f'] = data['cov_f'][condition_mask] - initial_points['d'] = data['damping_ratios'][condition_mask] - initial_points['cov_d'] = data['cov_d'][condition_mask] - initial_points['ms'] = data['mode_shapes'][condition_mask,:] - initial_points['row'] = indices[:,0] - initial_points['col'] = indices[:,1] - - return initial_points - -def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Algorithm 2 - """ - Create cluster - - Args: - IP (dict): Dictionary of data on inital points - Params (dict): Dictionary of algorithm parameters - Returns: - cluster (dict): Cluster - - """ #Algorithm 2 - #print("\nCluster creation") - #Extract data: - frequencies = IP['f'] - cov_f = IP['cov_f'] - damping_ratios = IP['d'] - cov_d = IP['cov_d'] - mode_shapes = IP['ms'] - row = IP['row'] - col = IP['col'] - - IPu = {} - if len(row) != len(set(row)): #line 5 in algorithm #If there are multiple points at the same model order - for ii, id in enumerate(row): #Go through all rows/model orders - pos = np.argwhere(row==id) #Locate the indices of one or more poles - #line 6 in algorithm - if len(pos) == 1: #If only 1 pole exist at the model order - if len(IPu) == 0: #First pole - IPu['f'] = frequencies[ii] - IPu['cov_f'] = cov_f[ii] - IPu['d'] = damping_ratios[ii] - IPu['cov_d'] = cov_d[ii] - IPu['ms'] = np.array((mode_shapes[ii,:])) - IPu['row'] = row[ii] - IPu['col'] = col[ii] - unique = 1 #To determine if the unique poles are more than one, for later use. if 1 then only one unique pole exist - else: - IPu['f'] = np.append(IPu['f'],frequencies[ii]) - IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii]) - IPu['d'] = np.append(IPu['d'],damping_ratios[ii]) - IPu['cov_d'] = np.append(IPu['cov_d'],cov_d[ii]) - IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:])) - IPu['row'] = np.append(IPu['row'],row[ii]) - IPu['col'] = np.append(IPu['col'],col[ii]) - unique = 2 #To determine if the unique poles are more than one, for later use. 
if 2 more than one uniqe pole exist - - if len(IPu) > 0: #If there exist model orders with unique poles - if unique == 1: #If there only exist one unique pole - cluster = {'f':np.array([IPu['f']]), - 'cov_f':np.array([IPu['cov_f']]), - 'd':np.array([IPu['d']]), - 'cov_d':np.array([IPu['cov_d']]), - 'mode_shapes':np.array([IPu['ms']]), - 'model_order':np.array([Params['model_order']-IPu['row']]), - 'row':np.array([IPu['row']]), - 'col':np.array([IPu['col']]), - 'MAC':np.array([1])} - # print("371, IPu",cluster['f'],cluster['row']) - else: #If more unique poles exist - cluster = {'f':np.array([IPu['f'][0]]), - 'cov_f':np.array([IPu['cov_f'][0]]), - 'd':np.array([IPu['d'][0]]), - 'cov_d':np.array([IPu['cov_d'][0]]), - 'mode_shapes':np.array([IPu['ms'][0,:]]), - 'model_order':np.array([Params['model_order']-IPu['row'][0]]), - 'row':np.array([IPu['row'][0]]), - 'col':np.array([IPu['col'][0]]), - 'MAC':np.array([1])} - # print("381, IPu",cluster['f'],cluster['row']) - # print("IPu",IPu['row']) - # if cluster['f'][0] > 300: - # breakpoint() - cluster, non_clustered_IPu = cluster_from_mac(cluster,IPu,Params) #cluster the unique poles - - else: #if no unique poles exist then go forth with the initial point, ip. - #Only the initial point is clustered - cluster = {'f':np.array([frequencies[0]]), - 'cov_f':np.array([cov_f[0]]), - 'd':np.array([damping_ratios[0]]), - 'cov_d':np.array([cov_d[0]]), - 'mode_shapes':np.array([mode_shapes[0,:]]), - 'model_order':np.array([Params['model_order']-row[0]]), - 'row':np.array([row[0]]), - 'col':np.array([col[0]]), - 'MAC':np.array([1])} - - #Check if there are multiple points with same model order as ip - ip_ids = np.argwhere(row==row[0]) - if len(ip_ids[:,0]) > 1: # Remove all the other points at the same model order - for ii in ip_ids[1:,0]: - try: - frequencies = np.delete(frequencies,ii) - cov_f = np.delete(cov_f,ii) - damping_ratios = np.delete(damping_ratios,ii) - cov_d = np.delete(cov_d,ii) - mode_shapes = np.delete(mode_shapes,ii,axis=0) - row = np.delete(row,ii) - col = np.delete(col,ii) - except: - breakpoint() - # print("379,ip is alone",cluster['row'],row) - - - # try: - # print("Cluster after IPu",cluster['row']) - # except: - # pass - - if len(row) != len(set(row)): #If there still are points at the same model order in IP - IPm = {} - for ii, id in enumerate(row): #Go through all rows/model orders - pos = np.argwhere(row==id) #Locate the indices of one or more poles - #line 6 in algorithm - if len(pos) > 1: #If more than one pole exist for the model order - if len(IPm) == 0: #First pole - IPm['f'] = frequencies[ii] - IPm['cov_f'] = cov_f[ii] - IPm['d'] = damping_ratios[ii] - IPm['cov_d'] = cov_d[ii] - IPm['ms'] = np.array((mode_shapes[ii,:])) - IPm['row'] = row[ii] - IPm['col'] = col[ii] - else: - IPm['f'] = np.append(IPm['f'],frequencies[ii]) - IPm['cov_f'] = np.append(IPm['cov_f'],cov_f[ii]) - IPm['d'] = np.append(IPm['d'],damping_ratios[ii]) - IPm['cov_d'] = np.append(IPm['cov_d'],cov_d[ii]) - IPm['ms'] = np.vstack((IPm['ms'],np.array(mode_shapes[ii,:]))) - IPm['row'] = np.append(IPm['row'],row[ii]) - IPm['col'] = np.append(IPm['col'],col[ii]) - # After the unique poles are clustered, the multiple poles are clusterd - # try: - # print("IPu",IPu['f'],IPu['row']) - # except: - # print("No IPu") - # try: - # print("IPm",IPm['f'],IPm['row']) - # except: - # print("No IPm") - # print("to compare",cluster['f'][0],cluster['row'][0]) - cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,IPm,Params) - - - - #Start while loop - cluster_len_before = 0 
- while len(cluster['row']) != cluster_len_before: - # print(len(cluster['row']),cluster_len_before) - # print("c", cluster['row']) - # try: - # print("u", non_clustered_IPu['row']) - # except: - # pass - # try: - # print("m", non_clustered_IPm['row']) - # except: - # pass - - cluster_len_before = len(cluster['row']) - try: - if len(non_clustered_IPu['row']) > 0: - cluster, non_clustered_IPu = cluster_from_mac(cluster,non_clustered_IPu,Params) #cluster the unique poles again - except: - pass - if len(non_clustered_IPm['row']) > 0: - cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,non_clustered_IPm,Params) #cluster the non-unique poles again - - else: #line 1 in algorithm: only unique poles - cluster = {'f':np.array([frequencies[0]]), - 'cov_f':np.array([cov_f[0]]), - 'd':np.array([damping_ratios[0]]), - 'cov_d':np.array([cov_d[0]]), - 'mode_shapes':np.array([mode_shapes[0,:]]), - 'model_order':np.array([Params['model_order']-row[0]]), - 'row':np.array([row[0]]), - 'col':np.array([col[0]]), - 'MAC':np.array([1])} - if IP['f'].shape[0] > 1: - cluster, _ = cluster_from_mac(cluster,IP,Params) - - #Here lies the algorithms cardinality check - # print(cluster) - # if cluster['f'].shape[0] < Params['mstab']: - # print("cluster too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) - # cluster = {} - - return cluster - -def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: - """ - Add points to cluster based on MAC - - Args: - cluster (dict): Intermediate cluster - IP (dict): Dictionary of data on inital points - Params (dict): Dictionary of algorithm parameters - Returns: - cluster (dict): Intermediate cluster - - """ - - #Extract data - frequencies = IP['f'] - cov_f = IP['cov_f'] - damping_ratios = IP['d'] - cov_d = IP['cov_d'] - mode_shapes = IP['ms'] - row = IP['row'] - col = IP['col'] - - ip_ms = IP['ms'][0] - i_ms = IP['ms'][1:] - f_ip = frequencies[0] - f_i = frequencies[1:] - row_i = row[1:] - # print(cluster['row']) - # print(IP['ms'].shape) - - skip_id = [] - - for jj, ms in enumerate(i_ms): #Go through all mode shapes in cluster - idx = jj+1 - MAC = calculate_mac(ip_ms,ms) #Does the mode shape match with the first pole - # print(row_i[jj],MAC) - if MAC > Params['tMAC']: #line 2 in algorithm - #Add to cluster - cluster['f'] = np.append(cluster['f'],frequencies[idx]) - cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx]) - cluster['d'] = np.append(cluster['d'],damping_ratios[idx]) - cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx]) - cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:]))) - cluster['MAC'] = np.append(cluster['MAC'],MAC) - cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx]) - cluster['row'] = np.append(cluster['row'],row[idx]) - cluster['col'] = np.append(cluster['col'],col[idx]) - - skip_id.append(idx) - - - #IP['ms'] = np.delete(IP['ms'],skip_id,axis=0) - - # print(cluster['row']) - # print(IP['ms'].shape) - # print("skip_id",skip_id) - #Compare remaining points with newly added cluster points, i.e. 
points are compared with the full cluster, not just ip - if cluster['f'].shape[0] > 1: #If points have been added to cluster proceed - if IP['ms'].shape[0] > len(skip_id): #If there are more points to compare left, then proceed - unclustered_points = 1 - while IP['ms'].shape[0] != unclustered_points: #Run until no points are clustered anymore - unclustered_points = IP['ms'].shape[0] - - i_ms = IP['ms'][1:] - for jj, ms in enumerate(i_ms): - idx = jj+1 - if idx in skip_id: - # print(idx) - continue - - MAC_list = [] - for c_ms in cluster['mode_shapes']: - MAC_list.append(calculate_mac(c_ms,ms)) - - # print("MAC_list",MAC_list) - if max(MAC_list) > Params['tMAC']: #line 2 in algorithm - #Add to cluster - cluster['f'] = np.append(cluster['f'],frequencies[idx]) - cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx]) - cluster['d'] = np.append(cluster['d'],damping_ratios[idx]) - cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx]) - cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:]))) - cluster['MAC'] = np.append(cluster['MAC'],MAC) - cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx]) - cluster['row'] = np.append(cluster['row'],row[idx]) - cluster['col'] = np.append(cluster['col'],col[idx]) - - skip_id.append(idx) - - #IP['ms'] = np.delete(IP['ms'],skip_id,axis=0) - - # skip_id.insert(0,0) - # skip_id_array = np.array(skip_id) - - # all_id = np.array(list(range(len(row)))) - # unclustered_id = np.delete(all_id,skip_id_array) - - clustered_id = [] - for r2 in cluster['row']: #For every entry in row cluster - unclustered_point = False - for ii, r1 in enumerate(IP['row']): #For every entry in row IPu - if r1 == r2: #If r1 is a entry of "row" in the cluster, then save that row for later. 
- clustered_id.append(ii) - - all_id = np.array(list(range(len(IP['row'])))) - - clustered_id = np.array(clustered_id) - if clustered_id.shape[0] > 0: - unclustered_id = np.delete(all_id,clustered_id) - unclustered_id = np.insert(unclustered_id,0,0) - else: - unclustered_id = all_id - - unclustered_IPu = {} - unclustered_IPu['f'] = IP['f'][unclustered_id] - unclustered_IPu['cov_f'] = IP['cov_f'][unclustered_id] - unclustered_IPu['d'] = IP['d'][unclustered_id] - unclustered_IPu['cov_d'] = IP['cov_d'][unclustered_id] - unclustered_IPu['ms'] = IP['ms'][unclustered_id] - unclustered_IPu['row'] = IP['row'][unclustered_id] - unclustered_IPu['col'] = IP['col'][unclustered_id] - - return cluster, unclustered_IPu - -def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: - """ - Cluster based on MAC if multiple poles exist for the model order - - Args: - cluster (dict): Intermediate cluster - IP (dict): Dictionary of data on inital points - Params (dict): Dictionary of algorithm parameters - Returns: - cluster (dict): Intermediate cluster - - """ - #Cluster based on MAC if multiple poles exist for the model order - # print("cluster_IPm") - #Extract data - frequencies = IPm['f'] - cov_f = IPm['cov_f'] - damping_ratios = IPm['d'] - cov_d = IPm['cov_d'] - mode_shapes = IPm['ms'] - row = IPm['row'] - col = IPm['col'] - - # if isinstance(cluster['f'],np.ndarray): - # ip_ms = cluster['mode_shapes'][0,:] #Mode shape of the first pole - # else: - # ip_ms = cluster['mode_shapes'] #Mode shape of the first pole - - # Find the model orders with multiple poles - pos = [] - for ii, idd in enumerate(set(row)): - pos.append(np.argwhere(row==idd)) - - skip_id = [] - skip_id_before = None - while skip_id != skip_id_before: - ip_ms = cluster['mode_shapes'] - if isinstance(cluster['f'],np.ndarray): - ip_ms_0 = ip_ms[0,:] #Mode shape of the first pole - else: - ip_ms_0 = ip_ms #Mode shape of the first pole - - i_ms = IPm['ms'][:] #Mode shape of the model orders with mutiple poles - - - skip_id_before = skip_id.copy() - # print("Cluster in IPm",cluster['row']) - - - #Go through all the model orders - for oo, pos_i in enumerate(pos): - MAC = np.zeros(pos_i.shape[0]) - # print("IPm model order",list(set(row))[oo]) - - if oo in skip_id: #Skip these model orders, since they have already been added. 
- continue - - pos_i = pos_i[:,0] - for ii, id_row in enumerate(pos_i): - #print(IPm['row'][id_row],id_row) - #print(ip_ms.shape,i_ms[id_row].shape) - MAC[ii] = calculate_mac(ip_ms_0,i_ms[id_row]) #Calculate MAC between first pole of cluster and a pole in IPm - - #If MAC is not satisfied - if MAC[ii] < Params['tMAC']: #Search for max across all mode shapes in cluster: - #line 3 in algorithm - MAC_list = [] - for ms in ip_ms: - MAC_list.append(calculate_mac(ms,i_ms[id_row])) - MAC[ii] = max(MAC_list) - - #Find the mask for the poles that meets the MAC criteria - mask = MAC > Params['tMAC'] - pos_MAC = np.argwhere(mask==True) #Get indicies - - #Formatting of the indicies - if pos_MAC.shape[0] > 1: #more than one indice - pos_MAC = pos_MAC[:,0] - else: #Only one or zero indice (No MAC match) - if pos_MAC.shape[0] == 1: - pos_MAC = pos_MAC[0] - - # print("MAC",MAC) - # print("MACpos",pos_MAC) - if pos_MAC.shape[0] > 1: #If multiple poles comply with MAC criteria - #ids formatting - ids = pos_i[pos_MAC] - #ids = ids[:,0] - - #Get frequencies of poles - freq = np.zeros(ids.shape[0]) - for jj, idid in enumerate(ids): - freq[jj] = frequencies[idid] - median_f = np.median(cluster['f']) - - #Locate the index of the closest pole - idx = (np.abs(freq - median_f)).argmin() - ll = pos_i[pos_MAC[idx]] - - # print("IPm point mac approved",row[ll],frequencies[ll],MAC) - - #Add this pole to the cluster - cluster['f'] = np.append(cluster['f'],frequencies[ll]) - cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) - cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) - cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) - cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) - cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[idx]]) - cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) - cluster['row'] = np.append(cluster['row'],row[ll]) - cluster['col'] = np.append(cluster['col'],col[ll]) - - skip_id.append(oo) - - elif pos_MAC.shape[0] == 1: #If only one pole complies with MAC - ll = pos_i[pos_MAC[0]] - - - # print("IPm point mac approved",row[ll],frequencies[ll],MAC) - - - #Add this pole to the cluster - cluster['f'] = np.append(cluster['f'],frequencies[ll]) - cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll]) - cluster['d'] = np.append(cluster['d'],damping_ratios[ll]) - cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll]) - cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:]))) - cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[0]]) - cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll]) - cluster['row'] = np.append(cluster['row'],row[ll]) - cluster['col'] = np.append(cluster['col'],col[ll]) - - skip_id.append(oo) - # else: - # print("Not clustered. MAC not satisfied") - # print("skip",skip_id) - - clustered_id = [] - for r2 in cluster['row']: #For every entry in row cluster - unclustered_point = False - for ii, r1 in enumerate(IPm['row']): #For every entry in row IPm - if r1 == r2: #If r1 is a entry of "row" in the cluster, then save that row for later. 
- clustered_id.append(ii) - - all_id = np.array(list(range(len(IPm['row'])))) - - clustered_id = np.array(clustered_id) - if clustered_id.shape[0] > 0: - unclustered_id = np.delete(all_id,clustered_id) - else: - unclustered_id = all_id - # print("709,unclustered_id",unclustered_id) - - unclustered_IPm = {} - unclustered_IPm['f'] = IPm['f'][unclustered_id] - unclustered_IPm['cov_f'] = IPm['cov_f'][unclustered_id] - unclustered_IPm['d'] = IPm['d'][unclustered_id] - unclustered_IPm['cov_d'] = IPm['cov_d'][unclustered_id] - unclustered_IPm['ms'] = IPm['ms'][unclustered_id] - unclustered_IPm['row'] = IPm['row'][unclustered_id] - unclustered_IPm['col'] = IPm['col'][unclustered_id] - - # print("unclustered_IPm['row']",unclustered_IPm['row']) - - - return cluster, unclustered_IPm - -def remove_data_from_S(data: dict[str,Any],cluster: dict[str,Any]) -> dict[str,Any]: - """ - Remove cluster from data or S - - Args: - data (dict): OMA points data - cluster (dict): cluster - Returns: - data2 (dict): Filtered OMA points data - - """ - #Copy data - frequencies = data['frequencies'].copy() - damping_ratios = data['damping_ratios'].copy() - cov_freq = data['cov_f'].copy() - cov_damping = data['cov_d'].copy() - mode_shapes = data['mode_shapes'].copy() - row = data['row'].copy() - col = data['col'].copy() - #Make new data dictionary - data2 = {'frequencies':frequencies, - 'damping_ratios':damping_ratios, - 'cov_f':cov_freq, - 'cov_d':cov_damping, - 'mode_shapes':mode_shapes, - 'row':row, - 'col':col} - #Remove data - row = cluster['row'] - col = cluster['col'] - for ii, r in enumerate(row): - c = col[ii] - data2['frequencies'][r,c] = np.nan - data2['damping_ratios'][r,c] = np.nan - data2['cov_f'][r,c] = np.nan - data2['cov_d'][r,c] = np.nan - data2['mode_shapes'][r,c,:] = np.nan - - return data2 - -def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[str,Any], oma_results) -> dict[str,Any]: - """ - Expand cluster based on minima and maxima bound - - Args: - cluster (dict): Intermediate cluster - data (dict): OMA points data - Params (dict): Dictionary of algorithm parameters - Returns: - cluster (dict): Expanded cluster - - """ - #print("\nExpansion") - unClustered_frequencies = data['frequencies'] - unClustered_damping = data['damping_ratios'] - - freq_c = cluster['f'] - cov_f = cluster['cov_f'] - damp_c = cluster['d'] - cov_d = cluster['cov_d'] - row = cluster['row'] - - bound_multiplier = Params['bound_multiplier'] - - #Find min-max bounds of cluster - f_lower_bound = np.min(freq_c - bound_multiplier * np.sqrt(cov_f)) # Minimum of all points for frequencies - f_upper_bound = np.max(freq_c + bound_multiplier * np.sqrt(cov_f)) # Maximum of all points for frequencies - d_lower_bound = np.min(damp_c - bound_multiplier * np.sqrt(cov_d)) # Minimum of all points for damping - d_upper_bound = np.max(damp_c + bound_multiplier * np.sqrt(cov_d)) # Maximum of all points for damping - - #Mask of possible expanded poles - condition_mask = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= d_lower_bound) & (unClustered_damping <= d_upper_bound) - # Get indices satisfying the condition - expanded_indices = np.argwhere(condition_mask) - - #Initiate cluster_points for cluster creation - cluster_points = {} - cluster_points['f'] = data['frequencies'][condition_mask] - cluster_points['cov_f'] = data['cov_f'][condition_mask] - cluster_points['d'] = data['damping_ratios'][condition_mask] - cluster_points['cov_d'] = 
data['cov_d'][condition_mask] - cluster_points['ms'] = data['mode_shapes'][condition_mask,:] - cluster_points['row'] = expanded_indices[:,0] - cluster_points['col'] = expanded_indices[:,1] - - #print(cluster_points['f']) - #print(cluster_points['row']) - - #Make the first ip from cluster be the previous first point in cluster_points - if isinstance(cluster['f'],np.ndarray): - index_f = np.argwhere(cluster_points['f'] == cluster['f'][0]) - else: - index_f = np.argwhere(cluster_points['f'] == cluster['f']) - if len(index_f[:,0]) > 1: - index_row = np.argwhere(cluster_points['row'][index_f[:,0]] == cluster['row'][0]) - ip_id = int(index_f[index_row[:,0]][:,0]) - else: - ip_id = int(index_f[:,0]) - indecies = list(range(len(cluster_points['f']))) - poped_id = indecies.pop(ip_id) - indecies.insert(0,poped_id) - indecies = np.array(indecies) - - cluster_points['f'] = cluster_points['f'][indecies] - cluster_points['cov_f'] = cluster_points['cov_f'][indecies] - cluster_points['d'] = cluster_points['d'][indecies] - cluster_points['cov_d'] = cluster_points['cov_d'][indecies] - cluster_points['ms'] = cluster_points['ms'][indecies,:] - cluster_points['row'] = cluster_points['row'][indecies] - cluster_points['col'] = cluster_points['col'][indecies] - - # print("row_before",cluster_points['row']) - #print("exp1",cluster_points['f']) - - #Check if these values can be clustered - cluster = cluster_creation(cluster_points,Params) - if isinstance(cluster['f'],np.ndarray): - if len(cluster['row']) != len(set(cluster['row'])): - print("row_before",cluster_points['row']) - print("row_after",cluster['row']) - print("exp2",cluster['f']) - print("double orders",cluster['row']) - - breakpoint() - - # print("row_before",cluster_points['row']) - #print("exp1",cluster_points['f']) - # print("row_after",cluster['row']) - # print("exp2",cluster['f']) - - return cluster - -def sort_cluster(cluster: dict[str,Any]) -> dict[str,Any]: - """ - Sort cluster based on row/model order - - Args: - cluster (dict): Cluster - Returns: - cluster (dict): Sorted cluster - - """ - sort_id = np.argsort(cluster['row']) - - cluster['f'] = cluster['f'][sort_id] - cluster['cov_f'] = cluster['cov_f'][sort_id] - cluster['d'] = cluster['d'][sort_id] - cluster['cov_d'] = cluster['cov_d'][sort_id] - cluster['mode_shapes'] = cluster['mode_shapes'][sort_id,:] - cluster['MAC'] = cluster['MAC'][sort_id] - cluster['model_order'] = cluster['model_order'][sort_id] - cluster['row'] = cluster['row'][sort_id] - cluster['col'] = cluster['col'][sort_id] - - return cluster - -def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,dict]: - """ - Alignment/merging of clusters - - Args: - cluster_dict (dict): Dictionary of multiple clusters - Params (dict): Dictionary of algorithm parameters - Returns: - cluster_dict (dict): Dictionary of aligned clusters - - """ - #print("\nCluster alignment") - median_f = [] - for key in cluster_dict.keys(): #Find the median of each cluster - cluster = cluster_dict[key] - median_f.append(np.median(cluster['f'])) - median_f = np.array(median_f) - - deleted_cluster_id = [] - for ii, m_f in enumerate(median_f): #Go through all medians - if ii in deleted_cluster_id: #If cluster is deleted pass on - #print(deleted_cluster_id) - continue - # Calculate absolute difference of selected median and all medians - diff = abs(median_f-m_f) - # If this difference is above 0 (not itself) and inside the bounds: - # Bounds are the minimum of either median_f * allignment_factor_0 or Sampling frequency / 2 * 
allignment_factor_1 - # For lower median frequencies the bound is determined by the size of median frequency. - # For higher median frequencies the bound is determined by the sampling frequency - - mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1])) - indices = np.argwhere(mask == True) #Indicies of clusters that are closely located in frequency - - - - #print(cluster_dict.keys()) - if indices.shape[0] > 0:# If one or more clusters are found - ids = indices[:,0] - #print("ids",ids) - for id in ids: #Go through all clusters that is closely located - if id in deleted_cluster_id: - continue - - - #print("id",id) - break_loop = 0 - cluster1 = cluster_dict[str(ii)] #Parent cluster - cluster2 = cluster_dict[str(id)] #Co-located cluster - - # Proposed method - # for r in cluster2['model_order']: - # if r in cluster1['model_order']: #If the two clusters have poles with same model order, then skip the allignment - # print("Clusters have the same MO",cluster2['model_order'],cluster1['model_order']) - # break_loop = 1 - # if break_loop == 1: - # break - - MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0]) # Check mode shape for the first pole in each cluster - if MAC >= Params['tMAC']: #If MAC complies with the criteria, then add the two clusters - cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params) - cluster_dict[str(ii)] = cluster #Save the new larger cluster - if len(cluster_remaining) == 0: #If the remaining cluster is emmpty - cluster_dict.pop(str(id), None) #Remove the co-located cluster - deleted_cluster_id.append(int(id)) #The delete cluster id - else: - cluster_dict[str(id)] = cluster_remaining #Save the remaining cluster - - else: #Check if the mode shapes across any of the poles complies with the MAC criteria - - MAC = np.zeros((cluster1['mode_shapes'].shape[0],cluster2['mode_shapes'].shape[0])) - for jj, ms1 in enumerate(cluster1['mode_shapes']): - for kk, ms2 in enumerate(cluster2['mode_shapes']): - MAC[jj,kk] = calculate_mac(ms1,ms2) - if MAC.max() >= Params['tMAC']: #If MAC criteria is meet add the clusters together - cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params) - cluster_dict[str(ii)] = cluster #Save the new larger cluster - if len(cluster_remaining) == 0: #If the remaining cluster is emmpty - cluster_dict.pop(str(id), None) #Remove the co-located cluster - deleted_cluster_id.append(int(id)) #The delete cluster id - else: - cluster_dict[str(id)] = cluster_remaining #Save the remaining cluster - # else: - # if cluster1['f'][0] > 300: - # breakpoint() - - - cluster_dict_alligned = cluster_dict - return cluster_dict_alligned - -def join_clusters(cluster_1: dict[str,Any], cluster_2: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: - """ - Add two clusters together - - Args: - cluster_1 (dict): Cluster - cluster_2 (dict): Cluster - Params (dict): Dictionary of algorithm parameters - Returns: - cluster (dict): Joined cluster - cluster_remaining (dict): The cluster that remains - - """ - #Adding two clusters together - cluster = {} - cluster_remaining = {} - row1 = cluster_1['row'] - row2 = cluster_2['row'] - - #Should the dominant cluster be the one that have the higest model orders? 
- if row1.shape[0] >= row2.shape[0]: #Let be the largest cluster be the dominant one - cluster1 = cluster_1 - cluster2 = cluster_2 - row1 = cluster_1['row'] - row2 = cluster_2['row'] - else: - cluster1 = cluster_2 - cluster2 = cluster_1 - row1 = cluster_2['row'] - row2 = cluster_1['row'] - - median_f1 = np.median(cluster1['f']) - - for MO in range(Params['model_order']): #Go through all poles in a cluster - jj = np.argwhere(row1 == MO) - id = np.argwhere(row2 == MO) - if MO in row1: #If a pole in the largest cluster exist for the this model order - r1 = MO - if MO in row2: #If a pole exist in the same model order - #Get frequencies of the poles - f1 = cluster1['f'][jj[:,0]] - f2 = cluster2['f'][id[:,0]] - if abs(median_f1-f2) >= abs(median_f1-f1): #If pole in cluster 1 is closer to median of cluster 1 - cluster = append_cluster_data(cluster,cluster1,jj[:,0]) - cluster_remaining = append_cluster_data(cluster_remaining,cluster2,id[:,0]) - else: #If pole in cluster 2 is closer to median of cluster 1 - cluster = append_cluster_data(cluster,cluster2,id[:,0]) - cluster_remaining = append_cluster_data(cluster_remaining,cluster1,jj[:,0]) - else: #If only one pole exist in the largest cluster - cluster = append_cluster_data(cluster,cluster1,jj[:,0]) - elif MO in row2: #If a pole in the smallest cluster exist for the model order - cluster = append_cluster_data(cluster,cluster2,id[:,0]) - - return cluster, cluster_remaining - -def append_cluster_data(cluster: dict[str,Any], cluster2: dict[str,Any], id: int) -> dict[str,Any]: - """ - Add cluster data to an existing cluster - - Args: - cluster (dict): Existing cluster - cluster2 (dict): Cluster - id (int): id of data to append - Returns: - cluster (dict): Cluster - - """ - if len(cluster) == 0: #If it is the first pole - cluster['f'] = cluster2['f'][id] - cluster['cov_f'] = cluster2['cov_f'][id] - cluster['d'] = cluster2['d'][id] - cluster['cov_d'] = cluster2['cov_d'][id] - cluster['mode_shapes'] = cluster2['mode_shapes'][id,:] - cluster['MAC'] = cluster2['MAC'][id] - cluster['model_order'] = cluster2['model_order'][id] - cluster['row'] = cluster2['row'][id] - cluster['col'] = cluster2['col'][id] - else: - cluster['f'] = np.append(cluster['f'],cluster2['f'][id]) - cluster['cov_f'] = np.append(cluster['cov_f'],cluster2['cov_f'][id]) - cluster['d'] = np.append(cluster['d'],cluster2['d'][id]) - cluster['cov_d'] = np.append(cluster['cov_d'],cluster2['cov_d'][id]) - cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],cluster2['mode_shapes'][id,:])) - cluster['MAC'] = np.append(cluster['MAC'],cluster2['MAC'][id]) - cluster['model_order'] = np.append(cluster['model_order'],cluster2['model_order'][id]) - cluster['row'] = np.append(cluster['row'],cluster2['row'][id]) - cluster['col'] = np.append(cluster['col'],cluster2['col'][id]) - return cluster - -def median_filter(cluster_dict: dict[str,dict]) -> dict[str,dict]: - """ - Apply median filter to cluster - - Args: - cluster_dict (dict): Dictionary of multiple clusters - Returns: - cluster_dict3 (dict): Median filtered multiple clusters - - """ - print("\nMedian filter") - cluster_dict3 = {} - for key in cluster_dict.keys(): - cluster = cluster_dict[key] - #print(cluster['mode_shapes']) - median_f = np.median(cluster['f']) #Calculate median - - cluster_new = {} - for ii, f in enumerate(cluster['f']): #Go through all cluster poles - lower_bound = f - np.sqrt(cluster['cov_f'][ii]) * 2 - upper_bound = f + np.sqrt(cluster['cov_f'][ii]) * 2 - if (median_f > lower_bound) & (median_f < upper_bound): #Check if a 
cluster confidence interval wraps the median - cluster_new = append_cluster_data(cluster_new,cluster,ii) - # else: - # print("not",cluster['model_order'][ii]) - - cluster_dict3[key] = cluster_new - - return cluster_dict3 - - -def remove_complex_conjugates(oma_results): - """ - Remove complex conjucates - - Args: - oma_results (Dict[str, Any]): Results from PyOMA-2 - - Returns: - frequencies (np.ndarray): Frequencies (mean) - cov_freq (np.ndarray): Covariance of frequency - damping_ratios (np.ndarray): Damping ratios (mean) - cov_damping (np.ndarray): Covariance of damping ratio - mode_shapes (np.ndarray): Mode shapes - """ - OMA = oma_results.copy() - # OMA results as numpy array - frequencies = OMA['Fn_poles'].copy() - cov_freq = OMA['Fn_poles_cov'].copy() - damping_ratios = OMA['Xi_poles'].copy() - cov_damping = OMA['Xi_poles_cov'].copy() - mode_shapes = OMA['Phi_poles'].copy() - - # Remove the complex conjugate entries - frequencies = frequencies[::2] # This is 'S' as per algorithm - damping_ratios = damping_ratios[::2] # This is 'S' as per algorithm - mode_shapes = mode_shapes[::2, :, :] - cov_freq = cov_freq[::2] - cov_damping = cov_damping[::2] - - return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes - -def transform_oma_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_): - """ - Transform oma results - - Args: - frequencies_ (np.ndarray): Frequencies (mean) - cov_freq_ (np.ndarray): Covariance of frequency - damping_ratios_ (np.ndarray): Damping ratios (mean) - cov_damping_ (np.ndarray): Covariance of damping ratio - mode_shapes_ (np.ndarray): Mode shapes - - Returns: - frequencies (np.ndarray): Frequencies (mean) - cov_freq (np.ndarray): Covariance of frequency - damping_ratios (np.ndarray): Damping ratios (mean) - cov_damping (np.ndarray): Covariance of damping ratio - mode_shapes (np.ndarray): Mode shapes - """ - # Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. - # This means the the frequency array maps directly to the plot: - # MO. - # 5.| x x - # 4.| x - # 3.| x - # 2.| x - # 1.| - # 0.| - # -1----4------- Frequency - # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. 
- # 6 for each model order, including 0 and 3 for maximum poles in a modelorder - # The frequency array will then become: - # _0_1_ - # 0| 1 4 - # 1| 1 Nan - # 0| 1 Nan - # 0| Nan 4 - # 0| Nan Nan - # 0| Nan Nan - - #Transformation of data - frequencies = np.transpose(frequencies_) - frequencies = np.flip(frequencies, 0) - sort_indices = np.argsort(frequencies,axis=1) - frequencies = np.take_along_axis(frequencies, sort_indices, axis=1) - cov_freq = np.transpose(cov_freq_) - cov_freq = np.flip(cov_freq, 0) - cov_freq = np.take_along_axis(cov_freq, sort_indices, axis=1) - damping_ratios = np.transpose(damping_ratios_) - damping_ratios = np.flip(damping_ratios, 0) - damping_ratios = np.take_along_axis(damping_ratios, sort_indices, axis=1) - cov_damping = np.transpose(cov_damping_) - cov_damping = np.flip(cov_damping, 0) - cov_damping = np.take_along_axis(cov_damping, sort_indices, axis=1) - mode_shapes = np.moveaxis(mode_shapes_, [0, 1, 2], [1, 0, 2]) - - mode_shapes2 = np.zeros(mode_shapes.shape,dtype=np.complex128) - for ii, indices in enumerate(sort_indices): - mode_shapes2[ii,:,:] = mode_shapes[(sort_indices.shape[0]-ii-1),indices,:] - - # Array of model orders - model_order = np.arange(sort_indices.shape[0]) - model_orders = np.stack((model_order,) * sort_indices.shape[1], axis=1) - model_orders = np.flip(model_orders) - - return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders - -def remove_highly_uncertain_points(oma_results,oma_params): - """ - Remove highly uncertain points - - Args: - oma_results (Dict[str, Any]): Results from PyOMA-2 - oma_params (Dict[str, Any]): Parameters - - Returns: - frequencies (np.ndarray): Frequencies (mean) - cov_freq (np.ndarray): Covariance of frequency - damping_ratios (np.ndarray): Damping ratios (mean) - cov_damping (np.ndarray): Covariance of damping ratio - mode_shapes (np.ndarray): Mode shapes - """ - frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_complex_conjugates(oma_results) - - # # #=================== Removing high uncertain poles ======================= - freq_variance_treshold = oma_params.get('freq_variance_treshold', 0.1) - damp_variance_treshold = oma_params.get('damp_variance_treshold', 10**6) - frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies - damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios - indices_frequency = frequency_coefficient_variation > freq_variance_treshold - indices_damping = damping_coefficient_variation > damp_variance_treshold - above_nyquist = frequencies > oma_params['Fs']/2 - combined_indices = np.logical_or(np.logical_or(indices_frequency,indices_damping),above_nyquist) - frequencies[combined_indices] = np.nan - damping_ratios[combined_indices] = np.nan - cov_freq[combined_indices] = np.nan - cov_damping[combined_indices] = np.nan - mask = np.broadcast_to(np.expand_dims(combined_indices, axis=2), mode_shapes.shape) - mode_shapes[mask] = np.nan - - return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes \ No newline at end of file diff --git a/src/methods/packages/mode_tracking.py b/src/methods/packages/mode_tracking.py deleted file mode 100644 index 5ca9858..0000000 --- a/src/methods/packages/mode_tracking.py +++ /dev/null @@ -1,355 +0,0 @@ -from typing import Any -import numpy as np -from methods.packages.clustering import calculate_mac - -# JVM 14/10/2025 - -def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any],Params: dict[str,Any]=None) -> dict[str,Any]: - """ - Tracking of modes 
across experiments - - Args: - cluster_dict (dict): Dictionary of clusters - tracked_clusters (dict): Previously tracked clusters - Params (dict): tracking parameters - - Returns: - tracked_clusters (dict): Previously tracked clusters - - """ - print("Cluster tracking") - if Params == None: - Params = {'phi_cri':0.8, - 'freq_cri':0.2} - - m_f = [] - for key in cluster_dict.keys(): - cluster = cluster_dict[key] - m_f.append(cluster['median_f']) - - t_list = [] - t_length = [] - for key in tracked_clusters: #Go through all tracked clusters. They are identified with keys which are integers from 0 and up to total number of clusters - if key == 'iteration': - pass - else: - tracked_cluster_list = tracked_clusters[key] #Accessing all cluster in a tracked cluster group - t_length.append(len(tracked_cluster_list)) - tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group - #median freq of last cluster in tracked cluster group - t_list.append(tracked_cluster['median_f']) - - # No tracked clusters yet? - if not tracked_clusters: - first_track = 1 - else: - first_track = 0 - - if first_track == 1: - for id, key in enumerate(cluster_dict.keys()): - cluster = cluster_dict[key] - cluster['id'] = 0 - - tracked_clusters['iteration'] = 0 - tracked_clusters[str(id)] = [cluster] - else: - iter = tracked_clusters['iteration'] + 1 - tracked_clusters['iteration'] = iter - - result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params) #Match clusters to tracked clusters - - result_int = [] - for val in result.values(): #Get all non-"new" results - if type(val) == int: - result_int.append(val) - - if len(result_int) == len(set(result_int)): #If all clusters match with a unique tracked cluster - for ii, key in enumerate(cluster_dict.keys()): - cluster = cluster_dict[key] - pos = result[str(ii)] #Find pos in result dict - cluster['id'] = iter - if pos == "new": #Add cluster as a new tracked cluster - new_key = len(tracked_clusters)-1 #-1 for "iteration", + 1 for next cluster and -1 for starting at 0 = -1 - #print(f"new key: {new_key}") - tracked_clusters[str(new_key)] = [cluster] - else: #Add cluster to an existing tracked cluster - cluster_to_add_to = tracked_clusters[str(pos)] - cluster_to_add_to.append(cluster) - tracked_clusters[str(pos)] = cluster_to_add_to - - else: #If there are some clusters that match with the same tracked cluster. - kk = 0 - skip_tracked_cluster = [] - skip_cluster = [] - while len(result_int) != len(set(result_int)): - kk += 1 - if kk > 10: - #Debug info: - unique_match_debug_info(result,cluster_dict,t_list) - print("Unresolved mode tracking") - breakpoint() - - for possible_match_id in set(result.values()): #Go through all unique values - if possible_match_id == "new": #Do nothing if "new" - pass - else: - test_if_str = np.argwhere(np.array(list(result.values())) == "new") #Test if "new" is present. If so, then we must match with str instead of int. 
- if len(test_if_str) > 0: - itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the index of the unique cluster match - else: - itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the index of the unique cluster match - print(possible_match_id,np.array(list(result.values())),itemindex, len(itemindex)) - - if len(itemindex) > 1: #If multiple clusters match to the same tracked cluster - pos, result, cluster_index = resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters) - skip_tracked_cluster.append(str(result[str(cluster_index[pos])])) #Skip the best tracked cluster which is matced with another cluster. - skip_cluster.append(cluster_index[pos]) #Skip the best tracked cluster which is matced with another cluster. - - result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params,result,skip_cluster,skip_tracked_cluster) #Match with tracked clusters, but skip the already matched. - - #Debug info: - unique_match_debug_info(result,cluster_dict,t_list) - - result_int = [] - for val in result.values(): - if type(val) == int: - result_int.append(val) - - #Add the clusters to tracked clusters - for ii, key in enumerate(cluster_dict.keys()): - cluster = cluster_dict[key] - pos = result[str(ii)] #Find pos in result dict - cluster['id'] = iter - if pos == "new": - new_key = len(tracked_clusters)-1 #-1 for "iteration", + 1 for next cluster and -1 for starting at 0 = -1 - tracked_clusters[str(new_key)] = [cluster] - else: - cluster_to_add_to = tracked_clusters[str(pos)] - cluster_to_add_to.append(cluster) - tracked_clusters[str(pos)] = cluster_to_add_to - - - - return tracked_clusters - -def match_cluster_to_tracked_cluster(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], Params: dict[str,Any], result_prev: dict[str,Any] = {},skip_cluster: list = [], skip_tracked_cluster: list = []) -> dict[str,Any]: - """ - Match clusters to tracked clusters - - The result dictionary consist of keys: cluster indecies, and values: indecies of tracked cluster to match with - Example: - Cluster 1 match with tracked cluster 2 - Cluster 2 match with tracked cluster 1 - Cluster 3 match with tracked cluster 1 - Cluster 4 match with "new", i.e. could not be matched with an existing tracked cluster - - Args: - cluster_dict (dict): Dictionary of clusters - tracked_clusters (dict): Previously tracked clusters - Params (dict): tracking parameters - result_prev (dict): Dictionary of previous match result - skip_cluster (list): List of clusters that have proven they are a optimal match with a tracked cluster - skip_tracked_cluster (list): List of tracked clusters that have an optimal match with a cluster - - Returns: - result (dict): Dictionary of matches - - """ - result = {} - for id, key in enumerate(cluster_dict): #Go through all clusters - if id in skip_cluster: #If this cluster is already matched skip it - result[str(id)] = result_prev[str(id)] - continue - - #Get mode shapes - cluster = cluster_dict[key] - omega = cluster['median_f'] - phi = cluster['mode_shapes'][0] - phi_all = cluster['mode_shapes'] - - Xres = [] - MAC_list = [] - D_freq = [] - omega_t_list = [] - MAC_max_list = [] - MAC_avg_list = [] - for key in tracked_clusters: #Go through all tracked clusters. 
They are identified with keys which are integers from 0 and up to total number of clusters - if key == 'iteration': - pass - else: - tracked_cluster_list = tracked_clusters[key] #Accessing all cluster in a tracked cluster group - tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group - omega_t = tracked_cluster['median_f'] #median freq of last cluster in tracked cluster group - omega_t_list.append(omega_t) - phi_t_all = tracked_cluster['mode_shapes'] #phi of last cluster in tracked cluster group - phi_t = phi_t_all[0] - - MAC_list.append(float(calculate_mac(phi_t, phi))) - - MACs = np.zeros((phi_all.shape[0],phi_t_all.shape[0])) - for ii, phi in enumerate(phi_all): - for jj, phi_t in enumerate(phi_t_all): - MAC = float(calculate_mac(phi_t, phi)) - MACs[ii,jj] = MAC #Compare the cluster with all tracked clusters - - if key in skip_tracked_cluster: - MAC_avg = np.mean(0) - MAC_max = np.max(0) - MAC_max_list.append(0) - MAC_avg_list.append(0) - D_freq.append(10**6) - else: - MAC_avg = np.mean(MACs) - MAC_max = np.max(MACs) - MAC_max_list.append(MAC_max) - MAC_avg_list.append(MAC_avg) - D_freq.append(abs(omega_t-omega)/omega) - - itemindex1 = np.argwhere(np.array(MAC_max_list) > Params['phi_cri']) #Find where the cluster matches the tracked cluster regarding the MAC criteria - itemindex = np.argwhere(np.array(D_freq)[itemindex1[:,0]] < Params['freq_cri']) #Find where the cluster matches the tracked cluster regarding the MAC and frequency criteria - indicies = itemindex1[itemindex[:,0]] - if len(indicies) > 1: #If two or more clusters combly with the mode shape criteria - Xres = [] - Xres_f = [] - Xres_MAC = [] - for nn in indicies: - pos = nn[0] - X = D_freq[pos]/MAC_max_list[pos] #Objective function - Xres.append(X) - Xres_f.append(D_freq[pos]) - Xres_MAC.append(MAC_max_list[pos]) - - if Xres != []: # One or more cluster(s) combly with the frequency criteria - pos1 = Xres.index(min(Xres)) #Find the cluster that is most likely - pos2 = Xres_MAC.index(max(Xres_MAC)) #Find the largest MAC - pos3 = Xres_f.index(min(Xres_f)) #Find the smallest frequency difference - - if len(Xres) > 1: #If more than one cluster comply with criteria - Xres_left = Xres.copy() - del Xres_left[pos1] - if type(Xres_left) == np.float64: - Xres_left = [Xres_left] - - Xres_MAC_left = Xres_MAC.copy() - del Xres_MAC_left[pos1] - if type(Xres_MAC_left) == np.float64: - Xres_MAC_left = [Xres_MAC_left] - - Xres_f_left = Xres_f.copy() - del Xres_f_left[pos1] - if type(Xres_f_left) == np.float64: - Xres_f_left = [Xres_f_left] - - pos1_2 = Xres_left.index(min(Xres_left)) #Find the cluster that is most likely - pos2_2 = Xres_MAC_left.index(max(Xres_MAC_left)) #Find the cluster that is most likely based on MAC - pos3_2 = Xres_f_left.index(min(Xres_f_left)) #Find the cluster that is most likely based on Freq - - if (pos1 == pos2) and (pos1 == pos3): #If one match on all three parameters: objective function, max MAC and frequency difference - pos = int(indicies[pos1][0]) - result[str(id)] = pos #group to a tracked cluster - - #Make different: abs(min(Xres_left)/min(Xres)) < Params['obj_cri'] = 2 - elif abs(min(Xres_left)-min(Xres)) < Params['obj_cri']: #If the objective function results are close - if (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): #If both frequency differences are close to the target cluster - pos = int(indicies[pos2_2][0]) #Match with best MAC - result[str(id)] = pos #group to a tracked cluster - elif (min(Xres_f) < Params['freq_cri']) 
and (min(Xres_f_left) > Params['freq_cri']): #If Xres_f is smaller than the threshold - pos = int(indicies[pos3][0]) #Match with lowest frequency difference - result[str(id)] = pos #group to a tracked cluster - elif (min(Xres_f) > Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): - pos = int(indicies[pos3_2][0]) #Match with lowest frequency difference - result[str(id)] = pos #group to a tracked cluster - else: #If none of the above choose the one with highest MAC - pos = int(indicies[pos2_2][0]) - result[str(id)] = pos #group to a tracked cluster - else: #If none of the above choose the one with lowest onjective function - pos = int(indicies[pos1][0]) - result[str(id)] = pos #group to a tracked cluster - - else: #No cluster comply with frequency criteria, so a new cluster is saved - result[str(id)] = "new" - - elif len(indicies) == 1: #If one cluster combly with the mode shape criteria - pos = int(indicies[0][0]) - result[str(id)] = pos #group to a tracked cluster - - else: #Does not comply with mode shape criteria - result[str(id)] = "new" - - return result - -def resolve_unique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters): - """ - Resolve if two clusters match with the same tracked cluster. Determine what match is the most optimal. - Those clusters that does not have an optimal match, they are given the match result = "new" - - Example: - Cluster 2 match with tracked cluster 1 - Cluster 3 match with tracked cluster 1 - - Args: - possible_match_id (int): The index of tracked cluster - itemindex (np.ndarray): The indecies of clusters that have the same match - result (dict): Dictionary of suggested matches - cluster_dict (dict): Dictionary of clusters - tracked_clusters (dict): Previously tracked clusters - - Returns: - pos (int): Value of cluster that have the most optimal match. - result (dict): Dictionary of re-done matches - cluster_index: The indecies of clusters that have the same match - - """ - mean_MAC = [] - keys = [str(y[0]) for y in itemindex.tolist()] #Make keys for dictionary based on indices in itemindex - for nn in itemindex: #Go through possible clusters match index - cluster = cluster_dict[int(nn[0])] - phi_all = cluster["mode_shapes"] #Find mode shapes in cluster - tracked_cluster_list = tracked_clusters[str(possible_match_id)] #Accessing all cluster in a tracked cluster group - tracked_cluster = tracked_cluster_list[-1] #Accessing the last cluster for each tracked cluster group - phi_t_all = tracked_cluster['mode_shapes'] #Find mode shapes in tracked cluster - - #Make list of mode shapes have the same length, i.e. same number of poles - if len(phi_all) > len(phi_t_all): - phi_all = phi_all[0:len(phi_t_all)] - elif len(phi_all) < len(phi_t_all): - phi_t_all = phi_t_all[0:len(phi_all)] - else: #Equal length - pass - MAC_matrix = np.zeros((len(phi_all),len(phi_all))) #Initiate a matrix of MAC values - for ii, phi in enumerate(phi_all): - for jj, phi_t in enumerate(phi_t_all): - MAC_matrix[ii,jj] = calculate_mac(phi,phi_t) #Mac - - mean_MAC.append(np.mean(MAC_matrix)) #Save the mean values of MAC from this cluster compared to the matched tracked cluster - pos = mean_MAC.index(max(mean_MAC)) #Find the index with higest mean MAC, i.e. the cluster that match best with the tracked cluster. 
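resolve_unique_matches above breaks ties between clusters contending for the same tracked cluster by mean MAC: the candidate whose mode shapes agree best on average keeps the match, and the losers are re-matched or marked "new". A compact sketch of that tie-break, reusing the hypothetical mac helper from the earlier sketch; the actual function works on the result dict and numpy index arrays:

import numpy as np

def best_candidate(candidate_shapes: list, tracked_shapes: np.ndarray) -> int:
    # candidate_shapes: one mode-shape matrix per contending cluster.
    # Returns the position of the winner; the rest fall back to "new".
    means = []
    for shapes in candidate_shapes:
        n = min(len(shapes), len(tracked_shapes))   # equalise pole counts
        macs = [mac(p, t) for p in shapes[:n] for t in tracked_shapes[:n]]
        means.append(np.mean(macs))
    return int(np.argmax(means))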
- - cluster_index = itemindex[:,0] - - for key in keys: - if keys[pos] == key: #Let the best cluster match stay - pass - else: #Add the clusters with the worst match as a new cluster - result[key] = "new" - return pos, result, cluster_index - -def unique_match_debug_info(result,cluster_dict,t_list): - """ - Debug info - - Args: - result (dict): Dictionary of matches - cluster_dict (dict): Dictionary of clusters - t_list (list): List of median frequencies of last tracked tracked clusters - - Returns: - - """ - print('\n') - for ii, key in enumerate(cluster_dict.keys()): - cluster = cluster_dict[key] - pos = result[str(ii)] #Find pos in result dict - if pos == "new": - print(cluster_dict[key]['median_f'],str(ii),pos) - else: - print(cluster_dict[key]['median_f'],str(ii),pos,t_list[pos]) \ No newline at end of file diff --git a/src/methods/packages/pyoma/ssiWrapper.py b/src/methods/packages/pyoma/ssiWrapper.py index db560e9..c88bdc8 100644 --- a/src/methods/packages/pyoma/ssiWrapper.py +++ b/src/methods/packages/pyoma/ssiWrapper.py @@ -123,19 +123,6 @@ def run(self) -> SSIResult: Phis[:,id,:] = nan_Matrix Phi_cov[:,id,:] = nan_Matrix - # # Get the labels of the poles - # Lab = gen.SC_apply( - # Fns, - # Xis, - # Phis, - # ordmin, - # ordmax, - # step, - # sc["err_fn"], - # sc["err_xi"], - # sc["err_phi"], - # ) - return SSIResult( Obs=Obs, A=A, @@ -145,7 +132,6 @@ def run(self) -> SSIResult: Fn_poles=Fns, Xi_poles=Xis, Phi_poles=Phis, - # Lab=Lab, Fn_poles_cov=Fn_cov, Xi_poles_cov=Xi_cov, Phi_poles_cov=Phi_cov, diff --git a/src/methods/sysid_module.py b/src/methods/sysid.py similarity index 87% rename from src/methods/sysid_module.py rename to src/methods/sysid.py index 6b1986b..cf09512 100644 --- a/src/methods/sysid_module.py +++ b/src/methods/sysid.py @@ -33,7 +33,7 @@ def sysid(data, params): if data.shape[0] Tuple[MQTTClient, float]: return data_client, fs -def get_oma_results( +def get_sysid_results( sampling_period: int, aligner: Aligner, fs: float ) -> Optional[Tuple[Dict[str, Any], datetime]]: """ @@ -91,10 +91,10 @@ def get_oma_results( Args: sampling_period: How many minutes of data to pass to sysid. aligner: An initialized Aligner object. - fs: Sampling frequency to use in the OMA algorithm. + fs: Sampling frequency to use in the sysid algorithm. Returns: - A tuple (OMA_output, timestamp) if successful, or None if data is not ready. + A tuple (sysid_output, timestamp) if successful, or None if data is not ready. """ number_of_samples = int(sampling_period * 60 * fs) @@ -104,18 +104,18 @@ def get_oma_results( return None, None try: - oma_output = sysid(data, PARAMS) - return oma_output, timestamp + sysid_output = sysid(data, PARAMS) + return sysid_output, timestamp except Exception as e: print(f"sysID failed: {e}") return None, None -def publish_oma_results(sampling_period: int, aligner: Aligner, +def publish_sysid_results(sampling_period: int, aligner: Aligner, publish_client: MQTTClient, publish_topic: str, fs: float) -> None: """ - Repeatedly tries to get aligned data and publish OMA results once. + Repeatedly tries to get aligned data and publish sysid results once. Args: sampling_period: Duration (in minutes) of data to extract. 
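The hunks around this point rename the publishing path from OMA to sysid without changing the payload contract. A minimal sketch of the message a subscriber should expect, with to_jsonable as a stand-in for the module's convert_numpy_to_list helper; field names are taken from the hunk below:

import json
from datetime import datetime
import numpy as np

def to_jsonable(obj):
    # Stand-in for convert_numpy_to_list: numpy arrays become plain lists
    # so that json.dumps accepts the result.
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, dict):
        return {k: to_jsonable(v) for k, v in obj.items()}
    return obj

sysid_output = {"Fn_poles": np.array([1.2, 4.1])}        # toy result
payload = {
    "timestamp": datetime(2024, 1, 1).isoformat(),
    "sysid_output": to_jsonable(sysid_output),
}
message = json.dumps(payload)    # published with qos=1 in the hunk below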
@@ -132,14 +132,14 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, t2 = time.time() t_text = f"Waiting for data for {round(t2-t1,1)} seconds" print(t_text,end="\r") - oma_output, timestamp = get_oma_results(sampling_period, aligner, fs) + sysid_output, timestamp = get_sysid_results(sampling_period, aligner, fs) - if oma_output: + if sysid_output: print(f"Timestamp: {timestamp}") payload = { "timestamp": timestamp.isoformat(), - "OMA_output": convert_numpy_to_list(oma_output) + "sysid_output": convert_numpy_to_list(sysid_output) } try: message = json.dumps(payload) @@ -149,11 +149,11 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_client.reconnect() publish_client.publish(publish_topic, message, qos=1) - print(f"[{timestamp.isoformat()}] Published OMA result to {publish_topic}") + print(f"[{timestamp.isoformat()}] Published sysid result to {publish_topic}") loop = True break except Exception as e: - print(f"\nFailed to publish OMA result: {e}") + print(f"\nFailed to publish sysid result: {e}") except KeyboardInterrupt: print("\nShutting down gracefully") diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py index f74ba04..2045b07 100644 --- a/tests/integration/methods/test_sys_id.py +++ b/tests/integration/methods/test_sys_id.py @@ -3,7 +3,7 @@ from datetime import datetime from unittest.mock import MagicMock -from methods import sysid_module +from methods import sysid def test_sysid(): # Define OMA parameters @@ -17,7 +17,7 @@ def test_sysid(): data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T # Perform system identification - sysid_output = sysid_module.sysid(data, oma_params) + sysid_output = sysid.sysid(data, oma_params) # Extract results using dictionary keys frequencies = sysid_output['Fn_poles'] @@ -59,7 +59,7 @@ def test_sysid_full_flow_success(): "model_order": 20 } - oma_result = sysid_module.sysid(data, oma_params) + oma_result = sysid.sysid(data, oma_params) # Check output structure assert isinstance(oma_result, dict) @@ -68,7 +68,7 @@ def test_sysid_full_flow_success(): assert isinstance(oma_result[key], list) or isinstance(oma_result[key], np.ndarray) # Convert to JSON-safe structure - converted = sysid_module.convert_numpy_to_list(oma_result) + converted = sysid.convert_numpy_to_list(oma_result) assert isinstance(converted, dict) assert isinstance(converted["Fn_poles"], list) @@ -76,7 +76,7 @@ def test_sysid_full_flow_success(): def test_get_oma_results_integration(mocker): from datetime import datetime import numpy as np - from methods import sysid_module + from methods import sysid fs = 100 # sampling frequency mock_aligner = MagicMock() @@ -88,7 +88,7 @@ def test_get_oma_results_integration(mocker): mock_aligner.extract.return_value = (mock_data, mock_timestamp) - oma_output, timestamp = sysid_module.get_oma_results(number_of_minutes, mock_aligner, fs) + oma_output, timestamp = sysid.get_oma_results(number_of_minutes, mock_aligner, fs) assert isinstance(oma_output, dict) assert "Fn_poles" in oma_output @@ -108,4 +108,4 @@ def test_sysid_raises_on_empty_data(): } with pytest.raises(Exception): - sysid_module.sysid(data, oma_params) + sysid.sysid(data, oma_params) diff --git a/tests/unit/methods/test_sys_id_unit.py b/tests/unit/methods/test_sys_id_unit.py index 9799841..dfaeede 100644 --- a/tests/unit/methods/test_sys_id_unit.py +++ b/tests/unit/methods/test_sys_id_unit.py @@ -3,7 +3,7 @@ from unittest.mock import MagicMock, patch from datetime import datetime import json 
-from methods.sysid_module import ( +from methods.sysid import ( sysid, get_oma_results, publish_oma_results, From aedc8787840adab9ee2ca343972e0f466ac02078 Mon Sep 17 00:00:00 2001 From: au650680 Date: Fri, 31 Oct 2025 09:29:14 +0100 Subject: [PATCH 4/6] Bug fix and small change to plot_cluster Fixed an error in create_cluster, that resulted in doublicate model orders for a cluster. Allowed for scatter point size to be adjusted. --- src/functions/plot_clusters.py | 6 ++++-- src/methods/mode_clustering_functions/create_cluster.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/functions/plot_clusters.py b/src/functions/plot_clusters.py index 2d90f54..1043781 100644 --- a/src/functions/plot_clusters.py +++ b/src/functions/plot_clusters.py @@ -122,7 +122,8 @@ def plot_clusters(clusters: Dict[str,dict], sysid_results: Dict[str, Any], sysid_params: Dict[str, Any], - fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + fig_ax = None, + legend = True)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: """ Plot stabilization of clusters @@ -168,7 +169,8 @@ def plot_clusters(clusters: Dict[str,dict], ax1.set_ylabel("Model order", fontsize=20, color = 'black') ax1.set_ylim(0, sysid_params['model_order'] + 1) - ax1.legend(prop={'size': 20}) + if legend is True: + ax1.legend(prop={'size': 10}) ax1.set_title(f"Data set: {title_number}") # # # ............................................................................ diff --git a/src/methods/mode_clustering_functions/create_cluster.py b/src/methods/mode_clustering_functions/create_cluster.py index 4bcf5b0..c97de88 100644 --- a/src/methods/mode_clustering_functions/create_cluster.py +++ b/src/methods/mode_clustering_functions/create_cluster.py @@ -87,7 +87,7 @@ def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Check if there are multiple points with same model order as ip ip_ids = np.argwhere(row==row[0]) if len(ip_ids[:,0]) > 1: # Remove all the other points at the same model order - for ii in ip_ids[1:,0]: + for ii in ip_ids[:,0]: try: frequencies = np.delete(frequencies,ii) cov_f = np.delete(cov_f,ii) From 802139e47d1d84eb96a78b0eb332418192986311 Mon Sep 17 00:00:00 2001 From: au650680 Date: Wed, 5 Nov 2025 09:42:43 +0100 Subject: [PATCH 5/6] Small changes --- config/production.json.template | 46 ++++++++++++++++----- src/examples/README.md | 17 +++++--- src/examples/example.py | 23 ++++------- tests/integration/methods/test_sys_id.py | 51 +++++++++++++----------- tests/unit/methods/test_sys_id_unit.py | 42 +++++++++---------- 5 files changed, 102 insertions(+), 77 deletions(-) diff --git a/config/production.json.template b/config/production.json.template index 8336ba8..fb4d112 100644 --- a/config/production.json.template +++ b/config/production.json.template @@ -1,29 +1,53 @@ { "MQTT": { - "host": "test.mosquitto.org", - "port": 1883, + "host": "", + "port": , "userId": "", "password": "", - "ClientID": "ReplaySubscriber", + "ClientID": "NOT_NEEDED", "QoS": 1, "TopicsToSubscribe": [ - "cpsens/recorded/1/data", - "cpsens/recorded/1/metadata", - "cpsens/recorded/2/data", - "cpsens/recorded/+/data" + "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/raw/data", + "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/raw/metadata", + "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/2/acc/raw/data", + "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/3/acc/raw/data", + "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/4/acc/raw/data" ] }, "sysID": { "host": "", - "port": 
1883, + "port": , "userId": "", "password": "", - "ClientID": "sub.232.sds.213s", - "QoS": 1, - "TopicsToSubscribe": ["cpsens/d8-3a-dd-f5-92-48/cpsns_Simulator/1_2/oma_results"] + "ClientID": "NOT_NEEDED", + "QoS": 2, + "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/sysid/data"] + }, + + "mode_cluster": { + "host": "", + "port": , + "userId": "", + "password": "", + "ClientID": "NOT_NEEDED", + "QoS": 2, + "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/mode_cluster/data"] + }, + + "model_update": { + "host": "", + "port": , + "userId": "", + "password": "", + "ClientID": "NOT_NEEDED", + "QoS": 2, + "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/model_update/data"] } } + + + diff --git a/src/examples/README.md b/src/examples/README.md index 8170b0f..b8668fa 100644 --- a/src/examples/README.md +++ b/src/examples/README.md @@ -62,13 +62,18 @@ To run the examples with the default config, use: ```bash python .\src\examples\example.py accelerometers python .\src\examples\example.py align-readings -python .\src\examples\example.py oma-and-print -python .\src\examples\example.py oma-and-plot -python .\src\examples\example.py oma-and-publish -python .\src\examples\example.py mode-tracking-with-local-sysid -python .\src\examples\example.py mode-tracking-with-remote-sysid +python .\src\examples\example.py sysid-and-print +python .\src\examples\example.py sysid-and-plot +python .\src\examples\example.py sysid-and-publish +python .\src\examples\example.py live-sysid-publish +python .\src\examples\example.py clustering-with-local-sysid +python .\src\examples\example.py clustering-with-remote-sysid +python .\src\examples\example.py live-clustering-with-remote-sysid +python .\src\examples\example.py mode-tracking-remote-sysid +python .\src\examples\example.py mode_tracking-with-local-sysid +python .\src\examples\example.py live-mode-tracking-remote-sysid python .\src\examples\example.py model-update-local-sysid -python .\src\examples\example.py model-update-remote-sysid +python .\src\examples\example.py ilve-model-update-remote-sysid ``` diff --git a/src/examples/example.py b/src/examples/example.py index 1b79b2e..38242f8 100644 --- a/src/examples/example.py +++ b/src/examples/example.py @@ -1,22 +1,22 @@ # pylint: disable=E1120 import click -from examples.acceleration_readings import (read_accelerometers,live_read_accelerometers) +from examples.acceleration_readings import read_accelerometers from examples.aligning_readings import align_acceleration_readings from examples.run_sysid import ( run_sysid_and_plot, run_sysid_and_publish, run_sysid_and_print, - live_sysid_and_publish, + live_sysid_and_publish ) from examples.run_mode_clustering import ( run_mode_clustering_with_local_sysid, run_mode_clustering_with_remote_sysid, - run_live_mode_clustering_with_remote_sysid, + run_live_mode_clustering_with_remote_sysid ) from examples.run_mode_tracking import ( run_mode_tracking_with_local_sysid, run_mode_tracking_with_remote_sysid, - run_live_mode_tracking_with_remote_sysid, + run_live_mode_tracking_with_remote_sysid ) from examples.run_model_update import ( run_model_update_local_sysid, @@ -25,7 +25,7 @@ @click.group() -@click.option('--config', default="config/production.json", help="Path to config file") +@click.option('--config', default="config/DTU_config.json", help="Path to config file") @click.pass_context def cli(ctx, config): ctx.ensure_object(dict) @@ -36,17 +36,11 @@ def cli(ctx, config): def accelerometers(ctx): 
read_accelerometers(ctx.obj["CONFIG"]) -@cli.command() -@click.pass_context -def live_accelerometers(ctx): - live_read_accelerometers(ctx.obj["CONFIG"]) - @cli.command() @click.pass_context def align_readings(ctx): align_acceleration_readings(ctx.obj["CONFIG"]) - @cli.command() @click.pass_context def sysid_and_publish(ctx): @@ -89,15 +83,14 @@ def mode_tracking_with_local_sysid(ctx): @cli.command() @click.pass_context -def mode_tracking_with_remote_sysid(ctx): +def mode_tracking_remote_sysid(ctx): run_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context -def live_mode_tracking_with_remote_sysid(ctx): +def live_mode_tracking_remote_sysid(ctx): run_live_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) - @cli.command() @click.pass_context def model_update_local_sysid(ctx): @@ -105,7 +98,7 @@ def model_update_local_sysid(ctx): @cli.command() @click.pass_context -def model_update_remote_sysid(ctx): +def live_model_update_remote_sysid(ctx): run_model_update_remote_sysid(ctx.obj["CONFIG"]) if __name__ == "__main__": diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py index 2045b07..4558044 100644 --- a/tests/integration/methods/test_sys_id.py +++ b/tests/integration/methods/test_sys_id.py @@ -5,19 +5,22 @@ from methods import sysid + + def test_sysid(): # Define OMA parameters - oma_params = { + sysid_params = { "Fs": 100, # Sampling frequency in Hz "block_shift": 30, # Block shift parameter - "model_order": 20 # Model order + "model_order": 20, # Model order + "model_order_min": 1 # Lowest model order } # Load test data data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T # Perform system identification - sysid_output = sysid.sysid(data, oma_params) + sysid_output = sysid.sysid(data, sysid_params) # Extract results using dictionary keys frequencies = sysid_output['Fn_poles'] @@ -25,7 +28,8 @@ def test_sysid(): damping_ratios = sysid_output['Xi_poles'] cov_damping = sysid_output['Xi_poles_cov'] mode_shapes = sysid_output['Phi_poles'] - poles_label = sysid_output['Lab'] + + # Load stored reference results stored_data = np.load('tests/integration/input_data/expected_sysid_output.npz') @@ -34,41 +38,39 @@ def test_sysid(): stored_damping_ratios = stored_data['damping_ratios'] stored_cov_damping = stored_data['cov_damping'] stored_mode_shapes = stored_data['mode_shapes'] - stored_poles_label = stored_data['poles_label'] - + tolerance = 0.4 assert np.allclose(frequencies, stored_frequencies, atol=tolerance, equal_nan=True), "Frequencies do not match!" assert np.allclose(cov_freq, stored_cov_freq, atol=tolerance, equal_nan=True), "Covariance frequencies do not match!" assert np.allclose(damping_ratios, stored_damping_ratios, atol=tolerance, equal_nan=True), "Damping ratios do not match!" - assert np.allclose(cov_damping, stored_cov_damping, atol=tolerance, equal_nan=True), "Covariance damping ratios do not match!" + assert np.allclose(cov_damping, stored_cov_damping, atol=tolerance*2, equal_nan=True), "Covariance damping ratios do not match!" assert np.allclose(mode_shapes, stored_mode_shapes, atol=tolerance, equal_nan=True), "Mode shapes do not match!" - assert np.array_equal(poles_label, stored_poles_label), "Pole labels do not match!" - -def test_sysid_full_flow_success(): +def test_oma_full_flow_success(): """ Simulates full OMA flow: aligned data → sysid → conversion to JSON-safe format. 
""" # Simulate 600 samples, 3 channels (e.g., 1 min * 10 Hz) data = np.random.randn(3, 600) - oma_params = { + sysid_params = { "Fs": 100, "block_shift": 30, - "model_order": 20 + "model_order": 6, + "model_order_min": 1 } - oma_result = sysid.sysid(data, oma_params) + sysid_result = sysid.sysid(data, sysid_params) # Check output structure - assert isinstance(oma_result, dict) + assert isinstance(sysid_result, dict) for key in ["Fn_poles", "Xi_poles", "Phi_poles"]: - assert key in oma_result - assert isinstance(oma_result[key], list) or isinstance(oma_result[key], np.ndarray) + assert key in sysid_result + assert isinstance(sysid_result[key], list) or isinstance(sysid_result[key], np.ndarray) # Convert to JSON-safe structure - converted = sysid.convert_numpy_to_list(oma_result) + converted = sysid.convert_numpy_to_list(sysid_result) assert isinstance(converted, dict) assert isinstance(converted["Fn_poles"], list) @@ -88,24 +90,25 @@ def test_get_oma_results_integration(mocker): mock_aligner.extract.return_value = (mock_data, mock_timestamp) - oma_output, timestamp = sysid.get_oma_results(number_of_minutes, mock_aligner, fs) + sysid_output, timestamp = sysid.get_sysid_results(number_of_minutes, mock_aligner, fs) - assert isinstance(oma_output, dict) - assert "Fn_poles" in oma_output + assert isinstance(sysid_output, dict) + assert "Fn_poles" in sysid_output assert timestamp == mock_timestamp -def test_sysid_raises_on_empty_data(): +def test_oma_raises_on_empty_data(): """ SSI should raise an error if data is empty (simulating a low-data scenario). """ data = np.empty((0, 3)) # No samples - oma_params = { + sysid_params = { "Fs": 10.0, "block_shift": 5, - "model_order": 6 + "model_order": 6, + "model_order_min": 1 } with pytest.raises(Exception): - sysid.sysid(data, oma_params) + sysid.sysid(data, sysid_params) diff --git a/tests/unit/methods/test_sys_id_unit.py b/tests/unit/methods/test_sys_id_unit.py index dfaeede..8e00fe4 100644 --- a/tests/unit/methods/test_sys_id_unit.py +++ b/tests/unit/methods/test_sys_id_unit.py @@ -5,8 +5,8 @@ import json from methods.sysid import ( sysid, - get_oma_results, - publish_oma_results, + get_sysid_results, + publish_sysid_results, setup_client, ) from paho.mqtt.client import Client as MQTTClient @@ -18,24 +18,25 @@ def sample_data(): @pytest.fixture -def oma_params(): +def sysid_params(): return { "Fs": 100.0, "block_shift": 5, - "model_order": 6 + "model_order": 6, + "model_order_min": 1 } -def test_sysid_returns_expected_keys(sample_data, oma_params): - result = sysid(sample_data, oma_params) +def test_sysid_returns_expected_keys(sample_data, sysid_params): + result = sysid(sample_data, sysid_params) assert isinstance(result, dict) - expected_keys = {'Fn_poles', 'Fn_poles_cov', 'Xi_poles', 'Xi_poles_cov', 'Phi_poles', 'Lab'} + expected_keys = {'Fn_poles', 'Fn_poles_cov', 'Xi_poles', 'Xi_poles_cov', 'Phi_poles'} assert expected_keys.issubset(result.keys()) -def test_sysid_transposes_data_if_needed(oma_params): +def test_sysid_transposes_data_if_needed(sysid_params): data = np.random.randn(3, 600) # More columns than rows - result = sysid(data, oma_params) + result = sysid(data, sysid_params) assert isinstance(result, dict) assert "Fn_poles" in result @@ -46,7 +47,7 @@ def test_get_oma_results_success(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (mock_data, datetime.now()) - result, ts = get_oma_results(0.1, mock_aligner, fs) + result, ts = get_sysid_results(0.1, mock_aligner, fs) assert result is not None assert "Fn_poles" in 
result @@ -56,7 +57,7 @@ def test_get_oma_results_no_data(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (np.empty((0, 3)), datetime.now()) - result, ts = get_oma_results(1, mock_aligner, fs) + result, ts = get_sysid_results(1, mock_aligner, fs) assert result is None assert ts is None @@ -68,7 +69,7 @@ def test_get_oma_results_not_enough_samples(mocker): data = np.random.randn(100, 3) mock_aligner.extract.return_value = (data, datetime.now()) - result, ts = get_oma_results(10, mock_aligner, fs) # ask for too many samples + result, ts = get_sysid_results(10, mock_aligner, fs) # ask for too many samples assert result is None assert ts is None @@ -79,9 +80,9 @@ def test_get_oma_results_sysid_failure(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (np.random.randn(600, 3), datetime.now()) - mocker.patch("methods.sys_id.sysid", side_effect=Exception("fail")) + mocker.patch("methods.sysid.sysid", side_effect=Exception("fail")) - result, ts = get_oma_results(1, mock_aligner, fs) + result, ts = get_sysid_results(1, mock_aligner, fs) assert result is None assert ts is None @@ -95,13 +96,12 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): 'Xi_poles': np.array([[0.02, 0.03], [0.04, 0.05]]), 'Xi_poles_cov': np.array([[0.001, 0.001], [0.001, 0.001]]), 'Phi_poles': np.array([[1.0, 0.0], [0.0, 1.0]]), - 'Lab': ['mode1', 'mode2'] } - mocker.patch("methods.sys_id.time.sleep", return_value=None) + mocker.patch("methods.sysid.time.sleep", return_value=None) mocker.patch( - "methods.sys_id.get_oma_results", + "methods.sysid.get_sysid_results", side_effect=[ (None, None), (dummy_result, datetime(2024, 1, 1)) @@ -109,7 +109,7 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): ) mocker.patch( - "methods.sys_id.convert_numpy_to_list", + "methods.sysid.convert_numpy_to_list", return_value={k: v.tolist() if hasattr(v, "tolist") else v for k, v in dummy_result.items()} ) @@ -118,7 +118,7 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): aligner = MagicMock() aligner.client = MagicMock() - publish_oma_results(0.1, aligner, mock_client, "test/topic", fs) + publish_sysid_results(0.1, aligner, mock_client, "test/topic", fs) assert mock_client.publish.called assert mock_client.publish.call_count == 1 @@ -132,10 +132,10 @@ def test_setup_client_with_multiple_topics(mocker): "topics": ["topic1", "topic2"] } - extract_mock = mocker.patch("methods.sys_id.extract_fs_from_metadata", return_value=123.0) + extract_mock = mocker.patch("methods.sysid.extract_fs_from_metadata", return_value=123.0) mock_mqtt_client = MagicMock() - mocker.patch("methods.sys_id.setup_mqtt_client", return_value=(mock_mqtt_client, None)) + mocker.patch("methods.sysid.setup_mqtt_client", return_value=(mock_mqtt_client, None)) client, fs = setup_client(mqtt_config) From 9d145fdd07c08601b2a8f2b88aa5ed8c43faf99d Mon Sep 17 00:00:00 2001 From: au650680 Date: Wed, 5 Nov 2025 16:10:36 +0100 Subject: [PATCH 6/6] Small changes Removed debug info from some files, added missing type info for function arguments and returns, updated to YaFEM 1.0.0, changed some function descriptions, added timestamp to clustering data --- pyproject.toml | 2 +- src/examples/example.py | 6 +- src/examples/run_sysid.py | 2 +- src/functions/clean_sysid_output.py | 41 ++--- src/functions/plot_clusters.py | 145 +++--------------- src/functions/plot_mode_tracking.py | 13 +- src/functions/plot_sysid.py | 73 +++++++-- .../align_clusters.py | 20 --- 
.../mode_clustering_functions/clustering.py | 27 +--- .../create_cluster.py | 33 ++-- .../expand_cluster.py | 6 +- .../mode_tracking_functions/mode_tracking.py | 9 +- .../packages/yafem-0.2.6-py3-none-any.whl | Bin 77190 -> 0 bytes 13 files changed, 144 insertions(+), 233 deletions(-) delete mode 100644 src/methods/packages/yafem-0.2.6-py3-none-any.whl diff --git a/pyproject.toml b/pyproject.toml index 46d69f5..7ff673c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ paho-mqtt = "^2.1.0" numpy = "^2.2.5" click ="^8.1.8" pyOMA-2 = "1.0.0" -yafem = { path = "src/methods/packages/yafem-0.2.6-py3-none-any.whl" } +yafem = "1.0.0" [tool.poetry.group.dev.dependencies] diff --git a/src/examples/example.py b/src/examples/example.py index 38242f8..4344c68 100644 --- a/src/examples/example.py +++ b/src/examples/example.py @@ -25,7 +25,7 @@ @click.group() -@click.option('--config', default="config/DTU_config.json", help="Path to config file") +@click.option('--config', default="config/production.json", help="Path to config file") @click.pass_context def cli(ctx, config): ctx.ensure_object(dict) @@ -83,12 +83,12 @@ def mode_tracking_with_local_sysid(ctx): @cli.command() @click.pass_context -def mode_tracking_remote_sysid(ctx): +def mode_tracking_with_remote_sysid(ctx): run_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() @click.pass_context -def live_mode_tracking_remote_sysid(ctx): +def live_mode_tracking_with_remote_sysid(ctx): run_live_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"]) @cli.command() diff --git a/src/examples/run_sysid.py b/src/examples/run_sysid.py index 7161728..0a4bb61 100644 --- a/src/examples/run_sysid.py +++ b/src/examples/run_sysid.py @@ -78,7 +78,7 @@ def run_sysid_and_print(config_path): def run_sysid_and_publish(config_path): - number_of_minutes = 1 + number_of_minutes = 0.2 data_topic_indexes = [0, 2, 3, 4] aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes) publish_config = load_config(config_path)["sysID"] diff --git a/src/functions/clean_sysid_output.py b/src/functions/clean_sysid_output.py index 396fd6a..ee67e10 100644 --- a/src/functions/clean_sysid_output.py +++ b/src/functions/clean_sysid_output.py @@ -34,6 +34,27 @@ def remove_complex_conjugates(sysid_output): def transform_sysid_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_): """ Transform sysid results + + Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. + This means the the frequency array maps directly to the plot: + MO. + 5.| x x + 4.| x + 3.| x + 2.| x + 1.| + 0.| + -1----4------- Frequency + The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. + 6 for each model order, including 0 and 3 for maximum poles in a modelorder + The frequency array will then become: + _0_1_ + 0| 1 4 + 1| 1 Nan + 0| 1 Nan + 0| Nan 4 + 0| Nan Nan + 0| Nan Nan Args: frequencies_ (np.ndarray): Frequencies (mean) @@ -49,26 +70,6 @@ def transform_sysid_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_ cov_damping (np.ndarray): Covariance of damping ratio mode_shapes (np.ndarray): Mode shapes """ - # Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. - # This means the the frequency array maps directly to the plot: - # MO. - # 5.| x x - # 4.| x - # 3.| x - # 2.| x - # 1.| - # 0.| - # -1----4------- Frequency - # The frequency array will then have the shape (6,3). 
Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. - # 6 for each model order, including 0 and 3 for maximum poles in a modelorder - # The frequency array will then become: - # _0_1_ - # 0| 1 4 - # 1| 1 Nan - # 0| 1 Nan - # 0| Nan 4 - # 0| Nan Nan - # 0| Nan Nan #Transformation of data frequencies = np.transpose(frequencies_) diff --git a/src/functions/plot_clusters.py b/src/functions/plot_clusters.py index 1043781..feec388 100644 --- a/src/functions/plot_clusters.py +++ b/src/functions/plot_clusters.py @@ -2,138 +2,26 @@ import numpy as np import matplotlib.pyplot as plt import matplotlib.figure +import matplotlib.axes from functions.clean_sysid_output import remove_highly_uncertain_points from functions.plot_sysid import (add_scatter_data,add_plot_standard_flair,add_plot_annotation) plt.rcParams['font.family'] = 'Times New Roman' - -# def plot_clusters(clusters: Dict[str,dict], -# sysid_results: Dict[str, Any], -# sysid_params: Dict[str, Any], -# fig_ax = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: -# """ -# Plot stabilization of clusters - -# Args: -# clsuters (dict): Dictionary of clusters -# sysid_results (dict): PyOMA results -# sysid_params (dict): sysid parameters -# fix_ax (tuple): fig and ax of plot to redraw -# Returns: -# fig_ax (tuple): fig and ax of plot - -# """ - -# if fig_ax is None: -# plt.ion() -# fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True) -# title_number = 0 -# else: -# fig, (ax1,ax2) = fig_ax -# title = fig.axes[0].get_title() -# ax1.clear() -# ax2.clear() - -# iteration_number = title.split(' ')[-1] -# #print(iteration_number) -# title_number = int(iteration_number) + 1 - -# #Pre-clean -# frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(sysid_results,sysid_params) - -# ax1.set_ylabel("Model order", fontsize=20, color = 'black') - -# x = frequencies.flatten(order="f") -# y_model_order = np.array([i // len(frequencies) for i in range(len(x))]) * 1 - -# ax1 = add_scatter_data(ax1,x,y_model_order,None) - -# idx = 0 -# for i, key in enumerate(clusters.keys()): -# cluster = clusters[key] -# MO = cluster['model_order'] -# freq_cluster = cluster['f'] -# freq_cov_cluster = cluster['cov_f'] - -# sc = ax1.scatter(freq_cluster, MO, marker="o", s=40, label=f'Cluster {i}') -# col = sc.get_facecolors().tolist() -# ax1.vlines(np.median(freq_cluster),min(cluster['model_order']), -# max(cluster['model_order']),color=col) - -# xerr_cluster = np.sqrt(freq_cov_cluster) * 2 -# # ax1.errorbar(freq_cluster, MO, xerr=xerr_cluster, -# # fmt="None", capsize=5, ecolor="gray",zorder=200) - -# ax1, col = add_scatter_cluster(ax1,cluster['f'],cluster['model_order'],cluster['cov_f']) -# idx += 1 - -# ax1 = add_plot_standard_flair(ax1,sysid_params) - -# ax1.set_ylim(0, sysid_params['model_order'] + 1) -# # Add major and minor grid lines -# ax1.legend(prop={'size': 20}) -# ax1.set_title(f"Data set: {title_number}") - -# # # # ............................................................................ 
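To make the worked example in the transform_sysid_features docstring above concrete, here is a toy reconstruction of the NaN-padded frequency array; the values follow the docstring's diagram, two occupied pole slots are shown, and in general there are model_order/2 columns once complex conjugates are removed:

import numpy as np

# Rows follow the stabilization diagram's model orders, columns are
# pole slots; slots without a pole at that order hold NaN.
frequencies = np.array([
    [1.0,    4.0],
    [1.0,    np.nan],
    [1.0,    np.nan],
    [np.nan, 4.0],
    [np.nan, np.nan],
    [np.nan, np.nan],
])
print(np.nanmedian(frequencies, axis=0))   # -> [1. 4.]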
- -# ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black') -# ax2.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') -# ax2.tick_params(axis='both', which='major', labelsize=17) - -# x = frequencies.flatten(order="f") -# y = damping_ratios.flatten(order="f") - -# sc = ax2.scatter(x, y, marker="^", s=20, c="r", zorder=0, label='Non clustered') -# if cov_freq is not None: -# xerr = np.sqrt(cov_damping) * 2 -# xerr = xerr.flatten(order="f") -# ax2.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") - -# for i, key in enumerate(clusters.keys()): -# cluster = clusters[key] -# freq_cluster = cluster['f'] -# damp_cluster = cluster['d'] -# damp_cov_cluster = cluster['cov_d'] - -# ax2.scatter(freq_cluster, damp_cluster, s=50, zorder=3) -# xerr_cluster = np.sqrt(damp_cov_cluster) * 2 -# ax2.errorbar(freq_cluster, damp_cluster, yerr=xerr_cluster, -# fmt="None", capsize=5, ecolor="gray") - -# for i, txt in enumerate(y_model_order): -# ax2.annotate(str(txt), (x[i], y[i])) - -# if y[~np.isnan(y)].shape[0] > 1: -# ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1)) -# else: -# ax2.set_ylim(0, 0.1) -# ax2.set_xlim(0, sysid_params['Fs']/2) - -# # Add major and minor grid lines -# ax2.grid(which='major', color='gray', linestyle='-', linewidth=0.5) -# ax2.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3) - -# fig.tight_layout() -# fig.canvas.draw() -# fig.canvas.flush_events() - -# return fig, (ax1,ax2) - def plot_clusters(clusters: Dict[str,dict], sysid_results: Dict[str, Any], sysid_params: Dict[str, Any], fig_ax = None, - legend = True)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + legend = True)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]: """ Plot stabilization of clusters Args: - clsuters (dict): Dictionary of clusters - sysid_results (dict): PyOMA results - sysid_params (dict): sysid parameters - fix_ax (tuple): fig and ax of plot to redraw + clsuters (Dict[str,dict]): Dictionary of clusters + sysid_results (Dict[str,dict]): PyOMA results + sysid_params (Dict[str,dict]): sysid parameters + fix_ax (Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]): fig and ax of plot to redraw Returns: - fig_ax (tuple): fig and ax of plot + fig_ax (Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]): fig and ax of plot """ @@ -200,8 +88,23 @@ def plot_clusters(clusters: Dict[str,dict], return fig, (ax1,ax2) -def add_scatter_cluster(ax,x,y,cov,i,error_dir="h"): - sc = ax.scatter(x, y, marker="o", s=60, label=f'Cluster {i}') +def add_scatter_cluster(ax: matplotlib.axes.Axes, x: np.ndarray[float], y: np.ndarray[float], cov: np.ndarray[float], cluster_id = int, error_dir: str = "h") -> Tuple[matplotlib.axes.Axes, Any]: + """ + Add scatter plot of clusters to existing axes + + Args: + ax (matplotlib.axes.Axes): ax from matplotlib + x (np.ndarray[float]): x-axis data + y (np.ndarray[float]): y-axis data + cov (np.ndarray[float]): covariance for errorbars + cluster_id (int): Index of cluster for labeling + error_dir (str): Direction of errorbars, either "h" horizontal or "v" vertical + + Returns: + ax (matplotlib.axes.Axes): + col (Any): + """ + sc = ax.scatter(x, y, marker="o", s=60, label=f'Cluster {cluster_id}') col = sc.get_facecolors().tolist() if cov is not None: xerr = np.sqrt(cov) * 2 diff --git a/src/functions/plot_mode_tracking.py b/src/functions/plot_mode_tracking.py index c07e4d3..c3e25a9 100644 --- a/src/functions/plot_mode_tracking.py +++ 
b/src/functions/plot_mode_tracking.py @@ -2,21 +2,22 @@ import numpy as np import matplotlib.pyplot as plt import matplotlib.figure +import matplotlib.axes plt.rcParams['font.family'] = 'Times New Roman' def plot_tracked_modes( tracked_clusters: Dict[str, Any], - oma_params: Dict[str, Any], + sysid_params: Dict[str, Any], fig_ax: Any = None, - x_length: int = None)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + x_length: Any = None)-> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]: """ Plot tracked modes Args: - oma_results (dict): PyOMA results - oma_params (dict): OMA parameters + tracked_clusters (Dict[str, Any]): Tracked clusters + sysid_params (Dict[str, Any]): sysid parameters Returns: - fig_ax (tuple): fig and ax of plot + fig_ax (Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]): fig and ax of plot """ @@ -50,7 +51,7 @@ def plot_tracked_modes( ax1.set_xlabel("Dataset", fontsize=20, color = 'black') ax1.tick_params(axis='both', which='major', labelsize=17) - ax1.set_ylim(0, oma_params['Fs']/2) + ax1.set_ylim(0, sysid_params['Fs']/2) if x_length is not None: ax1.set_xlim(np.maximum(max(max_x)-x_length,0),max(max_x)+1) ax1.set_xticks(np.arange(np.maximum(max(max_x)-x_length,0), diff --git a/src/functions/plot_sysid.py b/src/functions/plot_sysid.py index d4df361..f3cc82c 100644 --- a/src/functions/plot_sysid.py +++ b/src/functions/plot_sysid.py @@ -2,6 +2,7 @@ import numpy as np import matplotlib.pyplot as plt import matplotlib.figure +import matplotlib.axes from functions.clean_sysid_output import (remove_complex_conjugates,remove_highly_uncertain_points) plt.rcParams['font.family'] = 'Times New Roman' @@ -9,17 +10,17 @@ def plot_pre_stabilization_diagram( sysid_results: Dict[str, Any], sysid_params: Dict[str, Any], - fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]: """ Plot stabilization of raw sysid data before pre-cleaning Args: - sysid_results (dict): Pyoma results - sysid_params (dict): sysid parameters - fix_ax (tuple): fig and ax of plot to redraw + sysid_results (Dict[str, Any]): Pyoma results + sysid_params (Dict[str, Any]): sysid parameters + fix_ax (Tuple): fig and ax of plot to redraw Returns: - fig_ax (tuple): fig and ax of plot + fig_ax (Tuple): fig and ax of plot """ if fig_ax is None: @@ -63,7 +64,7 @@ def plot_pre_stabilization_diagram( def plot_stabilization_diagram( sysid_results: Dict[str, Any], sysid_params: Dict[str, Any], - fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[plt.Axes,plt.Axes]]: + fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]: """ Plot stabilization of sysid data before after pre-cleaning @@ -113,7 +114,40 @@ def plot_stabilization_diagram( return fig, (ax1,ax2) -def add_scatter_data(ax,x,y,cov,error_dir,mark="o",lab='Non clustered',size=50): +def add_scatter_cluster(ax: matplotlib.axes.Axes, x: np.ndarray[float], y: np.ndarray[float], cov: np.ndarray[float], cluster_id = int, error_dir: str = "h") -> Tuple[matplotlib.axes.Axes, Any]: + """ + Add scatter plot of clusters to existing axes + + Args: + ax (matplotlib.axes.Axes): ax from matplotlib + x (np.ndarray[float]): x-axis data + y (np.ndarray[float]): y-axis data + cov (np.ndarray[float]): covariance for errorbars + cluster_id (int): Index of cluster for labeling + error_dir (str): Direction of errorbars, either "h" horizontal or "v" vertical + + Returns: + ax (matplotlib.axes.Axes): + col (Any): + """ + +def 
add_scatter_data(ax: matplotlib.axes.Axes, x: np.ndarray[float], y: np.ndarray[float], cov: np.ndarray[float], error_dir: str = "h", mark: str ="o", lab: str ='Non clustered',size: float = 50) -> matplotlib.axes.Axes: + """ + Add scatter plot of sysid results to existing axes + + Args: + ax (matplotlib.axes.Axes): ax from matplotlib + x (np.ndarray[float]): x-axis data + y (np.ndarray[float]): y-axis data + cov (np.ndarray[float]): covariance for errorbars + error_dir (str): Direction of errorbars, either "h" horizontal or "v" vertical + mark (str): marker type option + lab (str): Labeling for legend + size (float): Size of markers + + Returns: + ax (matplotlib.axes.Axes): + """ ax.scatter(x, y, marker=mark, s=size, c="r", label = lab) if cov is not None: xerr = np.sqrt(cov) * 2 @@ -124,7 +158,16 @@ def add_scatter_data(ax,x,y,cov,error_dir,mark="o",lab='Non clustered',size=50): ax.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray") return ax -def add_plot_standard_flair(ax,sysid_params): +def add_plot_standard_flair(ax: matplotlib.axes.Axes, sysid_params: Dict[str,Any]) -> matplotlib.axes.Axes: + """ + Add labels, grid and limit existing axes + + Args: + ax (matplotlib.axes.Axes): ax from matplotlib + sysid_params (Dict[str, Any]): sysid parameters + Returns: + ax (matplotlib.axes.Axes): + """ ax.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black') ax.tick_params(axis='both', which='major', labelsize=17) @@ -136,7 +179,19 @@ def add_plot_standard_flair(ax,sysid_params): return ax -def add_plot_annotation(ax,x,y,y_model_order): +def add_plot_annotation(ax: matplotlib.axes.Axes, x: np.ndarray[float], y: np.ndarray[float], y_model_order: np.ndarray[float]) -> matplotlib.axes.Axes: + """ + Add model order annotations + + Args: + ax (matplotlib.axes.Axes): ax from matplotlib + x (np.ndarray[float]): x-axis data + y (np.ndarray[float]): y-axis data + y_model_order (np.ndarray[float]): Model order data + + Returns: + ax (matplotlib.axes.Axes): + """ for i, txt in enumerate(y_model_order): ax.annotate(str(txt), (x[i], y[i])) return ax diff --git a/src/methods/mode_clustering_functions/align_clusters.py b/src/methods/mode_clustering_functions/align_clusters.py index b9d3c7f..3b970e3 100644 --- a/src/methods/mode_clustering_functions/align_clusters.py +++ b/src/methods/mode_clustering_functions/align_clusters.py @@ -13,7 +13,6 @@ def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,d cluster_dict (dict): Dictionary of aligned clusters """ - #print("\nCluster alignment") median_f = [] for key in cluster_dict.keys(): #Find the median of each cluster cluster = cluster_dict[key] @@ -23,7 +22,6 @@ def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,d deleted_cluster_id = [] for ii, m_f in enumerate(median_f): #Go through all medians if ii in deleted_cluster_id: #If cluster is deleted pass on - #print(deleted_cluster_id) continue # Calculate absolute difference of selected median and all medians diff = abs(median_f-m_f) @@ -35,29 +33,14 @@ def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,d mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1])) indices = np.argwhere(mask == True) #Indicies of clusters that are closely located in frequency - - - #print(cluster_dict.keys()) if indices.shape[0] > 0:# If one or more clusters are found ids = indices[:,0] - #print("ids",ids) for id in ids: #Go through all clusters that is closely located if id in 
deleted_cluster_id: continue - - #print("id",id) - break_loop = 0 cluster1 = cluster_dict[str(ii)] #Parent cluster cluster2 = cluster_dict[str(id)] #Co-located cluster - - # Proposed method - # for r in cluster2['model_order']: - # if r in cluster1['model_order']: #If the two clusters have poles with same model order, then skip the allignment - # print("Clusters have the same MO",cluster2['model_order'],cluster1['model_order']) - # break_loop = 1 - # if break_loop == 1: - # break MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0]) # Check mode shape for the first pole in each cluster if MAC >= Params['tMAC']: #If MAC complies with the criteria, then add the two clusters @@ -83,9 +66,6 @@ def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,d deleted_cluster_id.append(int(id)) #The delete cluster id else: cluster_dict[str(id)] = cluster_remaining #Save the remaining cluster - # else: - # if cluster1['f'][0] > 300: - # breakpoint() cluster_dict_alligned = cluster_dict diff --git a/src/methods/mode_clustering_functions/clustering.py b/src/methods/mode_clustering_functions/clustering.py index 28d10ca..56710d4 100644 --- a/src/methods/mode_clustering_functions/clustering.py +++ b/src/methods/mode_clustering_functions/clustering.py @@ -27,27 +27,6 @@ def cluster_func(sysid_output: dict[str,Any], Params : dict[str,Any]) -> tuple[d #Preeliminary cleaning frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_highly_uncertain_points(sysid_output,Params) - # Transpose, flip and sort arrays, such that arrays maps directly to the stabilization diagram. - # This means the the frequency array maps directly to the plot: - # MO. - # 5.| x x - # 4.| x - # 3.| x - # 2.| x - # 1.| - # 0.| - # -1----4------- Frequency - # The frequency array will then have the shape (6,3). Initially (6,6) but the complex conjugates have been removed. So 6 is halved to 3. 
- # 6 for each model order, including 0 and 3 for maximum poles in a modelorder - # The frequency array will then become: - # _0_1_ - # 0| 1 4 - # 1| 1 Nan - # 0| 1 Nan - # 0| Nan 4 - # 0| Nan Nan - # 0| Nan Nan - frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_sysid_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes) row, col = np.indices(model_orders.shape) @@ -93,8 +72,7 @@ def cluster_func(sysid_output: dict[str,Any], Params : dict[str,Any]) -> tuple[d while expansion: kk += 1 if kk > 10: - print("Expansion never ends, something is wrong.") - breakpoint() + raise("Expansion never ends, something is wrong.") pre_cluster = cluster1 cluster2 = cluster_expansion(cluster1,data2,Params) if cluster2['f'].shape == pre_cluster['f'].shape: @@ -111,7 +89,6 @@ def cluster_func(sysid_output: dict[str,Any], Params : dict[str,Any]) -> tuple[d #Save cluster if isinstance(cluster2['f'],np.ndarray): #Must atleast have two poles - #print("Cluster saved", np.median(cluster2['f'])) cluster_dict[str(cluster_counter)] = cluster2 cluster_counter += 1 data1 = remove_data_from_S(data2,cluster2) #Remove clustered poles from data @@ -131,7 +108,7 @@ def cluster_func(sysid_output: dict[str,Any], Params : dict[str,Any]) -> tuple[d if cluster['f'].shape[0] < Params['mstab']: print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab']) else: - print("Cluster saved", np.median(cluster['f'])) + print("Cluster saved:", np.median(cluster['f'])) cluster_dict3[str(ii)] = cluster cluster_counter += 1 data1 = remove_data_from_S(data2,cluster) #Remove clustered poles from data diff --git a/src/methods/mode_clustering_functions/create_cluster.py b/src/methods/mode_clustering_functions/create_cluster.py index c97de88..e020b74 100644 --- a/src/methods/mode_clustering_functions/create_cluster.py +++ b/src/methods/mode_clustering_functions/create_cluster.py @@ -1,8 +1,8 @@ import numpy as np -from typing import Any +from typing import Any, Dict from functions.calculate_mac import calculate_mac -def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Algorithm 2 +def cluster_creation(IP: Dict[str,Any],Params: Dict[str,Any]) -> Dict[str,Any]: #Algorithm 2 """ Create cluster @@ -36,7 +36,6 @@ def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: IPu['ms'] = np.array((mode_shapes[ii,:])) IPu['row'] = row[ii] IPu['col'] = col[ii] - unique = 1 #To determine if the unique poles are more than one, for later use. if 1 then only one unique pole exist else: IPu['f'] = np.append(IPu['f'],frequencies[ii]) IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii]) @@ -45,10 +44,9 @@ def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:])) IPu['row'] = np.append(IPu['row'],row[ii]) IPu['col'] = np.append(IPu['col'],col[ii]) - unique = 2 #To determine if the unique poles are more than one, for later use. 
if 2 more than one uniqe pole exist if len(IPu) > 0: #If there exist model orders with unique poles - if unique == 1: #If there only exist one unique pole + if type(IPu['f']) == np.float64: cluster = {'f':np.array([IPu['f']]), 'cov_f':np.array([IPu['cov_f']]), 'd':np.array([IPu['d']]), @@ -87,7 +85,7 @@ def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: #Check if there are multiple points with same model order as ip ip_ids = np.argwhere(row==row[0]) if len(ip_ids[:,0]) > 1: # Remove all the other points at the same model order - for ii in ip_ids[:,0]: + for ii in np.flip(ip_ids[:,0]): try: frequencies = np.delete(frequencies,ii) cov_f = np.delete(cov_f,ii) @@ -151,7 +149,7 @@ def cluster_creation(IP: dict[str,Any],Params: dict[str,Any]) -> dict[str,Any]: return cluster -def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: +def cluster_from_mac(cluster: Dict[str,Any], IP: Dict[str,Any], Params: Dict[str,Any]) -> Dict[str,Any]: """ Add points to cluster based on MAC @@ -199,25 +197,25 @@ def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str skip_id.append(idx) #Compare remaining points with newly added cluster points, i.e. points are compared with the full cluster, not just ip - if cluster['f'].shape[0] > 1: #If points have been added to cluster proceed + if cluster['f'].shape[0] > 1: #Proceed if points have been added to cluster if IP['ms'].shape[0] > len(skip_id): #If there are more points to compare left, then proceed - unclustered_points = 1 - while IP['ms'].shape[0] != unclustered_points: #Run until no points are clustered anymore - unclustered_points = IP['ms'].shape[0] + cluster_length = len(cluster['row']) + new_cluster_length = 0 + while cluster_length != new_cluster_length: #Run until no points are clustered anymore + cluster_length = len(cluster['row']) i_ms = IP['ms'][1:] for jj, ms in enumerate(i_ms): idx = jj+1 - if idx in skip_id: - # print(idx) + if idx in skip_id: #Skip indecies of points that have already been added continue MAC_list = [] for c_ms in cluster['mode_shapes']: MAC_list.append(calculate_mac(c_ms,ms)) - # print("MAC_list",MAC_list) if max(MAC_list) > Params['tMAC']: #line 2 in algorithm + MAC = calculate_mac(ip_ms,ms) #Add to cluster cluster['f'] = np.append(cluster['f'],frequencies[idx]) cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx]) @@ -231,6 +229,9 @@ def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str skip_id.append(idx) + new_cluster_length = len(cluster['row']) + + clustered_id = [] for r2 in cluster['row']: #For every entry in row cluster unclustered_point = False @@ -258,7 +259,7 @@ def cluster_from_mac(cluster: dict[str,Any], IP: dict[str,Any], Params: dict[str return cluster, unclustered_IPu -def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]: +def cluster_from_mac_IPm(cluster: Dict[str,Any], IPm: Dict[str,Any], Params: Dict[str,Any]) -> Dict[str,Any]: """ Cluster based on MAC if multiple poles exist for the model order @@ -368,8 +369,6 @@ def cluster_from_mac_IPm(cluster: dict[str,Any], IPm: dict[str,Any], Params: dic cluster['col'] = np.append(cluster['col'],col[ll]) skip_id.append(oo) - # else: - # print("Not clustered. 
MAC not satisfied") clustered_id = [] for r2 in cluster['row']: #For every entry in row cluster diff --git a/src/methods/mode_clustering_functions/expand_cluster.py b/src/methods/mode_clustering_functions/expand_cluster.py index 5a4efc8..bf53060 100644 --- a/src/methods/mode_clustering_functions/expand_cluster.py +++ b/src/methods/mode_clustering_functions/expand_cluster.py @@ -12,9 +12,11 @@ def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[ Params (dict): Dictionary of algorithm parameters Returns: cluster (dict): Expanded cluster + + Raises: + Double orders exist """ - #print("\nExpansion") unClustered_frequencies = data['frequencies'] unClustered_damping = data['damping_ratios'] @@ -78,6 +80,6 @@ def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[ print("row_after",cluster['row']) print("exp2",cluster['f']) print("double orders",cluster['row']) - breakpoint() + raise ValueError("Failed due to double orders exist") return cluster diff --git a/src/methods/mode_tracking_functions/mode_tracking.py b/src/methods/mode_tracking_functions/mode_tracking.py index 40df7f7..361c3ac 100644 --- a/src/methods/mode_tracking_functions/mode_tracking.py +++ b/src/methods/mode_tracking_functions/mode_tracking.py @@ -17,7 +17,6 @@ def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any] tracked_clusters (dict): Previously tracked clusters """ - print("Cluster tracking") if Params == None: Params = {'phi_cri':0.8, 'freq_cri':0.2} @@ -70,7 +69,6 @@ def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any] cluster['id'] = iter if pos == "new": #Add cluster as a new tracked cluster new_key = len(tracked_clusters)-1 #-1 for "iteration", + 1 for next cluster and -1 for starting at 0 = -1 - #print(f"new key: {new_key}") tracked_clusters[str(new_key)] = [cluster] else: #Add cluster to an existing tracked cluster cluster_to_add_to = tracked_clusters[str(pos)] @@ -86,8 +84,7 @@ def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any] if kk > 10: #Debug info: unique_match_debug_info(result,cluster_dict,t_list) - print("Unresolved mode tracking") - breakpoint() + raise("Unresolved mode tracking") for possible_match_id in set(result.values()): #Go through all unique values if possible_match_id == "new": #Do nothing if "new" @@ -98,7 +95,6 @@ def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any] itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the index of the unique cluster match else: itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the index of the unique cluster match - print(possible_match_id,np.array(list(result.values())),itemindex, len(itemindex)) if len(itemindex) > 1: #If multiple clusters match to the same tracked cluster pos, result, cluster_index = resolve_nonunique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters) @@ -107,9 +103,6 @@ def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any] result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params,result,skip_cluster,skip_tracked_cluster) #Match with tracked clusters, but skip the already matched. 
diff --git a/src/methods/packages/yafem-0.2.6-py3-none-any.whl b/src/methods/packages/yafem-0.2.6-py3-none-any.whl
deleted file mode 100644
index 0ad7e9f90ddf63594e8bdf2866deefd202a81b3c..0000000000000000000000000000000000000000
Binary files a/src/methods/packages/yafem-0.2.6-py3-none-any.whl and /dev/null differ
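
One detail from the cluster_creation hunk above is worth a note: the loop now iterates np.flip(ip_ids[:,0]) because deleting array entries in ascending index order shifts every later index as the array shrinks, so the wrong points could be removed. Iterating the indices in descending order keeps each still-pending index valid. A small self-contained demonstration of the effect (the array values are illustrative only):

    import numpy as np

    frequencies = np.array([1.0, 1.1, 1.2, 1.3])
    ids = np.array([1, 2])  # indices of the points to drop

    # Descending order: each deletion leaves the remaining indices valid.
    for ii in np.flip(ids):
        frequencies = np.delete(frequencies, ii)
    print(frequencies)  # [1.  1.3]

Deleting in ascending order would instead leave [1.0, 1.2], removing the wrong point.
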