diff --git a/config/production.json.template b/config/production.json.template
index 8336ba8..fb4d112 100644
--- a/config/production.json.template
+++ b/config/production.json.template
@@ -1,29 +1,53 @@
 {
     "MQTT": {
-        "host": "test.mosquitto.org",
+        "host": "",
         "port": 1883,
         "userId": "",
         "password": "",
-        "ClientID": "ReplaySubscriber",
+        "ClientID": "NOT_NEEDED",
         "QoS": 1,
         "TopicsToSubscribe": [
-            "cpsens/recorded/1/data",
-            "cpsens/recorded/1/metadata",
-            "cpsens/recorded/2/data",
-            "cpsens/recorded/+/data"
+            "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/raw/data",
+            "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/raw/metadata",
+            "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/2/acc/raw/data",
+            "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/3/acc/raw/data",
+            "cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/4/acc/raw/data"
         ]
     },
     "sysID": {
         "host": "",
         "port": 1883,
         "userId": "",
         "password": "",
-        "ClientID": "sub.232.sds.213s",
-        "QoS": 1,
-        "TopicsToSubscribe": ["cpsens/d8-3a-dd-f5-92-48/cpsns_Simulator/1_2/oma_results"]
+        "ClientID": "NOT_NEEDED",
+        "QoS": 2,
+        "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/sysid/data"]
+    },
+
+    "mode_cluster": {
+        "host": "",
+        "port": 1883,
+        "userId": "",
+        "password": "",
+        "ClientID": "NOT_NEEDED",
+        "QoS": 2,
+        "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/mode_cluster/data"]
+    },
+
+    "model_update": {
+        "host": "",
+        "port": 1883,
+        "userId": "",
+        "password": "",
+        "ClientID": "NOT_NEEDED",
+        "QoS": 2,
+        "TopicsToSubscribe": ["cpsens/d8-3a-dd-37-d2-7e/3050-A-060_sn_106209/1/acc/model_update/data"]
+    }
 }
diff --git a/pyproject.toml b/pyproject.toml
index 46d69f5..7ff673c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,7 +16,7 @@ paho-mqtt = "^2.1.0"
 numpy = "^2.2.5"
 click ="^8.1.8"
 pyOMA-2 = "1.0.0"
-yafem = { path = "src/methods/packages/yafem-0.2.6-py3-none-any.whl" }
+yafem = "1.0.0"
 
 [tool.poetry.group.dev.dependencies]
diff --git a/src/data/accel/hbk/accelerometer.py b/src/data/accel/hbk/accelerometer.py
index 421034d..94feef9 100644
--- a/src/data/accel/hbk/accelerometer.py
+++ b/src/data/accel/hbk/accelerometer.py
@@ -86,7 +86,7 @@ def process_message(self, msg: mqtt.MQTTMessage) -> None:
                 if not oldest_deque:  # Remove the key/deque from the map if it's empty
                     del self.data_map[oldest_key]
             total_samples = sum(len(dq) for dq in self.data_map.values())
-            print(f" Channel: {self.topic} Key: {samples_from_daq_start}, Samples: {num_samples}")
+            #print(f" Channel: {self.topic} Key: {samples_from_daq_start}, Samples: {num_samples}")
         except Exception as e:
             print(f"Error processing message: {e}")
diff --git a/src/data/accel/hbk/aligner.py b/src/data/accel/hbk/aligner.py
index 335e4ef..ab339a1 100644
--- a/src/data/accel/hbk/aligner.py
+++ b/src/data/accel/hbk/aligner.py
@@ -130,7 +130,7 @@ def _extract_aligned_block(self, group: List[int], batch_size: int,
             ch.clear_used_data(group[0], requested_samples)
 
         aligned_array = np.array(aligned_data, dtype=np.float32)
-        print(f"Aligned shape: {aligned_array.shape}")
+        print(f"\nAligned shape: {aligned_array.shape}")
 
         return aligned_array, utc_time
diff --git a/src/examples/README.md b/src/examples/README.md
index 8170b0f..b8668fa 100644
--- a/src/examples/README.md
+++ b/src/examples/README.md
@@ -62,13 +62,18 @@ To run the examples with the default config, use:
 
 ```bash
 python .\src\examples\example.py accelerometers
 python .\src\examples\example.py align-readings
-python .\src\examples\example.py oma-and-print
-python .\src\examples\example.py oma-and-plot
-python .\src\examples\example.py oma-and-publish
-python .\src\examples\example.py mode-tracking-with-local-sysid
-python .\src\examples\example.py mode-tracking-with-remote-sysid
+python .\src\examples\example.py sysid-and-print
+python .\src\examples\example.py sysid-and-plot
+python .\src\examples\example.py sysid-and-publish
+python .\src\examples\example.py live-sysid-publish
+python .\src\examples\example.py clustering-with-local-sysid
+python .\src\examples\example.py clustering-with-remote-sysid
+python .\src\examples\example.py live-clustering-with-remote-sysid
+python .\src\examples\example.py mode-tracking-with-remote-sysid
+python .\src\examples\example.py mode-tracking-with-local-sysid
+python .\src\examples\example.py live-mode-tracking-with-remote-sysid
 python .\src\examples\example.py model-update-local-sysid
-python .\src\examples\example.py model-update-remote-sysid
+python .\src\examples\example.py live-model-update-remote-sysid
 ```
diff --git a/src/examples/example.py b/src/examples/example.py
index ea2834a..4344c68 100644
--- a/src/examples/example.py
+++ b/src/examples/example.py
@@ -2,16 +2,23 @@ import click
 from examples.acceleration_readings import read_accelerometers
 from examples.aligning_readings import align_acceleration_readings
-from examples.run_pyoma import (
-    run_oma_and_plot,
-    run_oma_and_publish,
-    run_oma_and_print,
+from examples.run_sysid import (
+    run_sysid_and_plot,
+    run_sysid_and_publish,
+    run_sysid_and_print,
+    live_sysid_and_publish
 )
-from examples.mode_tracking import (
+from examples.run_mode_clustering import (
+    run_mode_clustering_with_local_sysid,
+    run_mode_clustering_with_remote_sysid,
+    run_live_mode_clustering_with_remote_sysid
+)
+from examples.run_mode_tracking import (
     run_mode_tracking_with_local_sysid,
     run_mode_tracking_with_remote_sysid,
+    run_live_mode_tracking_with_remote_sysid
 )
-from examples.updating_parameters import (
+from examples.run_model_update import (
     run_model_update_local_sysid,
     run_model_update_remote_sysid
 )
@@ -34,22 +41,40 @@ def accelerometers(ctx):
 def align_readings(ctx):
     align_acceleration_readings(ctx.obj["CONFIG"])
 
+@cli.command()
+@click.pass_context
+def sysid_and_publish(ctx):
+    run_sysid_and_publish(ctx.obj["CONFIG"])
 
 @cli.command()
 @click.pass_context
-def oma_and_publish(ctx):
-    run_oma_and_publish(ctx.obj["CONFIG"])
+def live_sysid_publish(ctx):
+    live_sysid_and_publish(ctx.obj["CONFIG"])
 
 @cli.command()
 @click.pass_context
-def oma_and_plot(ctx):
-    run_oma_and_plot(ctx.obj["CONFIG"])
+def sysid_and_plot(ctx):
+    run_sysid_and_plot(ctx.obj["CONFIG"])
 
 @cli.command()
 @click.pass_context
-def oma_and_print(ctx):
-    run_oma_and_print(ctx.obj["CONFIG"])
+def sysid_and_print(ctx):
+    run_sysid_and_print(ctx.obj["CONFIG"])
 
+@cli.command()
+@click.pass_context
+def clustering_with_local_sysid(ctx):
+    run_mode_clustering_with_local_sysid(ctx.obj["CONFIG"])
+
+@cli.command()
+@click.pass_context
+def clustering_with_remote_sysid(ctx):
+    run_mode_clustering_with_remote_sysid(ctx.obj["CONFIG"])
+
+@cli.command()
+@click.pass_context
+def live_clustering_with_remote_sysid(ctx):
+    run_live_mode_clustering_with_remote_sysid(ctx.obj["CONFIG"])
 
 @cli.command()
 @click.pass_context
@@ -61,6 +86,10 @@ def mode_tracking_with_local_sysid(ctx):
 def mode_tracking_with_remote_sysid(ctx):
     run_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"])
 
+@cli.command()
+@click.pass_context
+def live_mode_tracking_with_remote_sysid(ctx):
+    run_live_mode_tracking_with_remote_sysid(ctx.obj["CONFIG"])
 
 @cli.command()
 @click.pass_context
@@ -69,7 +98,7 @@ def model_update_local_sysid(ctx):
 
 @cli.command()
 @click.pass_context
-def model_update_remote_sysid(ctx):
+def live_model_update_remote_sysid(ctx):
     run_model_update_remote_sysid(ctx.obj["CONFIG"])
 
 if __name__ == "__main__":
diff --git a/src/examples/mode_tracking.py b/src/examples/mode_tracking.py
deleted file mode 100644
index cb4f270..0000000
--- a/src/examples/mode_tracking.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import numpy as np
-from data.comm.mqtt import load_config
-from data.accel.hbk.aligner import Aligner
-from methods import sys_id as sysID
-from methods import model_update_module as MT
-
-# pylint: disable=R0914
-def run_mode_tracking_with_local_sysid(config_path):
-    number_of_minutes = 0.5
-    config = load_config(config_path)
-    mqtt_config = config["MQTT"]
-
-    # Setting up the client and extracting Fs
-    data_client, fs = sysID.setup_client(mqtt_config)
-
-    # Setting up the aligner
-    data_topic_indexes = [0, 2]
-    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes]
-    aligner = Aligner(data_client, topics=selected_topics)
-
-    aligner_time = None
-    while aligner_time is None:
-        oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
-    data_client.disconnect()
-
-    # Mode Track
-    cleaned_values, median_frequencies, confidence_intervals = MT.run_mode_track(
-        oma_output)
-
-    median_frequencies = []
-    mode_shapes_list = []
-
-    for cluster in cleaned_values:
-        mode_shapes = cluster["mode_shapes"]  # shape: (n_modes_in_cluster, n_channels)
-        median_shape = np.median(mode_shapes, axis=0)  # median across modes
-        median_frequencies.append(cluster["median"])
-        mode_shapes_list.append(median_shape)
-
-    # Convert to numpy arrays
-    median_frequencies = np.array(median_frequencies)
-    mode_shapes_array = np.array(mode_shapes_list)  # shape: (n_clusters, n_channels)
-    print("Mode shapes:", mode_shapes_array)
-    print("\nMedian frequencies:", median_frequencies)
-    print("\nConfidence intervals:", confidence_intervals)
-
-
-def run_mode_tracking_with_remote_sysid(config_path):
-    config = load_config(config_path)
-    cleaned_values, median_frequencies, confidence_intervals = (
-        MT.subscribe_and_get_cleaned_values(config_path)
-    )
-    print("Cleaned values:", cleaned_values)
-    print("Tracked frequencies:", median_frequencies)
-    print("\nConfidence intervals:", confidence_intervals)
diff --git a/src/examples/run_mode_clustering.py b/src/examples/run_mode_clustering.py
new file mode 100644
index 0000000..fceea97
--- /dev/null
+++ b/src/examples/run_mode_clustering.py
@@ -0,0 +1,53 @@
+import sys
+import time
+import matplotlib.pyplot as plt
+from data.comm.mqtt import load_config
+from data.accel.hbk.aligner import Aligner
+from methods import sysid as sysID
+from methods import mode_clustering as MC
+from methods.constants import PARAMS
+from functions.plot_clusters import plot_clusters
+
+# pylint: disable=R0914
+def run_mode_clustering_with_local_sysid(config_path):
+    number_of_minutes = 1
+    config = load_config(config_path)
+    mqtt_config = config["MQTT"]
+
+    # Setting up the client and extracting Fs
+    data_client, fs = sysID.setup_client(mqtt_config)
+
+    # Setting up the aligner
+    data_topic_indexes = [0, 2, 3, 4]
+    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes]
+    aligner = Aligner(data_client, topics=selected_topics)
+
+    aligner_time = None
+    t1 = time.time()
+    while aligner_time is None:
+        time.sleep(0.1)
+        t2 = time.time()
+        t_text = f"Waiting for data for {round(t2-t1,1)} seconds"
+        print(t_text,end="\r")
+        sysid_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
+    data_client.disconnect()
+
+    # Mode clustering
+    dictionary_of_clusters, median_frequencies = MC.cluster_sysid(
+        sysid_output,PARAMS)
+
+    # Print frequencies
+    print("\nMedian frequencies:", median_frequencies)
+
+    fig_ax = plot_clusters(dictionary_of_clusters, sysid_output, PARAMS, fig_ax = None)
+    plt.show(block=True)
+    sys.stdout.flush()
+
+def run_mode_clustering_with_remote_sysid(config_path):
+    sysid_output, dictionary_of_clusters, median_frequencies = MC.subscribe_and_cluster(config_path,PARAMS)
+    fig_ax = plot_clusters(dictionary_of_clusters, sysid_output, PARAMS, fig_ax = None)
+    plt.show(block=True)
+    sys.stdout.flush()
+
+def run_live_mode_clustering_with_remote_sysid(config_path):
+    MC.live_mode_clustering(config_path,topic_index=0,plot=[1,1])
diff --git a/src/examples/run_mode_tracking.py b/src/examples/run_mode_tracking.py
new file mode 100644
index 0000000..2dc1bc4
--- /dev/null
+++ b/src/examples/run_mode_tracking.py
@@ -0,0 +1,57 @@
+import sys
+import time
+import matplotlib.pyplot as plt
+from data.comm.mqtt import load_config
+from data.accel.hbk.aligner import Aligner
+from methods import sysid as sysID
+from methods import mode_clustering as MC
+from methods import mode_tracking as MT
+from methods.constants import PARAMS
+from functions.plot_mode_tracking import plot_tracked_modes
+
+# pylint: disable=R0914
+def run_mode_tracking_with_local_sysid(config_path):
+    number_of_minutes = 1
+    config = load_config(config_path)
+    mqtt_config = config["MQTT"]
+
+    # Setting up the client and extracting Fs
+    data_client, fs = sysID.setup_client(mqtt_config)
+
+    # Setting up the aligner
+    data_topic_indexes = [0, 2, 3, 4]
+    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes]
+    aligner = Aligner(data_client, topics=selected_topics)
+
+    aligner_time = None
+    t1 = time.time()
+    while aligner_time is None:
+        time.sleep(0.1)
+        t2 = time.time()
+        t_text = f"Waiting for data for {round(t2-t1,1)} seconds"
+        print(t_text,end="\r")
+        sysid_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
+    data_client.disconnect()
+
+    # Mode clustering
+    dictionary_of_clusters, median_frequencies = MC.cluster_sysid(
+        sysid_output,PARAMS)
+
+    # Print frequencies
+    print("\nMedian frequencies:", median_frequencies)
+
+    # Mode tracking, starting from an empty set of tracked clusters
+    tracked_clusters = MT.track_clusters(dictionary_of_clusters,{},PARAMS)
+
+    fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None)
+    plt.show(block=True)
+    sys.stdout.flush()
+
+def run_mode_tracking_with_remote_sysid(config_path):
+    sysid_output, clusters, tracked_clusters = MT.subscribe_and_track_clusters(config_path)
+    fig_ax = plot_tracked_modes(tracked_clusters, PARAMS, fig_ax = None, x_length = None)
+    plt.show(block=True)
+    sys.stdout.flush()
+
+def run_live_mode_tracking_with_remote_sysid(config_path):
+    MT.live_mode_tracking(config_path,plot=[1,1])
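+
+# These entry points are wired to the CLI in src/examples/example.py, e.g.
+# (assuming a config has been created from config/production.json.template):
+#
+#     python .\src\examples\example.py mode-tracking-with-local-sysid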
diff --git a/src/examples/updating_parameters.py b/src/examples/run_model_update.py
similarity index 84%
rename from src/examples/updating_parameters.py
rename to src/examples/run_model_update.py
index e2b7ed9..3b420d3 100644
--- a/src/examples/updating_parameters.py
+++ b/src/examples/run_model_update.py
@@ -1,8 +1,10 @@
 import time
 from data.comm.mqtt import load_config
 from data.accel.hbk.aligner import Aligner
-from methods import sys_id as sysID
-from methods import model_update_module as MT
+from methods import sysid as sysID
+from methods import mode_clustering as MC
+from methods import model_update as MU
+from methods.constants import PARAMS
 
 # pylint: disable=R0914, C0103
 def run_model_update_local_sysid(config_path):
@@ -22,14 +24,14 @@ def run_model_update_local_sysid(config_path):
     while aligner_time is None:
         print("Not enough aligned yet")
         time.sleep(10)
-        oma_output, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
+        oma_output, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
     data_client.disconnect()
 
-    # Mode Track
-    cleaned_values, _, _ = MT.run_mode_track(oma_output)
+    # Mode clustering
+    dictionary_clusters, median_frequencies = MC.cluster_sysid(oma_output,PARAMS)
 
     # Run model update
-    update_result = MT.run_model_update(cleaned_values)
+    update_result = MU.run_model_update(dictionary_clusters)
 
     if update_result is not None:
         optimized_parameters = update_result['optimized_parameters']
@@ -58,12 +60,12 @@ def run_model_update_local_sysid(config_path):
 
 def run_model_update_remote_sysid(config_path):
     config = load_config(config_path)
-    cleaned_values, _, _ = (
-        MT.subscribe_and_get_cleaned_values(config_path)
+    sysid_output, clusters, median_frequencies = (
+        MC.subscribe_and_cluster(config_path, PARAMS)
     )
 
     # Run model update
-    update_result = MT.run_model_update(cleaned_values)
+    update_result = MU.run_model_update(clusters)
 
     if update_result is not None:
         optimized_parameters = update_result['optimized_parameters']
diff --git a/src/examples/run_pyoma.py b/src/examples/run_pyoma.py
deleted file mode 100644
index c05f290..0000000
--- a/src/examples/run_pyoma.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import sys
-import matplotlib.pyplot as plt
-from methods import sys_id as sysID
-from data.comm.mqtt import load_config
-from data.accel.hbk.aligner import Aligner
-from functions.natural_freq import plot_natural_frequencies
-
-
-def setup_oma(config_path, data_topic_indexes):
-    """
-    Helper function to set up OMA (Operational Modal Analysis).
-
-    Parameters:
-    config_path (str): Path to the configuration file.
-    data_topic_indexes (list): Indexes of topics to subscribe to.
-
-    Returns:
-    tuple: (aligner, data_client, fs)
-    """
-    config = load_config(config_path)
-    mqtt_config = config["MQTT"]
-
-    # Setting up the client and extracting Fs
-    data_client, fs = sysID.setup_client(mqtt_config)
-
-    # Setting up the aligner
-    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes]
-    aligner = Aligner(data_client, topics=selected_topics)
-
-    return aligner, data_client, fs
-
-
-def run_oma_and_plot(config_path):
-    number_of_minutes = 0.2
-    data_topic_indexes = [0, 2]
-    aligner, data_client, fs = setup_oma(config_path, data_topic_indexes)
-
-    fig_ax = None
-    aligner_time = None
-    while aligner_time is None:
-        results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
-    data_client.disconnect()
-    fig_ax = plot_natural_frequencies(results['Fn_poles'], freqlim=(0, 75), fig_ax=fig_ax)
-    plt.show(block=True)
-    sys.stdout.flush()
-
-
-def run_oma_and_print(config_path):
-    number_of_minutes = 0.2
-    data_topic_indexes = [0, 2]
-    aligner, data_client, fs = setup_oma(config_path, data_topic_indexes)
-
-    aligner_time = None
-    while aligner_time is None:
-        results, aligner_time = sysID.get_oma_results(number_of_minutes, aligner, fs)
-    data_client.disconnect()
-    sys.stdout.flush()
-
-    print(f"\n System Frequencies \n {results['Fn_poles']}")
-    print(f"\n Cov \n{results['Fn_poles_cov']}")
-    print(f"\n damping_ratios \n{results['Xi_poles']}")
-    print(f"\n cov_damping \n{results['Xi_poles_cov']}")
-
-
-def run_oma_and_publish(config_path):
-    number_of_minutes = 0.02
-    data_topic_indexes = [0, 2]
-    aligner, data_client, fs = setup_oma(config_path, data_topic_indexes)
-    publish_config = load_config(config_path)["sysID"]
-
-    # Setting up the client for publishing OMA results
-    publish_client, _ = sysID.setup_client(publish_config)  # fs not needed here
-
-    sysID.publish_oma_results(
-        number_of_minutes,
-        aligner,
-        publish_client,
-        publish_config["TopicsToSubscribe"][0],
-        fs
-    )
-
-    print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}")
-    data_client.disconnect()
-    sys.stdout.flush()
diff --git a/src/examples/run_sysid.py b/src/examples/run_sysid.py
new file mode 100644
index 0000000..0a4bb61
--- /dev/null
+++ b/src/examples/run_sysid.py
@@ -0,0 +1,122 @@
+import sys
+import time
+import matplotlib.pyplot as plt
+from data.comm.mqtt import load_config
+from data.accel.hbk.aligner import Aligner
+from functions.plot_sysid import (plot_stabilization_diagram, plot_pre_stabilization_diagram)
+from methods import sysid as sysID
+from methods.constants import PARAMS
+
+
+def setup_sysid(config_path, data_topic_indexes):
+    """
+    Helper function to set up system identification (sysid) via Operational Modal Analysis.
+
+    Parameters:
+    config_path (str): Path to the configuration file.
+    data_topic_indexes (list): Indexes of topics to subscribe to.
+
+    Returns:
+    tuple: (aligner, data_client, fs)
+    """
+    config = load_config(config_path)
+    mqtt_config = config["MQTT"]
+
+    # Setting up the client and extracting Fs
+    data_client, fs = sysID.setup_client(mqtt_config)
+
+    # Setting up the aligner
+    selected_topics = [mqtt_config["TopicsToSubscribe"][i] for i in data_topic_indexes]
+    aligner = Aligner(data_client, topics=selected_topics)
+
+    return aligner, data_client, fs
+
+
+def run_sysid_and_plot(config_path):
+    number_of_minutes = 1
+    data_topic_indexes = [0, 2, 3, 4]
+    aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes)
+
+    fig_ax1 = None
+    fig_ax = None
+    aligner_time = None
+    t1 = time.time()
+    while aligner_time is None:
+        time.sleep(0.1)
+        t2 = time.time()
+        t_text = f"Waiting for data for {round(t2-t1,1)} seconds"
+        print(t_text,end="\r")
+        results, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
+    data_client.disconnect()
+    print(aligner_time)
+    fig_ax1 = plot_pre_stabilization_diagram(results, PARAMS, fig_ax=fig_ax1)
+    fig_ax = plot_stabilization_diagram(results, PARAMS, fig_ax=fig_ax)
+    plt.show(block=True)
+    sys.stdout.flush()
+
+
+def run_sysid_and_print(config_path):
+    number_of_minutes = 0.2
+    data_topic_indexes = [0, 2, 3, 4]
+    aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes)
+
+    aligner_time = None
+    t1 = time.time()
+    while aligner_time is None:
+        time.sleep(0.1)
+        t2 = time.time()
+        t_text = f"Waiting for data for {round(t2-t1,1)} seconds"
+        print(t_text,end="\r")
+        results, aligner_time = sysID.get_sysid_results(number_of_minutes, aligner, fs)
+    data_client.disconnect()
+    sys.stdout.flush()
+
+    print(f"\n System Frequencies \n {results['Fn_poles']}")
+    print(f"\n Cov \n{results['Fn_poles_cov']}")
+    print(f"\n damping_ratios \n{results['Xi_poles']}")
+    print(f"\n cov_damping \n{results['Xi_poles_cov']}")
+
+
+def run_sysid_and_publish(config_path):
+    number_of_minutes = 0.2
+    data_topic_indexes = [0, 2, 3, 4]
+    aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes)
+    publish_config = load_config(config_path)["sysID"]
+
+    # Setting up the client for publishing sysid results
+    publish_client, _ = sysID.setup_client(publish_config)  # fs not needed here
+
+    publish_result = sysID.publish_sysid_results(
+        number_of_minutes,
+        aligner,
+        publish_client,
+        publish_config["TopicsToSubscribe"][0],
+        fs
+    )
+
+    if publish_result is True:
+        print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}")
+    data_client.disconnect()
+    sys.stdout.flush()
+
+
+def live_sysid_and_publish(config_path):
+    number_of_minutes = 1
+    data_topic_indexes = [0, 2, 3, 4]
+    aligner, data_client, fs = setup_sysid(config_path, data_topic_indexes)
+    publish_config = load_config(config_path)["sysID"]
+
+    # Setting up the client for publishing sysid results
+    publish_client, _ = sysID.setup_client(publish_config)  # fs not needed here
+
+    publish_result = True
+    while publish_result:
+        publish_result = sysID.publish_sysid_results(
+            number_of_minutes,
+            aligner,
+            publish_client,
+            publish_config["TopicsToSubscribe"][0],
+            fs
+        )
+        if publish_result is True:
+            print(f"Publishing to topic: {publish_config['TopicsToSubscribe'][0]}")
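+
+# These entry points back the sysid-* commands in src/examples/example.py; a
+# one-off run that prints the identified poles (assuming a filled-in config):
+#
+#     python .\src\examples\example.py sysid-and-print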
diff --git a/src/functions/calculate_mac.py b/src/functions/calculate_mac.py
new file mode 100644
index 0000000..f6b73f1
--- /dev/null
+++ b/src/functions/calculate_mac.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+def calculate_mac(reference_mode_shape: np.ndarray, second_mode_shape: np.ndarray) -> float:
+    """
+    Calculate the Modal Assurance Criterion (MAC)
+
+    Args:
+        reference_mode_shape (np.ndarray): Mode shape to compare against
+        second_mode_shape (np.ndarray): Mode shape to compare
+    Returns:
+        MAC (float): Modal Assurance Criterion
+    """
+    numerator = np.abs(np.dot(reference_mode_shape.conj().T, second_mode_shape)) ** 2
+    denominator = (np.dot(reference_mode_shape.conj().T, reference_mode_shape)
+                   * np.dot(second_mode_shape.conj().T, second_mode_shape))
+    return np.real(numerator / denominator)
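+
+# Minimal sanity check (hypothetical values, not part of the pipeline):
+# a mode shape compared with itself gives MAC = 1, orthogonal shapes give 0.
+if __name__ == "__main__":
+    phi = np.array([1.0 + 0.5j, -0.3 + 0.1j, 0.8 - 0.2j])
+    print(calculate_mac(phi, phi))                                    # ~1.0
+    print(calculate_mac(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0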
diff --git a/src/functions/clean_sysid_output.py b/src/functions/clean_sysid_output.py
new file mode 100644
index 0000000..ee67e10
--- /dev/null
+++ b/src/functions/clean_sysid_output.py
@@ -0,0 +1,134 @@
+import numpy as np
+
+def remove_complex_conjugates(sysid_output):
+    """
+    Remove complex conjugates
+
+    Args:
+        sysid_output (Dict[str, Any]): Results from PyOMA-2
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    sysid = sysid_output.copy()
+    # sysid results as numpy arrays
+    frequencies = sysid['Fn_poles'].copy()
+    cov_freq = sysid['Fn_poles_cov'].copy()
+    damping_ratios = sysid['Xi_poles'].copy()
+    cov_damping = sysid['Xi_poles_cov'].copy()
+    mode_shapes = sysid['Phi_poles'].copy()
+
+    # Remove the complex conjugate entries
+    frequencies = frequencies[::2]  # This is 'S' as per the algorithm
+    damping_ratios = damping_ratios[::2]
+    mode_shapes = mode_shapes[::2, :, :]
+    cov_freq = cov_freq[::2]
+    cov_damping = cov_damping[::2]
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
+
+def transform_sysid_features(frequencies_,cov_freq_,damping_ratios_,cov_damping_,mode_shapes_):
+    """
+    Transform sysid results
+
+    Transpose, flip and sort the arrays so that they map directly onto the
+    stabilization diagram. This means that the frequency array maps directly to the plot:
+        MO.
+        5.| x    x
+        4.| x
+        3.| x
+        2.|      x
+        1.|
+        0.|
+          --1----4--- Frequency
+    The frequency array then has shape (6, 3): 6 rows, one per model order
+    (including 0), and 3 columns, the maximum number of poles per model order
+    (initially 6, halved to 3 once the complex conjugates are removed).
+    For the example above (third column omitted, all NaN) it becomes:
+          _0____1__
+        0| 1    4
+        1| 1    NaN
+        2| 1    NaN
+        3| NaN  4
+        4| NaN  NaN
+        5| NaN  NaN
+
+    Args:
+        frequencies_ (np.ndarray): Frequencies (mean)
+        cov_freq_ (np.ndarray): Covariance of frequency
+        damping_ratios_ (np.ndarray): Damping ratios (mean)
+        cov_damping_ (np.ndarray): Covariance of damping ratio
+        mode_shapes_ (np.ndarray): Mode shapes
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes2 (np.ndarray): Mode shapes
+        model_orders (np.ndarray): Model order of each entry
+    """
+
+    # Transformation of data
+    frequencies = np.transpose(frequencies_)
+    frequencies = np.flip(frequencies, 0)
+    sort_indices = np.argsort(frequencies,axis=1)
+    frequencies = np.take_along_axis(frequencies, sort_indices, axis=1)
+    cov_freq = np.transpose(cov_freq_)
+    cov_freq = np.flip(cov_freq, 0)
+    cov_freq = np.take_along_axis(cov_freq, sort_indices, axis=1)
+    damping_ratios = np.transpose(damping_ratios_)
+    damping_ratios = np.flip(damping_ratios, 0)
+    damping_ratios = np.take_along_axis(damping_ratios, sort_indices, axis=1)
+    cov_damping = np.transpose(cov_damping_)
+    cov_damping = np.flip(cov_damping, 0)
+    cov_damping = np.take_along_axis(cov_damping, sort_indices, axis=1)
+    mode_shapes = np.moveaxis(mode_shapes_, [0, 1, 2], [1, 0, 2])
+
+    mode_shapes2 = np.zeros(mode_shapes.shape,dtype=np.complex128)
+    for ii, indices in enumerate(sort_indices):
+        mode_shapes2[ii,:,:] = mode_shapes[(sort_indices.shape[0]-ii-1),indices,:]
+
+    # Array of model orders
+    model_order = np.arange(sort_indices.shape[0])
+    model_orders = np.stack((model_order,) * sort_indices.shape[1], axis=1)
+    model_orders = np.flip(model_orders)
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders
+
+def remove_highly_uncertain_points(sysid_output,sysid_params):
+    """
+    Remove highly uncertain points
+
+    Args:
+        sysid_output (Dict[str, Any]): Results from PyOMA-2
+        sysid_params (Dict[str, Any]): Parameters
+
+    Returns:
+        frequencies (np.ndarray): Frequencies (mean)
+        cov_freq (np.ndarray): Covariance of frequency
+        damping_ratios (np.ndarray): Damping ratios (mean)
+        cov_damping (np.ndarray): Covariance of damping ratio
+        mode_shapes (np.ndarray): Mode shapes
+    """
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_complex_conjugates(sysid_output)
+
+    # =================== Removing highly uncertain poles =======================
+    freq_variance_treshold = sysid_params.get('freq_variance_treshold', 0.1)
+    damp_variance_treshold = sysid_params.get('damp_variance_treshold', 10**6)
+    frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies
+    damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios
+    indices_frequency = frequency_coefficient_variation > freq_variance_treshold
+    indices_damping = damping_coefficient_variation > damp_variance_treshold
+    above_nyquist = frequencies > sysid_params['Fs']/2
+    combined_indices = np.logical_or(np.logical_or(indices_frequency,indices_damping),above_nyquist)
+    frequencies[combined_indices] = np.nan
+    damping_ratios[combined_indices] = np.nan
+    cov_freq[combined_indices] = np.nan
+    cov_damping[combined_indices] = np.nan
+    mask = np.broadcast_to(np.expand_dims(combined_indices, axis=2), mode_shapes.shape)
+    mode_shapes[mask] = np.nan
+
+    return frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes
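+
+# Quick illustration of the coefficient-of-variation rule above (made-up
+# numbers): a pole whose frequency std/mean ratio exceeds the threshold is
+# blanked out with NaN.
+if __name__ == "__main__":
+    f = np.array([10.0, 12.0])
+    cov = np.array([0.01, 25.0])   # variances of the frequency estimates
+    cv = np.sqrt(cov) / f          # [0.01, 0.417]
+    f[cv > 0.1] = np.nan           # the second pole is rejected
+    print(f)                       # [10. nan]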
diff --git a/src/functions/plot_clusters.py b/src/functions/plot_clusters.py
new file mode 100644
index 0000000..feec388
--- /dev/null
+++ b/src/functions/plot_clusters.py
@@ -0,0 +1,115 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+import matplotlib.axes
+from functions.clean_sysid_output import remove_highly_uncertain_points
+from functions.plot_sysid import (add_scatter_data,add_plot_standard_flair,add_plot_annotation)
+plt.rcParams['font.family'] = 'Times New Roman'
+
+def plot_clusters(clusters: Dict[str,dict],
+                  sysid_results: Dict[str, Any],
+                  sysid_params: Dict[str, Any],
+                  fig_ax = None,
+                  legend = True)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]:
+    """
+    Plot stabilization diagram of clusters
+
+    Args:
+        clusters (Dict[str,dict]): Dictionary of clusters
+        sysid_results (Dict[str,Any]): PyOMA results
+        sysid_params (Dict[str,Any]): sysid parameters
+        fig_ax (Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]): fig and ax of plot to redraw
+        legend (bool): Whether to draw the legend
+    Returns:
+        fig_ax (Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]): fig and ax of plot
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12, 6), tight_layout=True)
+        title_number = 0
+    else:
+        fig, (ax1,ax2) = fig_ax
+        title = fig.axes[0].get_title()
+        ax1.clear()
+        ax2.clear()
+
+        iteration_number = title.split(' ')[-1]
+        title_number = int(iteration_number) + 1
+
+    # Pre-clean
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(sysid_results,sysid_params)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))])
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,None,error_dir="h",mark="^",lab='Non clustered',size=20)
+
+    for i, key in enumerate(clusters.keys()):
+        cluster = clusters[key]
+        MO = cluster['model_order']
+        ax1, col = add_scatter_cluster(ax1,cluster['f'],MO,cluster['cov_f'],i,error_dir="h")
+        ax1.vlines(np.median(cluster['f']),min(MO),
+                   max(MO),color=col)
+
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+    if legend is True:
+        ax1.legend(prop={'size': 10})
+    ax1.set_title(f"Data set: {title_number}")
+
+    # # # ............................................................................
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,None,error_dir="v", mark="^",size=20)
+
+    for i, key in enumerate(clusters.keys()):
+        cluster = clusters[key]
+        ax2, col = add_scatter_cluster(ax2,cluster['f'],cluster['d'],cluster['cov_d'],i,error_dir="v")
+
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    if y[~np.isnan(y)].shape[0] > 1:
+        ax2.set_ylim(0, max(max(y[~np.isnan(y)])+0.005,0.1))
+    else:
+        ax2.set_ylim(0, 0.1)
+
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def add_scatter_cluster(ax: matplotlib.axes.Axes, x: np.ndarray, y: np.ndarray, cov: np.ndarray, cluster_id: int = 0, error_dir: str = "h") -> Tuple[matplotlib.axes.Axes, Any]:
+    """
+    Add scatter plot of clusters to existing axes
+
+    Args:
+        ax (matplotlib.axes.Axes): ax from matplotlib
+        x (np.ndarray): x-axis data
+        y (np.ndarray): y-axis data
+        cov (np.ndarray): covariance for errorbars
+        cluster_id (int): Index of cluster for labeling
+        error_dir (str): Direction of errorbars, either "h" horizontal or "v" vertical
+
+    Returns:
+        ax (matplotlib.axes.Axes): The axes with the cluster drawn
+        col (Any): Facecolor of the scatter points, reused for related artists
+    """
+    sc = ax.scatter(x, y, marker="o", s=60, label=f'Cluster {cluster_id}')
+    col = sc.get_facecolors().tolist()
+    if cov is not None:
+        xerr = np.sqrt(cov) * 2
+        if error_dir == "h":
+            ax.errorbar(x, y, xerr=xerr, fmt="None", capsize=5, ecolor="gray", zorder=200)
+        else:
+            ax.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray", zorder=200)
+    return ax, col
\ No newline at end of file
diff --git a/src/functions/plot_mode_tracking.py b/src/functions/plot_mode_tracking.py
new file mode 100644
index 0000000..c3e25a9
--- /dev/null
+++ b/src/functions/plot_mode_tracking.py
@@ -0,0 +1,68 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+import matplotlib.axes
+plt.rcParams['font.family'] = 'Times New Roman'
+
+def plot_tracked_modes(
+        tracked_clusters: Dict[str, Any],
+        sysid_params: Dict[str, Any],
+        fig_ax: Any = None,
+        x_length: Any = None)-> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]:
+    """
+    Plot tracked modes
+
+    Args:
+        tracked_clusters (Dict[str, Any]): Tracked clusters
+        sysid_params (Dict[str, Any]): sysid parameters
+        fig_ax (Any): fig and ax of plot to redraw
+        x_length (Any): Number of datasets kept visible on the x-axis
+    Returns:
+        fig_ax (Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]): fig and ax of plot
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1) = plt.subplots(1,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1) = fig_ax
+        ax1.clear()
+
+    max_x = []
+    for key in tracked_clusters.keys():
+        if key == "iteration":
+            continue
+        tracked_cluster_list = tracked_clusters[key]
+        m_f = []
+        x = []
+        for cluster in tracked_cluster_list:
+            m_f.append(cluster['median_f'])
+            x.append(cluster['id'])
+
+        sc = ax1.scatter(x, m_f, marker="o", s=50)
+        col2 = sc.get_facecolors().tolist()
+        ax1.plot(x, m_f, color=col2[0])
+        max_x.append(max(x))
+
+    ax1.set_ylabel("Eigenfrequency [Hz]", fontsize=20, color = 'black')
+    ax1.set_xlabel("Dataset", fontsize=20, color = 'black')
+    ax1.tick_params(axis='both', which='major', labelsize=17)
+
+    ax1.set_ylim(0, sysid_params['Fs']/2)
+    if x_length is not None:
+        ax1.set_xlim(np.maximum(max(max_x)-x_length,0),max(max_x)+1)
+        ax1.set_xticks(np.arange(np.maximum(max(max_x)-x_length,0),
+                                 np.maximum(max(max_x)+1,x_length), 5))
+
+    # Add major and minor grid lines
+    ax1.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax1.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    fig.tight_layout()
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, ax1
diff --git a/src/functions/plot_sysid.py b/src/functions/plot_sysid.py
new file mode 100644
index 0000000..f3cc82c
--- /dev/null
+++ b/src/functions/plot_sysid.py
@@ -0,0 +1,197 @@
+from typing import Tuple, Dict, Any
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.figure
+import matplotlib.axes
+from functions.clean_sysid_output import (remove_complex_conjugates,remove_highly_uncertain_points)
+plt.rcParams['font.family'] = 'Times New Roman'
+
+
+def plot_pre_stabilization_diagram(
+        sysid_results: Dict[str, Any],
+        sysid_params: Dict[str, Any],
+        fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]:
+    """
+    Plot stabilization diagram of raw sysid data, before pre-cleaning
+
+    Args:
+        sysid_results (Dict[str, Any]): PyOMA results
+        sysid_params (Dict[str, Any]): sysid parameters
+        fig_ax (Tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (Tuple): fig and ax of plot
+    """
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_complex_conjugates(sysid_results)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))])
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,cov_freq,error_dir="h")
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+
+    # # # ............................................................................
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,cov_damping,error_dir="v")
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+    ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005)
+
+    fig.tight_layout()
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def plot_stabilization_diagram(
+        sysid_results: Dict[str, Any],
+        sysid_params: Dict[str, Any],
+        fig_ax)-> Tuple[matplotlib.figure.Figure, Tuple[matplotlib.axes.Axes,matplotlib.axes.Axes]]:
+    """
+    Plot stabilization diagram of sysid data after pre-cleaning
+
+    Args:
+        sysid_results (dict): PyOMA results
+        sysid_params (dict): sysid parameters
+        fig_ax (tuple): fig and ax of plot to redraw
+    Returns:
+        fig_ax (tuple): fig and ax of plot
+    """
+
+    if fig_ax is None:
+        plt.ion()
+        fig, (ax1,ax2) = plt.subplots(1,2,figsize=(8, 6), tight_layout=True)
+    else:
+        fig, (ax1,ax2) = fig_ax
+        ax1.clear()
+        ax2.clear()
+
+    # Pre-clean
+    frequencies, cov_freq, damping_ratios, cov_damping, _ = remove_highly_uncertain_points(sysid_results,sysid_params)
+
+    x = frequencies.flatten(order="f")
+    y_model_order = np.array([i // len(frequencies) for i in range(len(x))])
+
+    ax1 = add_scatter_data(ax1,x,y_model_order,cov_freq,error_dir="h")
+    ax1 = add_plot_standard_flair(ax1,sysid_params)
+
+    ax1.set_ylabel("Model order", fontsize=20, color = 'black')
+    ax1.set_ylim(0, sysid_params['model_order'] + 1)
+
+    # # # ............................................................................
+
+    x = frequencies.flatten(order="f")
+    y = damping_ratios.flatten(order="f")
+
+    ax2 = add_scatter_data(ax2,x,y,cov_damping,error_dir="v")
+    ax2 = add_plot_annotation(ax2,x,y,y_model_order)
+    ax2 = add_plot_standard_flair(ax2,sysid_params)
+
+    ax2.set_ylabel("Damping ratio", fontsize=20, color = 'black')
+    ax2.set_ylim(0, max(y[~np.isnan(y)])+0.005)
+
+    fig.canvas.draw()
+    fig.canvas.flush_events()
+
+    return fig, (ax1,ax2)
+
+
+def add_scatter_data(ax: matplotlib.axes.Axes, x: np.ndarray, y: np.ndarray, cov: np.ndarray, error_dir: str = "h", mark: str = "o", lab: str = 'Non clustered', size: float = 50) -> matplotlib.axes.Axes:
+    """
+    Add scatter plot of sysid results to existing axes
+
+    Args:
+        ax (matplotlib.axes.Axes): ax from matplotlib
+        x (np.ndarray): x-axis data
+        y (np.ndarray): y-axis data
+        cov (np.ndarray): covariance for errorbars
+        error_dir (str): Direction of errorbars, either "h" horizontal or "v" vertical
+        mark (str): marker type option
+        lab (str): Labeling for legend
+        size (float): Size of markers
+
+    Returns:
+        ax (matplotlib.axes.Axes): The axes with the data drawn
+    """
+    ax.scatter(x, y, marker=mark, s=size, c="r", label = lab)
+    if cov is not None:
+        xerr = np.sqrt(cov) * 2
+        xerr = xerr.flatten(order="f")
+        if error_dir == "h":
+            ax.errorbar(x, y, xerr=xerr, fmt="None", capsize=5, ecolor="gray")
+        else:
+            ax.errorbar(x, y, yerr=xerr, fmt="None", capsize=5, ecolor="gray")
+    return ax
+
+def add_plot_standard_flair(ax: matplotlib.axes.Axes, sysid_params: Dict[str,Any]) -> matplotlib.axes.Axes:
+    """
+    Add labels, grid and limits to existing axes
+
+    Args:
+        ax (matplotlib.axes.Axes): ax from matplotlib
+        sysid_params (Dict[str, Any]): sysid parameters
+    Returns:
+        ax (matplotlib.axes.Axes): The decorated axes
+    """
+    ax.set_xlabel("Frequency [Hz]", fontsize=20, color = 'black')
+    ax.tick_params(axis='both', which='major', labelsize=17)
+
+    ax.set_xlim(0, sysid_params['Fs']/2)
+
+    # Add major and minor grid lines
+    ax.grid(which='major', color='gray', linestyle='-', linewidth=0.5)
+    ax.grid(which='minor', color='lightgray', linestyle='--', linewidth=0.3)
+
+    return ax
+
+def add_plot_annotation(ax: matplotlib.axes.Axes, x: np.ndarray, y: np.ndarray, y_model_order: np.ndarray) -> matplotlib.axes.Axes:
+    """
+    Add model order annotations
+
+    Args:
+        ax (matplotlib.axes.Axes): ax from matplotlib
+        x (np.ndarray): x-axis data
+        y (np.ndarray): y-axis data
+        y_model_order (np.ndarray): Model order data
+
+    Returns:
+        ax (matplotlib.axes.Axes): The annotated axes
+    """
+    for i, txt in enumerate(y_model_order):
+        ax.annotate(str(txt), (x[i], y[i]))
+    return ax
diff --git a/src/methods/constants.py b/src/methods/constants.py
index 3309c56..d376b5e 100644
--- a/src/methods/constants.py
+++ b/src/methods/constants.py
@@ -6,17 +6,39 @@ MIN_SAMPLES_NEEDED = 540  # Minimum samples for running sysid
-BLOCK_SHIFT = 30
-
-MODEL_ORDER = 20
-
-# Constants for Model track
-MSTAB_FACTOR = 0.4 # This is goning to be multiplied by the MODEL_ORDER to get the mstab
-TMAC = 0.9
-
 # Constants for Model Update
 # 1st parameter is spring stiffness and 2nd is unbounded length
 X0 = np.array([1e1, 10e-3])
 
 # Create bounds using element-wise i.e. different parameters have different bounds
 BOUNDS = [(1e-2 * X0[0], 1e2 * X0[0]), (1e-2 * X0[1], 1e2 * X0[1])]
+
+
+# Parameters
+PARAMS = {}
+
+# Pre-clean
+PARAMS['freq_variance_treshold'] = 0.1
+PARAMS['damp_variance_treshold'] = 10**6
+
+PARAMS['Fs'] = 256                               # Sample frequency
+PARAMS['model_order_min'] = 2                    # Minimum model order
+PARAMS['model_order'] = 15                       # Maximum model order for the analysis
+PARAMS['block_shift'] = 30                       # Block size in the Hankel matrix
+PARAMS['sensor_order'] = np.array([0, 2, 1, 3])  # Sensor locations in the data
+
+# Params for clustering:
+PARAMS['mstab'] = 6                        # Minimum number of poles required to validate a cluster
+PARAMS['tMAC'] = 0.95                      # MAC threshold for inclusion in a cluster
+PARAMS['bound_multiplier'] = 2             # Standard deviation multiplier
+PARAMS['allignment_factor'] = [0.05,0.01]  # Factors for cluster alignment/merging
+
+# Params for mode tracking
+PARAMS['phi_cri'] = 0.8   #0.98 # MAC criterion [%]
+PARAMS['freq_cri'] = 0.2  #0.2  # Frequency difference criterion [%]
+PARAMS['obj_cri'] = 0.1
+# If several clusters match and it is not clear which cluster is best, check
+# whether the objective function values differ by less than this criterion;
+# in that case, prefer the cluster with the highest MAC rather than the
+# smallest frequency difference.
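+
+# Example of overriding a single parameter for one run without editing this
+# module (hypothetical usage; the examples import PARAMS directly):
+#
+#     from methods.constants import PARAMS
+#     params = {**PARAMS, 'model_order': 20}  # deeper stabilization diagram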
Code: {reason_code}") + + +def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) -> None: + """Callback when a message is received.""" + global sysid_output_global + print(f"Message received on topic: {msg.topic}") + try: + raw = json.loads(msg.payload.decode("utf-8")) + sysid_output = _convert_sysid_output(raw["sysid_output"]) + timestamp = raw["timestamp"] + print(f"Received sysid data at timestamp: {timestamp}") + sysid_output_global = sysid_output + result_ready.set() + except Exception as e: + print(f"Error processing sysid message: {e}") + + +def cluster_sysid(sysid_output: Any, params: dict[str,Any]) -> Tuple[dict[str,Any], np.ndarray]: + """ + Runs the mode clustering algorithm. + + Args: + sysid_output (Any): sysid output from subscription or elsewhere. + Returns: + cluster_dict (dict[str,Any]), + median_frequencies (np.ndarray), + """ + dictionary_clusters = cluster_func(sysid_output, params) + + median_frequencies = np.array([dictionary_clusters[key]["median_f"] + for key in dictionary_clusters.keys()]) + return dictionary_clusters, median_frequencies + +def subscribe_and_cluster(config_path: str, params: Dict[str,Any] + ) -> Tuple[Dict[str,Any], Dict[str,Any]]: + """ + Subscribes to MQTT broker, receives one sysid message, runs mode clustering, and returns results. + + Args: + config_path (str): Path to config JSON. + params (Dict[str,Any]): clustering parameters + + Returns: + sysid_output_global (Dict[str,Any]): sysid output + clusters (Dict[str,Any]]): Clusters + """ + global sysid_output_global + sysid_output_global = None # Reset in case old data is present + result_ready.clear() + + config = load_config(config_path) + mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) + + mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) + mqtt_client.on_connect = _on_connect + mqtt_client.on_message = _on_message + mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) + mqtt_client.loop_start() + print("Waiting for sysid data...") + try: + result_ready.wait() # Wait until message arrives + mqtt_client.loop_stop() + mqtt_client.disconnect() + + if sysid_output_global is None: + raise RuntimeError("Failed to receive sysid data.") + + print("sysid data received. Running mode clustering and tracking...") + clusters, median_frequencies = cluster_sysid(sysid_output_global,params) + print("Clustered frequencies", median_frequencies) + + except KeyboardInterrupt: + print("Shutting down gracefully") + mqtt_client.loop_stop() + mqtt_client.disconnect() + except Exception as e: + print(f"Unexpected error: {e}") + + return sysid_output_global, clusters, median_frequencies + + +def live_mode_clustering(config_path: str, topic_index: int = 0, + plot: np.ndarray[bool] = np.array([1,1]) + ) -> Tuple[List[Dict], np.ndarray, np.ndarray]: + """ + Subscribes to MQTT broker, receives one sysid message, runs mode clustering, plots results. Continue until stopped. + + Args: + config_path (str): Path to config JSON. 
+
+def live_mode_clustering(config_path: str, topic_index: int = 0,
+                         plot: np.ndarray = np.array([1,1])
+                         ) -> None:
+    """
+    Subscribes to the MQTT broker; for each received sysid message, runs mode clustering and plots the results. Continues until stopped.
+
+    Args:
+        config_path (str): Path to config JSON.
+        topic_index (int): Topic to subscribe to
+        plot (np.ndarray): Flags selecting which plots to show
+
+    Returns:
+        None: runs until interrupted.
+    """
+    global sysid_output_global
+    sysid_output_global = None  # Reset in case old data is present
+    result_ready.clear()
+
+    config = load_config(config_path)
+    mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index)
+
+    mqtt_client.user_data_set({"topic": selected_topic, "qos": 0})
+    mqtt_client.on_connect = _on_connect
+    mqtt_client.on_message = _on_message
+    mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60)
+    mqtt_client.loop_start()
+
+    fig_ax1 = None
+    fig_ax2 = None
+    while True:
+        try:
+            print("Waiting for sysid data...")
+            result_ready.wait()  # Wait until a message arrives
+
+            if sysid_output_global is None:
+                raise RuntimeError("Failed to receive sysid data.")
+
+            print("sysid data received. Running mode clustering and tracking...")
+            result_ready.clear()
+
+            if plot[0] == 1:
+                fig_ax1 = plot_stabilization_diagram(sysid_output_global,PARAMS,fig_ax=fig_ax1)
+                plt.show(block=False)
+
+            clusters, median_frequencies = cluster_sysid(sysid_output_global,PARAMS)
+            print("Clustered frequencies", median_frequencies)
+
+            if plot[1] == 1:
+                fig_ax2 = plot_clusters(clusters,sysid_output_global,PARAMS,fig_ax=fig_ax2)
+                plt.show(block=False)
+
+            sys.stdout.flush()
+        except KeyboardInterrupt:
+            print("Shutting down gracefully")
+            mqtt_client.loop_stop()
+            mqtt_client.disconnect()
+            break
+        except Exception as e:
+            print(f"Unexpected error: {e}")
\ No newline at end of file
diff --git a/src/methods/mode_clustering_functions/__init__.py b/src/methods/mode_clustering_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/methods/mode_clustering_functions/align_clusters.py b/src/methods/mode_clustering_functions/align_clusters.py
new file mode 100644
index 0000000..3b970e3
--- /dev/null
+++ b/src/methods/mode_clustering_functions/align_clusters.py
@@ -0,0 +1,161 @@
+from typing import Any
+import numpy as np
+from functions.calculate_mac import calculate_mac
+
+def alignment(cluster_dict: dict[str,dict], Params: dict[str,Any]) -> dict[str,dict]:
+    """
+    Alignment/merging of clusters
+
+    Args:
+        cluster_dict (dict): Dictionary of multiple clusters
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster_dict (dict): Dictionary of aligned clusters
+    """
+    median_f = []
+    for key in cluster_dict.keys():  # Find the median of each cluster
+        cluster = cluster_dict[key]
+        median_f.append(np.median(cluster['f']))
+    median_f = np.array(median_f)
+
+    deleted_cluster_id = []
+    for ii, m_f in enumerate(median_f):  # Go through all medians
+        if ii in deleted_cluster_id:  # Skip clusters that have already been merged away
+            continue
+        # Calculate the absolute difference between the selected median and all medians
+        diff = abs(median_f-m_f)
+        # A cluster is a merge candidate if this difference is above 0 (not itself) and
+        # inside the bound. The bound is the minimum of either
+        # median_f * allignment_factor[0] or (Fs / 2) * allignment_factor[1]:
+        # for lower median frequencies the bound is determined by the size of the median frequency,
+        # for higher median frequencies the bound is determined by the sampling frequency.
+
+        mask = (diff > 0) & (diff < min(m_f*Params['allignment_factor'][0],Params['Fs']/2*Params['allignment_factor'][1]))
+        indices = np.argwhere(mask)  # Indices of clusters that are closely located in frequency
+
+        if indices.shape[0] > 0:  # If one or more candidate clusters are found
+            ids = indices[:,0]
+            for id in ids:  # Go through all closely located clusters
+                if id in deleted_cluster_id:
+                    continue
+
+                cluster1 = cluster_dict[str(ii)]  # Parent cluster
+                cluster2 = cluster_dict[str(id)]  # Co-located cluster
+
+                # Check the mode shape of the first pole in each cluster
+                MAC = calculate_mac(cluster1['mode_shapes'][0],cluster2['mode_shapes'][0])
+                if MAC >= Params['tMAC']:  # If the MAC meets the criterion, join the two clusters
+                    cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                    cluster_dict[str(ii)] = cluster  # Save the new, larger cluster
+                    if len(cluster_remaining) == 0:  # If the remaining cluster is empty
+                        cluster_dict.pop(str(id), None)  # Remove the co-located cluster
+                        deleted_cluster_id.append(int(id))  # Record the deleted cluster id
+                    else:
+                        cluster_dict[str(id)] = cluster_remaining  # Save the remaining cluster
+
+                else:  # Check whether the mode shapes across any pair of poles meet the MAC criterion
+
+                    MAC = np.zeros((cluster1['mode_shapes'].shape[0],cluster2['mode_shapes'].shape[0]))
+                    for jj, ms1 in enumerate(cluster1['mode_shapes']):
+                        for kk, ms2 in enumerate(cluster2['mode_shapes']):
+                            MAC[jj,kk] = calculate_mac(ms1,ms2)
+                    if MAC.max() >= Params['tMAC']:  # If the MAC criterion is met, join the clusters
+                        cluster, cluster_remaining = join_clusters(cluster_dict[str(ii)],cluster_dict[str(id)],Params)
+                        cluster_dict[str(ii)] = cluster  # Save the new, larger cluster
+                        if len(cluster_remaining) == 0:  # If the remaining cluster is empty
+                            cluster_dict.pop(str(id), None)  # Remove the co-located cluster
+                            deleted_cluster_id.append(int(id))  # Record the deleted cluster id
+                        else:
+                            cluster_dict[str(id)] = cluster_remaining  # Save the remaining cluster
+
+    cluster_dict_alligned = cluster_dict
+    return cluster_dict_alligned
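+
+# Numeric illustration of the merge bound (using the values in
+# methods.constants.PARAMS: Fs = 256, allignment_factor = [0.05, 0.01]):
+# a cluster with median 8 Hz merges with neighbours closer than
+# min(8*0.05, 128*0.01) = 0.4 Hz, while one at 60 Hz uses
+# min(60*0.05, 128*0.01) = 1.28 Hz.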
+
+def join_clusters(cluster_1: dict[str,Any], cluster_2: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Join two clusters
+
+    Args:
+        cluster_1 (dict): Cluster
+        cluster_2 (dict): Cluster
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Joined cluster
+        cluster_remaining (dict): The poles that remain outside the joined cluster
+    """
+    # Adding two clusters together
+    cluster = {}
+    cluster_remaining = {}
+    row1 = cluster_1['row']
+    row2 = cluster_2['row']
+
+    # Should the dominant cluster instead be the one that reaches the highest model orders?
+    if row1.shape[0] >= row2.shape[0]:  # Let the largest cluster be the dominant one
+        cluster1 = cluster_1
+        cluster2 = cluster_2
+        row1 = cluster_1['row']
+        row2 = cluster_2['row']
+    else:
+        cluster1 = cluster_2
+        cluster2 = cluster_1
+        row1 = cluster_2['row']
+        row2 = cluster_1['row']
+
+    median_f1 = np.median(cluster1['f'])
+
+    for MO in range(Params['model_order']):  # Go through all model orders
+        jj = np.argwhere(row1 == MO)
+        id = np.argwhere(row2 == MO)
+        if MO in row1:  # The dominant cluster has a pole at this model order
+            if MO in row2:  # The other cluster also has a pole at this model order
+                # Get the frequencies of the poles
+                f1 = cluster1['f'][jj[:,0]]
+                f2 = cluster2['f'][id[:,0]]
+                if abs(median_f1-f2) >= abs(median_f1-f1):  # The pole of cluster 1 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster2,id[:,0])
+                else:  # The pole of cluster 2 is closer to the median of cluster 1
+                    cluster = append_cluster_data(cluster,cluster2,id[:,0])
+                    cluster_remaining = append_cluster_data(cluster_remaining,cluster1,jj[:,0])
+            else:  # Only the dominant cluster has a pole here
+                cluster = append_cluster_data(cluster,cluster1,jj[:,0])
+        elif MO in row2:  # Only the smaller cluster has a pole at this model order
+            cluster = append_cluster_data(cluster,cluster2,id[:,0])
+
+    return cluster, cluster_remaining
+
+def append_cluster_data(cluster: dict[str,Any], cluster2: dict[str,Any], id: int) -> dict[str,Any]:
+    """
+    Add cluster data to an existing cluster
+
+    Args:
+        cluster (dict): Existing cluster
+        cluster2 (dict): Cluster to take data from
+        id (int): id of the data to append
+    Returns:
+        cluster (dict): Cluster
+    """
+    if len(cluster) == 0:  # If it is the first pole
+        cluster['f'] = cluster2['f'][id]
+        cluster['cov_f'] = cluster2['cov_f'][id]
+        cluster['d'] = cluster2['d'][id]
+        cluster['cov_d'] = cluster2['cov_d'][id]
+        cluster['mode_shapes'] = cluster2['mode_shapes'][id,:]
+        cluster['MAC'] = cluster2['MAC'][id]
+        cluster['model_order'] = cluster2['model_order'][id]
+        cluster['row'] = cluster2['row'][id]
+        cluster['col'] = cluster2['col'][id]
+    else:
+        cluster['f'] = np.append(cluster['f'],cluster2['f'][id])
+        cluster['cov_f'] = np.append(cluster['cov_f'],cluster2['cov_f'][id])
+        cluster['d'] = np.append(cluster['d'],cluster2['d'][id])
+        cluster['cov_d'] = np.append(cluster['cov_d'],cluster2['cov_d'][id])
+        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],cluster2['mode_shapes'][id,:]))
+        cluster['MAC'] = np.append(cluster['MAC'],cluster2['MAC'][id])
+        cluster['model_order'] = np.append(cluster['model_order'],cluster2['model_order'][id])
+        cluster['row'] = np.append(cluster['row'],cluster2['row'][id])
+        cluster['col'] = np.append(cluster['col'],cluster2['col'][id])
+    return cluster
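+
+# For reference: a cluster is a plain dict holding per-pole arrays under the
+# keys 'f', 'cov_f', 'd', 'cov_d', 'mode_shapes', 'MAC', 'model_order',
+# 'row', 'col'; clustering.cluster_func later adds 'median_f', 'ci_f', 'ci_d'.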
+
+
+# Following the algorithm proposed here: https://doi.org/10.1007/978-3-031-61421-7_56
+# JVM 22/10/2025
+
+def cluster_func(sysid_output: dict[str,Any], Params : dict[str,Any]) -> dict[str,Any]:
+    """
+    Clustering of OMA results
+
+    Args:
+        sysid_output (dict): PyOMA results
+        Params (dict): Algorithm parameters
+    Returns:
+        cluster_dict4 (dict): Clusters after clustering, alignment, a cardinality check, and sorting by ascending median frequency
+
+    """
+
+    #Preliminary cleaning
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes = remove_highly_uncertain_points(sysid_output,Params)
+
+    frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes2, model_orders = transform_sysid_features(frequencies, cov_freq, damping_ratios, cov_damping, mode_shapes)
+
+    row, col = np.indices(model_orders.shape)
+    row = row.flatten(order="C")
+    col = col.flatten(order="C")
+
+    #Initiate data
+    data1 = {'frequencies':frequencies,
+             'damping_ratios':damping_ratios,
+             'cov_f':cov_freq,
+             'cov_d':cov_damping,
+             'mode_shapes':mode_shapes2,
+             'row':row,
+             'col':col}
+
+    cluster_dict = {}
+    cluster_counter = 0
+    for count, f in enumerate(frequencies.flatten(order="F")):
+        #Extract data
+        frequencies = data1['frequencies']
+        damping_ratios = data1['damping_ratios']
+        cov_freq = data1['cov_f']
+        cov_damping = data1['cov_d']
+
+        #Initial point
+        r = row[count]
+        c = col[count]
+        ip = [frequencies[r,c],cov_freq[r,c],damping_ratios[r,c],cov_damping[r,c]]
+
+        if np.isnan(ip[0]): #Skip the pole if it does not exist
+            pass
+        else:
+            initial_points = cluster_initial(ip,data1) #Algorithm 1, step 3 - Initialization
+
+            #Creating clusters
+            cluster1 = cluster_creation(initial_points,Params)
+
+            data2 = data1.copy()
+
+            # Cluster expansion
+            expansion = True
+            kk = 0
+            while expansion:
+                kk += 1
+                if kk > 10:
+                    raise RuntimeError("Expansion never ends, something is wrong.")
+                pre_cluster = cluster1
+                cluster2 = cluster_expansion(cluster1,data2,Params)
+                if cluster2['f'].shape == pre_cluster['f'].shape:
+                    if (cluster2['f'] == pre_cluster['f']).all():
+                        expansion = False
+                    else:
+                        cluster1 = cluster2
+                else:
+                    cluster1 = cluster2
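+            # The expansion loop is a fixed-point iteration: a pass that adds no new
+            # poles leaves cluster2['f'] unchanged, which ends the loop; the cap of
+            # 10 iterations is a safety net against non-convergence.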
+
+            #Sort if more than one pole exists in the cluster
+            if isinstance(cluster2['f'],np.ndarray):
+                cluster2 = sort_cluster(cluster2)
+
+            #Save cluster
+            if isinstance(cluster2['f'],np.ndarray): #Must have at least two poles
+                cluster_dict[str(cluster_counter)] = cluster2
+                cluster_counter += 1
+                data1 = remove_data_from_S(data2,cluster2) #Remove clustered poles from data
+            else:
+                print("cluster2 too short:",1,"But must be:",Params['mstab'])
+
+
+    #Alignment or merging of stacked clusters
+    cluster_dict2 = alignment(cluster_dict.copy(),Params)
+
+    #Custom cardinality check
+    cluster_dict3 = {}
+    cluster_counter = 0
+    for ii, key in enumerate(list(cluster_dict2.keys())): #Iterate over a copy of the keys, since clusters may be popped below
+        cluster = cluster_dict2[key]
+        if isinstance(cluster['f'],np.ndarray):
+            if cluster['f'].shape[0] < Params['mstab']:
+                print("cluster", np.median(cluster['f']),"too short:",cluster['f'].shape[0],"But must be:",Params['mstab'])
+            else:
+                print("Cluster saved:", np.median(cluster['f']))
+                cluster_dict3[str(ii)] = cluster
+                cluster_counter += 1
+                data1 = remove_data_from_S(data2,cluster) #Remove clustered poles from data
+        else:
+            print("cluster too short:",1,"But must be:",Params['mstab'])
+            cluster_dict2.pop(key)
+
+    #Add median and confidence intervals (one sided) to cluster data
+    for key in cluster_dict3.keys():
+        cluster = cluster_dict3[key]
+        cluster['median_f'] = np.median(cluster['f'])
+        ci_f = np.sqrt(cluster['cov_f']) * Params['bound_multiplier']
+        ci_d = np.sqrt(cluster['cov_d']) * Params['bound_multiplier']
+        cluster['ci_f'] = ci_f
+        cluster['ci_d'] = ci_d
+
+    #Sort the clusters into ascending order of median frequency
+    median_frequencies = np.zeros(len(cluster_dict3))
+    for ii, key in enumerate(cluster_dict3.keys()):
+        cluster = cluster_dict3[key]
+        median_frequencies[ii] = cluster['median_f']
+
+    indices = np.argsort(median_frequencies)
+    cluster_dict4 = {}
+    for ii, id in enumerate(np.array(list(cluster_dict3.keys()))[indices]): #Re-key the clusters from 0 upwards in ascending frequency order
+        cluster_dict4[ii] = cluster_dict3[id] #Insert a cluster into a key
+
+    return cluster_dict4
+
+def remove_data_from_S(data: dict[str,Any],cluster: dict[str,Any]) -> dict[str,Any]:
+    """
+    Remove cluster from data or S
+
+    Args:
+        data (dict): OMA points data
+        cluster (dict): Cluster
+    Returns:
+        data2 (dict): Filtered OMA points data
+
+    """
+    #Copy data
+    frequencies = data['frequencies'].copy()
+    damping_ratios = data['damping_ratios'].copy()
+    cov_freq = data['cov_f'].copy()
+    cov_damping = data['cov_d'].copy()
+    mode_shapes = data['mode_shapes'].copy()
+    row = data['row'].copy()
+    col = data['col'].copy()
+    #Make new data dictionary
+    data2 = {'frequencies':frequencies,
+             'damping_ratios':damping_ratios,
+             'cov_f':cov_freq,
+             'cov_d':cov_damping,
+             'mode_shapes':mode_shapes,
+             'row':row,
+             'col':col}
+    #Remove data
+    row = cluster['row']
+    col = cluster['col']
+    for ii, r in enumerate(row):
+        c = col[ii]
+        data2['frequencies'][r,c] = np.nan
+        data2['damping_ratios'][r,c] = np.nan
+        data2['cov_f'][r,c] = np.nan
+        data2['cov_d'][r,c] = np.nan
+        data2['mode_shapes'][r,c,:] = np.nan
+
+    return data2
+
+def sort_cluster(cluster: dict[str,Any]) -> dict[str,Any]:
+    """
+    Sort cluster based on row/model order
+
+    Args:
+        cluster (dict): Cluster
+    Returns:
+        cluster (dict): Sorted cluster
+
+    """
+    sort_id = np.argsort(cluster['row'])
+
+    cluster['f'] = cluster['f'][sort_id]
+    cluster['cov_f'] = cluster['cov_f'][sort_id]
+    cluster['d'] = cluster['d'][sort_id]
+    cluster['cov_d'] = cluster['cov_d'][sort_id]
+    cluster['mode_shapes'] = cluster['mode_shapes'][sort_id,:]
+    cluster['MAC'] = cluster['MAC'][sort_id]
+    cluster['model_order'] = cluster['model_order'][sort_id]
+    cluster['row'] = cluster['row'][sort_id]
+    cluster['col'] = cluster['col'][sort_id]
+
+    return cluster
\ No newline at end of file
diff --git a/src/methods/mode_clustering_functions/create_cluster.py b/src/methods/mode_clustering_functions/create_cluster.py
new file mode 100644
index 0000000..e020b74
--- /dev/null
+++ b/src/methods/mode_clustering_functions/create_cluster.py
@@ -0,0 +1,397 @@
+import numpy as np
+from typing import Any, Dict
+from functions.calculate_mac import calculate_mac
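+# calculate_mac is assumed to implement the standard Modal Assurance Criterion
+# (this matches its use throughout this repository):
+#
+#   MAC(phi_1, phi_2) = |phi_1^H phi_2|^2 / ((phi_1^H phi_1) * (phi_2^H phi_2))
+#
+# MAC is 1 for collinear (identical up to scaling) mode shapes and tends to 0
+# for orthogonal ones; Params['tMAC'] is the acceptance threshold used below.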
+
+def cluster_creation(IP: Dict[str,Any],Params: Dict[str,Any]) -> Dict[str,Any]: #Algorithm 2
+    """
+    Create cluster
+
+    Args:
+        IP (dict): Dictionary of data on initial points
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Cluster
+
+    """ #Algorithm 2
+    #Extract data:
+    frequencies = IP['f']
+    cov_f = IP['cov_f']
+    damping_ratios = IP['d']
+    cov_d = IP['cov_d']
+    mode_shapes = IP['ms']
+    row = IP['row']
+    col = IP['col']
+
+    IPu = {}
+    if len(row) != len(set(row)): #line 5 in algorithm #If there are multiple points at the same model order
+        for ii, id in enumerate(row): #Go through all rows/model orders
+            pos = np.argwhere(row==id) #Locate the indices of one or more poles
+            #line 6 in algorithm
+            if len(pos) == 1: #If only one pole exists at the model order
+                if len(IPu) == 0: #First pole
+                    IPu['f'] = frequencies[ii]
+                    IPu['cov_f'] = cov_f[ii]
+                    IPu['d'] = damping_ratios[ii]
+                    IPu['cov_d'] = cov_d[ii]
+                    IPu['ms'] = np.array((mode_shapes[ii,:]))
+                    IPu['row'] = row[ii]
+                    IPu['col'] = col[ii]
+                else:
+                    IPu['f'] = np.append(IPu['f'],frequencies[ii])
+                    IPu['cov_f'] = np.append(IPu['cov_f'],cov_f[ii])
+                    IPu['d'] = np.append(IPu['d'],damping_ratios[ii])
+                    IPu['cov_d'] = np.append(IPu['cov_d'],cov_d[ii])
+                    IPu['ms'] = np.vstack((IPu['ms'],mode_shapes[ii,:]))
+                    IPu['row'] = np.append(IPu['row'],row[ii])
+                    IPu['col'] = np.append(IPu['col'],col[ii])
+
+        if len(IPu) > 0: #If there exist model orders with unique poles
+            if isinstance(IPu['f'], np.float64): #Only a single unique pole exists
+                cluster = {'f':np.array([IPu['f']]),
+                           'cov_f':np.array([IPu['cov_f']]),
+                           'd':np.array([IPu['d']]),
+                           'cov_d':np.array([IPu['cov_d']]),
+                           'mode_shapes':np.array([IPu['ms']]),
+                           'model_order':np.array([Params['model_order']-IPu['row']]),
+                           'row':np.array([IPu['row']]),
+                           'col':np.array([IPu['col']]),
+                           'MAC':np.array([1])}
+
+            else: #If several unique poles exist
+                cluster = {'f':np.array([IPu['f'][0]]),
+                           'cov_f':np.array([IPu['cov_f'][0]]),
+                           'd':np.array([IPu['d'][0]]),
+                           'cov_d':np.array([IPu['cov_d'][0]]),
+                           'mode_shapes':np.array([IPu['ms'][0,:]]),
+                           'model_order':np.array([Params['model_order']-IPu['row'][0]]),
+                           'row':np.array([IPu['row'][0]]),
+                           'col':np.array([IPu['col'][0]]),
+                           'MAC':np.array([1])}
+
+            cluster, non_clustered_IPu = cluster_from_mac(cluster,IPu,Params) #Cluster the unique poles
+
+        else: #If no unique poles exist, proceed with just the initial point, ip
+            #Only the initial point is clustered
+            cluster = {'f':np.array([frequencies[0]]),
+                       'cov_f':np.array([cov_f[0]]),
+                       'd':np.array([damping_ratios[0]]),
+                       'cov_d':np.array([cov_d[0]]),
+                       'mode_shapes':np.array([mode_shapes[0,:]]),
+                       'model_order':np.array([Params['model_order']-row[0]]),
+                       'row':np.array([row[0]]),
+                       'col':np.array([col[0]]),
+                       'MAC':np.array([1])}
+
+            #Check if there are multiple points with the same model order as ip
+            ip_ids = np.argwhere(row==row[0])
+            if len(ip_ids[:,0]) > 1: #Remove all the other points at the same model order
+                for ii in np.flip(ip_ids[:,0]):
+                    frequencies = np.delete(frequencies,ii)
+                    cov_f = np.delete(cov_f,ii)
+                    damping_ratios = np.delete(damping_ratios,ii)
+                    cov_d = np.delete(cov_d,ii)
+                    mode_shapes = np.delete(mode_shapes,ii,axis=0)
+                    row = np.delete(row,ii)
+                    col = np.delete(col,ii)
+
+        if len(row) != len(set(row)): #If there are still points at the same model order in IP
+            IPm = {}
+            for ii, id in enumerate(row): #Go through all rows/model orders
+                pos = np.argwhere(row==id) #Locate the indices of one or more poles
+                #line 6 in algorithm
+                if len(pos) > 1: #If more than one pole exists for the model order
+                    if len(IPm) == 0: #First pole
+                        IPm['f'] = frequencies[ii]
+                        IPm['cov_f'] = cov_f[ii]
+                        IPm['d'] = damping_ratios[ii]
+                        IPm['cov_d'] = cov_d[ii]
+                        IPm['ms'] = np.array((mode_shapes[ii,:]))
+                        IPm['row'] = row[ii]
+                        IPm['col'] = col[ii]
+                    else:
+                        IPm['f'] = np.append(IPm['f'],frequencies[ii])
+                        IPm['cov_f'] = np.append(IPm['cov_f'],cov_f[ii])
+                        IPm['d'] = np.append(IPm['d'],damping_ratios[ii])
+                        IPm['cov_d'] = np.append(IPm['cov_d'],cov_d[ii])
+                        IPm['ms'] = np.vstack((IPm['ms'],np.array(mode_shapes[ii,:])))
+                        IPm['row'] = np.append(IPm['row'],row[ii])
+                        IPm['col'] = np.append(IPm['col'],col[ii])
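+            # At this point the initial points are split in two groups: IPu holds the
+            # model orders with exactly one candidate pole, while IPm holds the model
+            # orders where several poles compete; the competing poles are resolved
+            # against the cluster of unique poles by MAC below.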
+            # After the unique poles are clustered, the multiple poles are clustered
+            cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,IPm,Params)
+
+            #Start while loop
+            cluster_len_before = 0
+            while len(cluster['row']) != cluster_len_before:
+                cluster_len_before = len(cluster['row'])
+                try:
+                    if len(non_clustered_IPu['row']) > 0:
+                        cluster, non_clustered_IPu = cluster_from_mac(cluster,non_clustered_IPu,Params) #Cluster the unique poles again
+                except NameError: #non_clustered_IPu is undefined when no unique poles existed
+                    pass
+                if len(non_clustered_IPm['row']) > 0:
+                    cluster, non_clustered_IPm = cluster_from_mac_IPm(cluster,non_clustered_IPm,Params) #Cluster the non-unique poles again
+
+    else: #line 1 in algorithm: only unique poles
+        cluster = {'f':np.array([frequencies[0]]),
+                   'cov_f':np.array([cov_f[0]]),
+                   'd':np.array([damping_ratios[0]]),
+                   'cov_d':np.array([cov_d[0]]),
+                   'mode_shapes':np.array([mode_shapes[0,:]]),
+                   'model_order':np.array([Params['model_order']-row[0]]),
+                   'row':np.array([row[0]]),
+                   'col':np.array([col[0]]),
+                   'MAC':np.array([1])}
+        if IP['f'].shape[0] > 1:
+            cluster, _ = cluster_from_mac(cluster,IP,Params)
+
+    return cluster
+
+def cluster_from_mac(cluster: Dict[str,Any], IP: Dict[str,Any], Params: Dict[str,Any]) -> tuple[Dict[str,Any], Dict[str,Any]]:
+    """
+    Add points to cluster based on MAC
+
+    Args:
+        cluster (dict): Intermediate cluster
+        IP (dict): Dictionary of data on initial points
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Intermediate cluster
+        unclustered_IPu (dict): Initial points that were not clustered
+
+    """
+
+    #Extract data
+    frequencies = IP['f']
+    cov_f = IP['cov_f']
+    damping_ratios = IP['d']
+    cov_d = IP['cov_d']
+    mode_shapes = IP['ms']
+    row = IP['row']
+    col = IP['col']
+
+    ip_ms = IP['ms'][0]
+    i_ms = IP['ms'][1:]
+    f_ip = frequencies[0]
+    f_i = frequencies[1:]
+    row_i = row[1:]
+
+    skip_id = []
+
+    for jj, ms in enumerate(i_ms): #Go through all candidate mode shapes in IP
+        idx = jj+1
+        MAC = calculate_mac(ip_ms,ms) #Does the mode shape match that of the first pole?
+        if MAC > Params['tMAC']: #line 2 in algorithm
+            #Add to cluster
+            cluster['f'] = np.append(cluster['f'],frequencies[idx])
+            cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+            cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+            cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+            cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+            cluster['MAC'] = np.append(cluster['MAC'],MAC)
+            cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+            cluster['row'] = np.append(cluster['row'],row[idx])
+            cluster['col'] = np.append(cluster['col'],col[idx])
+
+            skip_id.append(idx)
+
+    #Compare remaining points with newly added cluster points, i.e. points are compared with the full cluster, not just ip
+    if cluster['f'].shape[0] > 1: #Proceed if points have been added to cluster
+        if IP['ms'].shape[0] > len(skip_id): #If there are more points left to compare, then proceed
+            cluster_length = len(cluster['row'])
+            new_cluster_length = 0
+            while cluster_length != new_cluster_length: #Run until no points are clustered anymore
+                cluster_length = len(cluster['row'])
+
+                i_ms = IP['ms'][1:]
+                for jj, ms in enumerate(i_ms):
+                    idx = jj+1
+                    if idx in skip_id: #Skip indices of points that have already been added
+                        continue
+
+                    MAC_list = []
+                    for c_ms in cluster['mode_shapes']:
+                        MAC_list.append(calculate_mac(c_ms,ms))
+
+                    if max(MAC_list) > Params['tMAC']: #line 2 in algorithm
+                        MAC = calculate_mac(ip_ms,ms)
+                        #Add to cluster
+                        cluster['f'] = np.append(cluster['f'],frequencies[idx])
+                        cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[idx])
+                        cluster['d'] = np.append(cluster['d'],damping_ratios[idx])
+                        cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[idx])
+                        cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[idx,:])))
+                        cluster['MAC'] = np.append(cluster['MAC'],MAC)
+                        cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[idx])
+                        cluster['row'] = np.append(cluster['row'],row[idx])
+                        cluster['col'] = np.append(cluster['col'],col[idx])
+
+                        skip_id.append(idx)
+
+                new_cluster_length = len(cluster['row'])
+
+
+    clustered_id = []
+    for r2 in cluster['row']: #For every entry in row cluster
+        for ii, r1 in enumerate(IP['row']): #For every entry in row IPu
+            if r1 == r2: #If r1 is an entry of "row" in the cluster, then save that row for later
+                clustered_id.append(ii)
+
+    all_id = np.array(list(range(len(IP['row']))))
+
+    clustered_id = np.array(clustered_id)
+    if clustered_id.shape[0] > 0:
+        unclustered_id = np.delete(all_id,clustered_id)
+        unclustered_id = np.insert(unclustered_id,0,0)
+    else:
+        unclustered_id = all_id
+
+    unclustered_IPu = {}
+    unclustered_IPu['f'] = IP['f'][unclustered_id]
+    unclustered_IPu['cov_f'] = IP['cov_f'][unclustered_id]
+    unclustered_IPu['d'] = IP['d'][unclustered_id]
+    unclustered_IPu['cov_d'] = IP['cov_d'][unclustered_id]
+    unclustered_IPu['ms'] = IP['ms'][unclustered_id]
+    unclustered_IPu['row'] = IP['row'][unclustered_id]
+    unclustered_IPu['col'] = IP['col'][unclustered_id]
+
+    return cluster, unclustered_IPu
+
+def cluster_from_mac_IPm(cluster: Dict[str,Any], IPm: Dict[str,Any], Params: Dict[str,Any]) -> tuple[Dict[str,Any], Dict[str,Any]]:
+    """
+    Cluster based on MAC if multiple poles exist for the model order
+
+    Args:
+        cluster (dict): Intermediate cluster
+        IPm (dict): Dictionary of data on initial points with competing poles
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Intermediate cluster
+        unclustered_IPm (dict): Initial points that were not clustered
+
+    """
+    #Cluster based on MAC if multiple poles exist for the model order
+    #Extract data
+    frequencies = IPm['f']
+    cov_f = IPm['cov_f']
+    damping_ratios = IPm['d']
+    cov_d = IPm['cov_d']
+    mode_shapes = IPm['ms']
+    row = IPm['row']
+    col = IPm['col']
+
+    # Find the model orders with multiple poles
+    pos = []
+    for ii, idd in enumerate(set(row)):
+        pos.append(np.argwhere(row==idd))
+
+    skip_id = []
+    skip_id_before = None
+    while skip_id != skip_id_before:
+        ip_ms = cluster['mode_shapes']
+        if isinstance(cluster['f'],np.ndarray):
+            ip_ms_0 = ip_ms[0,:] #Mode shape of the first pole
+        else:
+            ip_ms_0 = ip_ms #Mode shape of the first pole
+
+        i_ms = IPm['ms'][:] #Mode shapes of the model orders with multiple poles
+
+        skip_id_before = skip_id.copy()
+        #Go through all the model orders
+        for oo, pos_i in enumerate(pos):
+            MAC = np.zeros(pos_i.shape[0])
+
+            if oo in skip_id: #Skip these model orders, since they have already been added
+                continue
+
+            pos_i = pos_i[:,0]
+            for ii, id_row in enumerate(pos_i):
+                MAC[ii] = calculate_mac(ip_ms_0,i_ms[id_row]) #Calculate MAC between the first pole of the cluster and a pole in IPm
+                #If MAC is not satisfied
+                if MAC[ii] < Params['tMAC']: #Search for the max across all mode shapes in the cluster:
+                    #line 3 in algorithm
+                    MAC_list = []
+                    for ms in ip_ms:
+                        MAC_list.append(calculate_mac(ms,i_ms[id_row]))
+                    MAC[ii] = max(MAC_list)
+
+            #Find the mask for the poles that meet the MAC criterion
+            mask = MAC > Params['tMAC']
+            pos_MAC = np.argwhere(mask) #Get indices
+
+            #Formatting of the indices
+            if pos_MAC.shape[0] > 1: #More than one index
+                pos_MAC = pos_MAC[:,0]
+            else: #Only one or zero indices (no MAC match)
+                if pos_MAC.shape[0] == 1:
+                    pos_MAC = pos_MAC[0]
+
+            if pos_MAC.shape[0] > 1: #If multiple poles comply with the MAC criterion
+                #ids formatting
+                ids = pos_i[pos_MAC]
+
+                #Get frequencies of poles
+                freq = np.zeros(ids.shape[0])
+                for jj, idid in enumerate(ids):
+                    freq[jj] = frequencies[idid]
+                median_f = np.median(cluster['f'])
+
+                #Locate the index of the closest pole
+                idx = (np.abs(freq - median_f)).argmin()
+                ll = pos_i[pos_MAC[idx]]
+
+                #Add this pole to the cluster
+                cluster['f'] = np.append(cluster['f'],frequencies[ll])
+                cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll])
+                cluster['d'] = np.append(cluster['d'],damping_ratios[ll])
+                cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll])
+                cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:])))
+                cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[idx]])
+                cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll])
+                cluster['row'] = np.append(cluster['row'],row[ll])
+                cluster['col'] = np.append(cluster['col'],col[ll])
+
+                skip_id.append(oo)
+
+            elif pos_MAC.shape[0] == 1: #If only one pole complies with MAC
+                ll = pos_i[pos_MAC[0]]
+
+                #Add this pole to the cluster
+                cluster['f'] = np.append(cluster['f'],frequencies[ll])
+                cluster['cov_f'] = np.append(cluster['cov_f'],cov_f[ll])
+                cluster['d'] = np.append(cluster['d'],damping_ratios[ll])
+                cluster['cov_d'] = np.append(cluster['cov_d'],cov_d[ll])
+                cluster['mode_shapes'] = np.vstack((cluster['mode_shapes'],np.array(mode_shapes[ll,:])))
+                cluster['MAC'] = np.append(cluster['MAC'],MAC[pos_MAC[0]])
+                cluster['model_order'] = np.append(cluster['model_order'],Params['model_order']-row[ll])
+                cluster['row'] = np.append(cluster['row'],row[ll])
+                cluster['col'] = np.append(cluster['col'],col[ll])
+
+                skip_id.append(oo)
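+        # Example of the median-proximity rule above (hypothetical numbers): if the
+        # cluster median is 5.20 Hz and poles at 5.12 Hz and 5.45 Hz both pass the
+        # MAC test at one model order, the 5.12 Hz pole is added to the cluster and
+        # the 5.45 Hz pole remains in IPm for a later pass.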
+
+    clustered_id = []
+    for r2 in cluster['row']: #For every entry in row cluster
+        for ii, r1 in enumerate(IPm['row']): #For every entry in row IPm
+            if r1 == r2: #If r1 is an entry of "row" in the cluster, then save that row for later
+                clustered_id.append(ii)
+
+    all_id = np.array(list(range(len(IPm['row']))))
+
+    clustered_id = np.array(clustered_id)
+    if clustered_id.shape[0] > 0:
+        unclustered_id = np.delete(all_id,clustered_id)
+    else:
+        unclustered_id = all_id
+
+    unclustered_IPm = {}
+    unclustered_IPm['f'] = IPm['f'][unclustered_id]
+    unclustered_IPm['cov_f'] = IPm['cov_f'][unclustered_id]
+    unclustered_IPm['d'] = IPm['d'][unclustered_id]
+    unclustered_IPm['cov_d'] = IPm['cov_d'][unclustered_id]
+    unclustered_IPm['ms'] = IPm['ms'][unclustered_id]
+    unclustered_IPm['row'] = IPm['row'][unclustered_id]
+    unclustered_IPm['col'] = IPm['col'][unclustered_id]
+
+    return cluster, unclustered_IPm
\ No newline at end of file
diff --git a/src/methods/mode_clustering_functions/expand_cluster.py b/src/methods/mode_clustering_functions/expand_cluster.py
new file mode 100644
index 0000000..bf53060
--- /dev/null
+++ b/src/methods/mode_clustering_functions/expand_cluster.py
@@ -0,0 +1,85 @@
+from typing import Any
+import numpy as np
+from methods.mode_clustering_functions.create_cluster import cluster_creation
+
+def cluster_expansion(cluster: dict[str,Any], data: dict[str,Any], Params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Expand cluster based on minima and maxima bounds
+
+    Args:
+        cluster (dict): Intermediate cluster
+        data (dict): OMA points data
+        Params (dict): Dictionary of algorithm parameters
+    Returns:
+        cluster (dict): Expanded cluster
+
+    Raises:
+        ValueError: If duplicate model orders remain after expansion
+
+    """
+    unClustered_frequencies = data['frequencies']
+    unClustered_damping = data['damping_ratios']
+
+    freq_c = cluster['f']
+    cov_f = cluster['cov_f']
+    damp_c = cluster['d']
+    cov_d = cluster['cov_d']
+    row = cluster['row']
+
+    bound_multiplier = Params['bound_multiplier']
+
+    #Find min-max bounds of cluster
+    f_lower_bound = np.min(freq_c - bound_multiplier * np.sqrt(cov_f)) # Minimum of all points for frequencies
+    f_upper_bound = np.max(freq_c + bound_multiplier * np.sqrt(cov_f)) # Maximum of all points for frequencies
+    d_lower_bound = np.min(damp_c - bound_multiplier * np.sqrt(cov_d)) # Minimum of all points for damping
+    d_upper_bound = np.max(damp_c + bound_multiplier * np.sqrt(cov_d)) # Maximum of all points for damping
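+    # Worked example (hypothetical numbers): with bound_multiplier = 2, a pole at
+    # f = 10.0 Hz with cov_f = 0.04 contributes the interval
+    # 10.0 +/- 2*sqrt(0.04) = [9.6, 10.4] Hz; the cluster bounds are the min/max
+    # of these intervals over all clustered poles.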
+
+    #Mask of possible expanded poles
+    condition_mask = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= d_lower_bound) & (unClustered_damping <= d_upper_bound)
+    # Get indices satisfying the condition
+    expanded_indices = np.argwhere(condition_mask)
+
+    #Initiate cluster_points for cluster creation
+    cluster_points = {}
+    cluster_points['f'] = data['frequencies'][condition_mask]
+    cluster_points['cov_f'] = data['cov_f'][condition_mask]
+    cluster_points['d'] = data['damping_ratios'][condition_mask]
+    cluster_points['cov_d'] = data['cov_d'][condition_mask]
+    cluster_points['ms'] = data['mode_shapes'][condition_mask,:]
+    cluster_points['row'] = expanded_indices[:,0]
+    cluster_points['col'] = expanded_indices[:,1]
+
+    #Make the first ip of the cluster be the first point in cluster_points
+    if isinstance(cluster['f'],np.ndarray):
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'][0])
+    else:
+        index_f = np.argwhere(cluster_points['f'] == cluster['f'])
+    if len(index_f[:,0]) > 1:
+        index_row = np.argwhere(cluster_points['row'][index_f[:,0]] == cluster['row'][0])
+        ip_id = int(index_f[index_row[:,0]][:,0])
+    else:
+        ip_id = int(index_f[:,0])
+    indices = list(range(len(cluster_points['f'])))
+    popped_id = indices.pop(ip_id)
+    indices.insert(0,popped_id)
+    indices = np.array(indices)
+
+    cluster_points['f'] = cluster_points['f'][indices]
+    cluster_points['cov_f'] = cluster_points['cov_f'][indices]
+    cluster_points['d'] = cluster_points['d'][indices]
+    cluster_points['cov_d'] = cluster_points['cov_d'][indices]
+    cluster_points['ms'] = cluster_points['ms'][indices,:]
+    cluster_points['row'] = cluster_points['row'][indices]
+    cluster_points['col'] = cluster_points['col'][indices]
+
+    #Check if these values can be clustered
+    cluster = cluster_creation(cluster_points,Params)
+    if isinstance(cluster['f'],np.ndarray):
+        if len(cluster['row']) != len(set(cluster['row'])):
+            print("row_before",cluster_points['row'])
+            print("row_after",cluster['row'])
+            print("exp2",cluster['f'])
+            print("double orders",cluster['row'])
+            raise ValueError("Duplicate model orders exist after expansion")
+
+    return cluster
diff --git a/src/methods/mode_clustering_functions/initialize_Ip.py b/src/methods/mode_clustering_functions/initialize_Ip.py
new file mode 100644
index 0000000..5dc0dbb
--- /dev/null
+++ b/src/methods/mode_clustering_functions/initialize_Ip.py
@@ -0,0 +1,46 @@
+from typing import Any
+import numpy as np
+
+def cluster_initial(ip: list[float], data: dict[str,Any], bound: float = 2) -> dict[str,Any]:
+    """
+    Find the initial cluster points
+
+    Args:
+        ip (list): Frequency, damping and covariances for the initial point (ip)
+        data (dict): OMA points data
+        bound (float): Multiplier on the standard deviation
+    Returns:
+        initial_points (dict): Initial points to create a cluster from
+
+    """
+    #Extract data of the initial point
+    ip_f = ip[0]
+    ip_cov_f = ip[1]
+    ip_d = ip[2]
+    ip_cov_d = ip[3]
+
+    # Confidence interval using mean +/- bound*standard_deviation
+    f_lower_bound = ip_f - bound * np.sqrt(ip_cov_f)
+    f_upper_bound = ip_f + bound * np.sqrt(ip_cov_f)
+    z_lower_bound = ip_d - bound * np.sqrt(ip_cov_d)
+    z_upper_bound = ip_d + bound * np.sqrt(ip_cov_d)
+
+
+    frequencies = data['frequencies']
+    damping_ratios = data['damping_ratios']
+
+    # Find elements within the current limits that are still ungrouped
+    condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound)
+    indices = np.argwhere(condition_mask) # Get indices satisfying the condition
+
+    #Generate the data for the initial points
+    initial_points = {}
+    initial_points['f'] = data['frequencies'][condition_mask]
+    initial_points['cov_f'] = data['cov_f'][condition_mask]
+    initial_points['d'] = data['damping_ratios'][condition_mask]
+    initial_points['cov_d'] = data['cov_d'][condition_mask]
+    initial_points['ms'] = data['mode_shapes'][condition_mask,:]
+    initial_points['row'] = indices[:,0]
+    initial_points['col'] = indices[:,1]
+
+    return initial_points
\ No newline at end of file
diff --git a/src/methods/mode_tracking.py b/src/methods/mode_tracking.py
new file mode 100644
index 0000000..ae3f661
--- /dev/null
+++ b/src/methods/mode_tracking.py
@@ -0,0 +1,85 @@
+import sys
+from typing import Any, Dict, Tuple
+import numpy as np
+import matplotlib.pyplot as plt
+from methods.constants import PARAMS
+from methods.mode_clustering import (subscribe_and_cluster)
+from methods.mode_tracking_functions.mode_tracking import cluster_tracking
+from functions.plot_mode_tracking import plot_tracked_modes
+from functions.plot_clusters import plot_clusters
+# pylint: disable=C0103, W0603
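+# Typical usage (illustrative; the config path is hypothetical):
+#
+#   from methods.mode_tracking import subscribe_and_track_clusters, live_mode_tracking
+#
+#   # One-shot: receive one sysid message, cluster it and track the modes
+#   sysid_output, clusters, tracked = subscribe_and_track_clusters("config/production.json")
+#
+#   # Continuous: keep clustering/tracking and refreshing the plots until interrupted
+#   live_mode_tracking("config/production.json", plot=np.array([1, 1]))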
+
+def track_clusters(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any],
+                   params: dict[str,Any]) -> dict[str,Any]:
+    """
+    Runs the mode tracking algorithm.
+
+    Args:
+        cluster_dict (dict[str,Any]): Clusters from OMA
+        tracked_clusters (dict[str,Any]): Previously tracked clusters
+        params (dict[str,Any]): Tracking parameters
+    Returns:
+        tracked_clusters (dict[str,Any]): Tracked clusters
+    """
+    tracked_clusters = cluster_tracking(cluster_dict, tracked_clusters, params)
+    return tracked_clusters
+
+def subscribe_and_track_clusters(config_path: str) -> Tuple[Dict[str,Any], Dict[str,Any], Dict[str,Any]]:
+    """
+    Subscribes to the MQTT broker, receives one OMA message, runs clustering and mode tracking, and returns the results.
+
+    Args:
+        config_path (str): Path to config JSON.
+
+    Returns:
+        sysid_output (Dict[str,Any]): OMA output
+        clusters (Dict[str,Any]): Clusters
+        tracked_clusters (Dict[str,Any]): Tracked clusters
+    """
+    tracked_clusters = {}
+    sysid_output, clusters, median_frequencies = subscribe_and_cluster(config_path,PARAMS)
+
+    print("Clustered frequencies", median_frequencies)
+    tracked_clusters = track_clusters(clusters, tracked_clusters,PARAMS)
+
+    return sysid_output, clusters, tracked_clusters
+
+def live_mode_tracking(config_path: str,
+                       plot: np.ndarray = np.array([1,1])
+                       ) -> None:
+    """
+    Subscribes to the MQTT broker, receives one OMA message at a time, runs mode tracking, and plots the results. Continues until stopped.
+
+    Args:
+        config_path (str): Path to config JSON.
+        plot (np.ndarray): Array describing which plots to show
+
+    Returns:
+        None
+
+    Plots:
+        Stabilization diagram
+        Cluster plot
+        Tracked clusters plot
+    """
+    tracked_clusters = {}
+    fig_ax1 = None
+    fig_ax2 = None
+
+    while True:
+        try:
+            sysid_output, clusters, median_frequencies = subscribe_and_cluster(config_path,PARAMS)
+
+            print("Clustered frequencies", median_frequencies)
+            tracked_clusters = track_clusters(clusters, tracked_clusters,PARAMS)
+
+            if plot[0] == 1:
+                fig_ax1 = plot_clusters(clusters,sysid_output,PARAMS,fig_ax=fig_ax1)
+                plt.show(block=False)
+            if plot[1] == 1:
+                fig_ax2 = plot_tracked_modes(tracked_clusters,PARAMS,fig_ax=fig_ax2,x_length=None)
+                plt.show(block=False)
+            sys.stdout.flush()
+
+        except KeyboardInterrupt:
+            print("Shutting down gracefully")
+            plt.close()
+            break #Stop the loop; without this it would keep running after Ctrl-C
+        except Exception as e:
+            print(f"Unexpected error: {e}")
\ No newline at end of file
diff --git a/src/methods/mode_tracking_functions/__init__.py b/src/methods/mode_tracking_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/methods/mode_tracking_functions/match_to_tracked_cluster.py b/src/methods/mode_tracking_functions/match_to_tracked_cluster.py
new file mode 100644
index 0000000..5707510
--- /dev/null
+++ b/src/methods/mode_tracking_functions/match_to_tracked_cluster.py
@@ -0,0 +1,149 @@
+from typing import Any, Optional
+import numpy as np
+from functions.calculate_mac import calculate_mac
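+# Matching criteria used below (key names from Params): 'phi_cri' is the minimum
+# MAC between mode shapes, 'freq_cri' the maximum relative frequency difference
+# |f_tracked - f| / f, and 'obj_cri' a tolerance on the objective function used
+# to break ties between nearly equal matches.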
+
+def match_cluster_to_tracked_cluster(cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any], Params: dict[str,Any], result_prev: Optional[dict[str,Any]] = None, skip_cluster: Optional[list] = None, skip_tracked_cluster: Optional[list] = None) -> dict[str,Any]:
+    """
+    Match clusters to tracked clusters
+
+    The result dictionary maps cluster indices (keys) to the indices of the tracked clusters they match (values).
+    Example:
+        Cluster 1 matches tracked cluster 2
+        Cluster 2 matches tracked cluster 1
+        Cluster 3 matches tracked cluster 1
+        Cluster 4 matches "new", i.e. it could not be matched with an existing tracked cluster
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+        result_prev (dict): Dictionary of the previous match result
+        skip_cluster (list): Clusters that already have an optimal match with a tracked cluster
+        skip_tracked_cluster (list): Tracked clusters that already have an optimal match with a cluster
+
+    Returns:
+        result (dict): Dictionary of matches
+
+    """
+    #None defaults avoid the mutable-default-argument pitfall
+    if result_prev is None:
+        result_prev = {}
+    if skip_cluster is None:
+        skip_cluster = []
+    if skip_tracked_cluster is None:
+        skip_tracked_cluster = []
+
+    result = {}
+    for id, key in enumerate(cluster_dict): #Go through all clusters
+        if id in skip_cluster: #If this cluster is already matched, skip it
+            result[str(id)] = result_prev[str(id)]
+            continue
+
+        #Get mode shapes
+        cluster = cluster_dict[key]
+        omega = cluster['median_f']
+        phi = cluster['mode_shapes'][0]
+        phi_all = cluster['mode_shapes']
+
+        Xres = []
+        MAC_list = []
+        D_freq = []
+        omega_t_list = []
+        MAC_max_list = []
+        MAC_avg_list = []
+        for t_key in tracked_clusters: #Go through all tracked clusters; keys are integers from 0 up to the total number of tracked clusters (renamed from key to avoid shadowing the outer loop variable)
+            if t_key == 'iteration':
+                pass
+            else:
+                tracked_cluster_list = tracked_clusters[t_key] #Access all clusters in a tracked cluster group
+                tracked_cluster = tracked_cluster_list[-1] #Access the last cluster of each tracked cluster group
+                omega_t = tracked_cluster['median_f'] #Median frequency of the last cluster in the tracked cluster group
+                omega_t_list.append(omega_t)
+                phi_t_all = tracked_cluster['mode_shapes'] #Mode shapes of the last cluster in the tracked cluster group
+                phi_t = phi_t_all[0]
+
+                MAC_list.append(float(calculate_mac(phi_t, phi)))
+
+                MACs = np.zeros((phi_all.shape[0],phi_t_all.shape[0]))
+                for ii, phi_i in enumerate(phi_all): #Loop variables renamed so phi/phi_t are not overwritten
+                    for jj, phi_t_j in enumerate(phi_t_all):
+                        MACs[ii,jj] = float(calculate_mac(phi_t_j, phi_i)) #Compare the cluster with this tracked cluster pole by pole
+
+                if t_key in skip_tracked_cluster:
+                    #This tracked cluster is already matched; use sentinel values so it can never win
+                    MAC_max_list.append(0)
+                    MAC_avg_list.append(0)
+                    D_freq.append(10**6)
+                else:
+                    MAC_avg_list.append(np.mean(MACs))
+                    MAC_max_list.append(np.max(MACs))
+                    D_freq.append(abs(omega_t-omega)/omega)
+
+        itemindex1 = np.argwhere(np.array(MAC_max_list) > Params['phi_cri']) #Tracked clusters that satisfy the MAC criterion
+        itemindex = np.argwhere(np.array(D_freq)[itemindex1[:,0]] < Params['freq_cri']) #Of those, the ones that also satisfy the frequency criterion
+        indices = itemindex1[itemindex[:,0]]
+        if len(indices) > 1: #If two or more tracked clusters comply with the mode shape criterion
+            Xres = []
+            Xres_f = []
+            Xres_MAC = []
+            for nn in indices:
+                pos = nn[0]
+                X = D_freq[pos]/MAC_max_list[pos] #Objective function
+                Xres.append(X)
+                Xres_f.append(D_freq[pos])
+                Xres_MAC.append(MAC_max_list[pos])
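+            # Objective function example (hypothetical numbers): a candidate with a
+            # relative frequency difference of 0.01 and a max MAC of 0.9 scores
+            # X = 0.01/0.9 ~ 0.011; a smaller X means a better combined match.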
+
+            if Xres != []: #One or more tracked cluster(s) comply with the frequency criterion
+                pos1 = Xres.index(min(Xres)) #Find the most likely match
+                pos2 = Xres_MAC.index(max(Xres_MAC)) #Find the largest MAC
+                pos3 = Xres_f.index(min(Xres_f)) #Find the smallest frequency difference
+
+                if len(Xres) > 1: #If more than one candidate complies with the criteria
+                    Xres_left = Xres.copy()
+                    del Xres_left[pos1]
+
+                    Xres_MAC_left = Xres_MAC.copy()
+                    del Xres_MAC_left[pos1]
+
+                    Xres_f_left = Xres_f.copy()
+                    del Xres_f_left[pos1]
+
+                    pos1_2 = Xres_left.index(min(Xres_left)) #Most likely match among the remaining candidates
+                    pos2_2 = Xres_MAC_left.index(max(Xres_MAC_left)) #Most likely remaining match based on MAC
+                    pos3_2 = Xres_f_left.index(min(Xres_f_left)) #Most likely remaining match based on frequency
+
+                    if (pos1 == pos2) and (pos1 == pos3): #One candidate wins on all three: objective function, max MAC and frequency difference
+                        pos = int(indices[pos1][0])
+                        result[str(id)] = pos #Group with a tracked cluster
+
+                    elif abs(min(Xres_left)-min(Xres)) < Params['obj_cri']: #If the two best objective values are nearly equal
+                        if (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']): #If both frequency differences are within the criterion
+                            pos = int(indices[pos2_2][0]) #Match with the best MAC
+                            result[str(id)] = pos #Group with a tracked cluster
+                        elif (min(Xres_f) < Params['freq_cri']) and (min(Xres_f_left) > Params['freq_cri']): #If only the best candidate's frequency difference is within the criterion
+                            pos = int(indices[pos3][0]) #Match with the lowest frequency difference
+                            result[str(id)] = pos #Group with a tracked cluster
+                        elif (min(Xres_f) > Params['freq_cri']) and (min(Xres_f_left) < Params['freq_cri']):
+                            pos = int(indices[pos3_2][0]) #Match with the lowest frequency difference
+                            result[str(id)] = pos #Group with a tracked cluster
+                        else: #Otherwise choose the one with the highest MAC
+                            pos = int(indices[pos2_2][0])
+                            result[str(id)] = pos #Group with a tracked cluster
+                    else: #Otherwise choose the one with the lowest objective function
+                        pos = int(indices[pos1][0])
+                        result[str(id)] = pos #Group with a tracked cluster
+
+            else: #No tracked cluster complies with the frequency criterion, so the cluster is marked as new
+                result[str(id)] = "new"
+
+        elif len(indices) == 1: #If exactly one tracked cluster complies with the mode shape criterion
+            pos = int(indices[0][0])
+            result[str(id)] = pos #Group with a tracked cluster
+
+        else: #Does not comply with the mode shape criterion
+            result[str(id)] = "new"
+
+    return result
\ No newline at end of file
diff --git a/src/methods/mode_tracking_functions/mode_tracking.py b/src/methods/mode_tracking_functions/mode_tracking.py
new file mode 100644
index 0000000..361c3ac
--- /dev/null
+++ b/src/methods/mode_tracking_functions/mode_tracking.py
@@ -0,0 +1,148 @@
+from typing import Any, Optional
+import numpy as np
+from methods.mode_tracking_functions.match_to_tracked_cluster import match_cluster_to_tracked_cluster
+from methods.mode_tracking_functions.resolve_nonunique_matches import resolve_nonunique_matches
+# JVM 22/10/2025
+
+def cluster_tracking(cluster_dict: dict[str,Any],tracked_clusters: dict[str,Any],Params: Optional[dict[str,Any]] = None) -> dict[str,Any]:
+    """
+    Tracking of modes across experiments
+
+    Args:
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+        Params (dict): Tracking parameters
+
+    Returns:
+        tracked_clusters (dict): Updated tracked clusters
+
+    """
+    if Params is None:
+        Params = {'phi_cri':0.8,
+                  'freq_cri':0.2}
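+    # Expected shape of tracked_clusters (illustrative):
+    #   {'iteration': 2,                 # counter of tracking rounds
+    #    '0': [cluster_a, cluster_b],    # history of the first tracked mode
+    #    '1': [cluster_c]}               # history of the second tracked mode
+    # where each list element is a cluster dict from one sysid/clustering round.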
+
+    m_f = []
+    for key in cluster_dict.keys():
+        cluster = cluster_dict[key]
+        m_f.append(cluster['median_f'])
+
+    t_list = []
+    t_length = []
+    for key in tracked_clusters: #Go through all tracked clusters; keys are integers from 0 up to the total number of tracked clusters
+        if key == 'iteration':
+            pass
+        else:
+            tracked_cluster_list = tracked_clusters[key] #Access all clusters in a tracked cluster group
+            t_length.append(len(tracked_cluster_list))
+            tracked_cluster = tracked_cluster_list[-1] #Access the last cluster of each tracked cluster group
+            #Median frequency of the last cluster in the tracked cluster group
+            t_list.append(tracked_cluster['median_f'])
+
+    # No tracked clusters yet?
+    if not tracked_clusters:
+        first_track = 1
+    else:
+        first_track = 0
+
+    if first_track == 1:
+        for id, key in enumerate(cluster_dict.keys()):
+            cluster = cluster_dict[key]
+            cluster['id'] = 0
+
+            tracked_clusters['iteration'] = 0
+            tracked_clusters[str(id)] = [cluster]
+    else:
+        iteration = tracked_clusters['iteration'] + 1 #Renamed from iter to avoid shadowing the built-in
+        tracked_clusters['iteration'] = iteration
+
+        result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params) #Match clusters to tracked clusters
+
+        result_int = []
+        for val in result.values(): #Get all non-"new" results
+            if isinstance(val, int):
+                result_int.append(val)
+
+        if len(result_int) == len(set(result_int)): #If all clusters match with a unique tracked cluster
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in the result dict
+                cluster['id'] = iteration
+                if pos == "new": #Add cluster as a new tracked cluster
+                    new_key = len(tracked_clusters)-1 #-1 for "iteration", +1 for the next cluster and -1 for starting at 0 = -1
+                    tracked_clusters[str(new_key)] = [cluster]
+                else: #Add cluster to an existing tracked cluster
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+        else: #If some clusters match with the same tracked cluster
+            kk = 0
+            skip_tracked_cluster = []
+            skip_cluster = []
+            while len(result_int) != len(set(result_int)):
+                kk += 1
+                if kk > 10:
+                    #Debug info:
+                    unique_match_debug_info(result,cluster_dict,t_list)
+                    raise RuntimeError("Unresolved mode tracking")
+
+                for possible_match_id in set(result.values()): #Go through all unique values
+                    if possible_match_id == "new": #Do nothing if "new"
+                        pass
+                    else:
+                        test_if_str = np.argwhere(np.array(list(result.values())) == "new") #Test if "new" is present; if so, we must match with str instead of int
+                        if len(test_if_str) > 0:
+                            itemindex = np.argwhere(np.array(list(result.values())) == str(possible_match_id)) #Find the indices of the clusters sharing this match
+                        else:
+                            itemindex = np.argwhere(np.array(list(result.values())) == possible_match_id) #Find the indices of the clusters sharing this match
+
+                        if len(itemindex) > 1: #If multiple clusters match to the same tracked cluster
+                            pos, result, cluster_index = resolve_nonunique_matches(possible_match_id, itemindex, result, cluster_dict, tracked_clusters)
+                            skip_tracked_cluster.append(str(result[str(cluster_index[pos])])) #Skip the tracked cluster that already has its best match
+                            skip_cluster.append(cluster_index[pos]) #Skip the cluster that is already optimally matched
+
+                result = match_cluster_to_tracked_cluster(cluster_dict,tracked_clusters,Params,result,skip_cluster,skip_tracked_cluster) #Match with tracked clusters again, skipping the pairs that are already matched
+
+                result_int = []
+                for val in result.values():
+                    if isinstance(val, int):
+                        result_int.append(val)
+
+            #Add the clusters to tracked clusters
+            for ii, key in enumerate(cluster_dict.keys()):
+                cluster = cluster_dict[key]
+                pos = result[str(ii)] #Find pos in the result dict
+                cluster['id'] = iteration
+                if pos == "new":
+                    new_key = len(tracked_clusters)-1 #-1 for "iteration", +1 for the next cluster and -1 for starting at 0 = -1
+                    tracked_clusters[str(new_key)] = [cluster]
+                else:
+                    cluster_to_add_to = tracked_clusters[str(pos)]
+                    cluster_to_add_to.append(cluster)
+                    tracked_clusters[str(pos)] = cluster_to_add_to
+
+
+
+    return tracked_clusters
+
+
+def unique_match_debug_info(result,cluster_dict,t_list):
+    """
+    Print debug info for unresolved matches
+
+    Args:
+        result (dict): Dictionary of matches
+        cluster_dict (dict): Dictionary of clusters
+        t_list (list): Median frequencies of the last cluster in each tracked cluster group
+
+    Returns:
+        None
+
+    """
+    print('\n')
+    for ii, key in enumerate(cluster_dict.keys()):
+        cluster = cluster_dict[key]
+        pos = result[str(ii)] #Find pos in the result dict
+        if pos == "new":
+            print(cluster_dict[key]['median_f'],str(ii),pos)
+        else:
+            print(cluster_dict[key]['median_f'],str(ii),pos,t_list[pos])
\ No newline at end of file
diff --git a/src/methods/mode_tracking_functions/resolve_nonunique_matches.py b/src/methods/mode_tracking_functions/resolve_nonunique_matches.py
new file mode 100644
index 0000000..7cbeea4
--- /dev/null
+++ b/src/methods/mode_tracking_functions/resolve_nonunique_matches.py
@@ -0,0 +1,58 @@
+from typing import Any
+import numpy as np
+from functions.calculate_mac import calculate_mac
+
+def resolve_nonunique_matches(possible_match_id: int, itemindex: np.ndarray, result: dict[str,Any], cluster_dict: dict[str,Any], tracked_clusters: dict[str,Any]) -> tuple[int, dict[str,Any], np.ndarray]:
+    """
+    Resolve the case where two or more clusters match the same tracked cluster and determine which match is optimal.
+    Clusters without an optimal match are given the match result "new".
+
+    Example:
+        Cluster 2 matches tracked cluster 1
+        Cluster 3 matches tracked cluster 1
+
+    Args:
+        possible_match_id (int): The index of the tracked cluster
+        itemindex (np.ndarray): The indices of the clusters that share the same match
+        result (dict): Dictionary of suggested matches
+        cluster_dict (dict): Dictionary of clusters
+        tracked_clusters (dict): Previously tracked clusters
+
+    Returns:
+        pos (int): Index (within cluster_index) of the cluster with the optimal match
+        result (dict): Dictionary of re-done matches
+        cluster_index (np.ndarray): The indices of the clusters that share the same match
+
+    """
+    mean_MAC = []
+    keys = [str(y[0]) for y in itemindex.tolist()] #Make dictionary keys based on the indices in itemindex
+    for nn in itemindex: #Go through the possible cluster match indices
+        cluster = cluster_dict[int(nn[0])]
+        phi_all = cluster["mode_shapes"] #Mode shapes of the cluster
+        tracked_cluster_list = tracked_clusters[str(possible_match_id)] #Access all clusters in the tracked cluster group
+        tracked_cluster = tracked_cluster_list[-1] #Access the last cluster of the tracked cluster group
+        phi_t_all = tracked_cluster['mode_shapes'] #Mode shapes of the tracked cluster
+
+        #Truncate the mode shape lists to the same length, i.e. the same number of poles
+        if len(phi_all) > len(phi_t_all):
+            phi_all = phi_all[0:len(phi_t_all)]
+        elif len(phi_all) < len(phi_t_all):
+            phi_t_all = phi_t_all[0:len(phi_all)]
+        else: #Equal length
+            pass
+        MAC_matrix = np.zeros((len(phi_all),len(phi_t_all))) #Initiate a matrix of MAC values
+        for ii, phi in enumerate(phi_all):
+            for jj, phi_t in enumerate(phi_t_all):
+                MAC_matrix[ii,jj] = calculate_mac(phi,phi_t) #MAC value for this pole pair
+
+        mean_MAC.append(np.mean(MAC_matrix)) #Save the mean MAC of this cluster against the matched tracked cluster
+    pos = mean_MAC.index(max(mean_MAC)) #Find the index with the highest mean MAC, i.e. the cluster that matches the tracked cluster best
+
+    cluster_index = itemindex[:,0]
+
+    for key in keys:
+        if keys[pos] == key: #Let the best cluster match stay
+            pass
+        else: #Mark the clusters with worse matches as new
+            result[key] = "new"
+    return pos, result, cluster_index
\ No newline at end of file
diff --git a/src/methods/mode_update_functions/__init__.py b/src/methods/mode_update_functions/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/methods/packages/mode_pairs.py b/src/methods/mode_update_functions/mode_pairs.py
similarity index 100%
rename from src/methods/packages/mode_pairs.py
rename to src/methods/mode_update_functions/mode_pairs.py
diff --git a/src/methods/packages/model_update.py b/src/methods/mode_update_functions/model_update.py
similarity index 97%
rename from src/methods/packages/model_update.py
rename to src/methods/mode_update_functions/model_update.py
index 2e64d45..4926365 100644
--- a/src/methods/packages/model_update.py
+++ b/src/methods/mode_update_functions/model_update.py
@@ -4,7 +4,7 @@ import os
 from methods.packages import eval_yafem_model as beam_new
 import json
-from methods.packages.mode_pairs import pair_calculate
+from methods.mode_update_functions.mode_pairs import pair_calculate
 
 def par_est(x, comb):
diff --git a/src/methods/model_update_module.py b/src/methods/model_update.py
similarity index 60%
rename from src/methods/model_update_module.py
rename to src/methods/model_update.py
index 3d98e97..b6fa1ef 100644
--- a/src/methods/model_update_module.py
+++ b/src/methods/model_update.py
@@ -1,16 +1,13 @@
 import json
 import threading
-from typing import Any, List, Dict, Tuple, Optional
+from typing import Any, List, Dict, Optional
 import numpy as np
 import paho.mqtt.client as mqtt
 from scipy.optimize import minimize
 from scipy.linalg import eigh
-from methods.constants import MODEL_ORDER, MSTAB_FACTOR, TMAC
-from methods.packages.mode_track import mode_allingment
 from methods.packages.eval_yafem_model import eval_yafem_model
-from methods.packages import model_update
+from methods.mode_update_functions import model_update
 from methods.constants import X0, BOUNDS
-from data.comm.mqtt import load_config, setup_mqtt_client
 # pylint: disable=C0103, W0603
 
 # Global threading event to wait for OMA data
@@ -58,27 +55,6 @@ def _on_message(_client: mqtt.Client, _userdata: dict, msg: mqtt.MQTTMessage) ->
     print(f"Error processing OMA message: {e}")
 
 
-def run_mode_track(oma_output: Any) -> Tuple[List[Dict], np.ndarray, np.ndarray]:
-    """
-    Runs the mode tracking algorithm.
-
-    Args:
-        oma_output (Any): OMA output from subscription or elsewhere.
- Returns: - cleaned_values (List[Dict]), - median_frequencies (np.ndarray), - confidence_intervals (np.ndarray) - """ - mstab = MODEL_ORDER * MSTAB_FACTOR - cleaned_values = mode_allingment(oma_output, mstab, TMAC) - median_frequencies = np.array([cluster["median"] for cluster in cleaned_values]) - confidence_intervals = np.array([ - cluster["original_cluster"]["confidence_interval"] - for cluster in cleaned_values - ]) - return cleaned_values, median_frequencies, confidence_intervals - - # pylint: disable=R0914 def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]: """ @@ -137,49 +113,3 @@ def run_model_update(cleaned_values: List[Dict]) -> Optional[Dict[str, Any]]: except ValueError as e: print(f"Skipping model updating due to error: {e}") return None - - -def subscribe_and_get_cleaned_values(config_path: str, - num_clusters: int = 2) -> Tuple[List[Dict], np.ndarray, np.ndarray]: - """ - Subscribes to MQTT broker, receives one OMA message, runs mode tracking, and returns results. - - Args: - config_path (str): Path to config JSON. - num_clusters (int): Number of clusters to keep after mode tracking. - - Returns: - cleaned_values (List[Dict]), - median_frequencies (np.ndarray), - confidence_intervals (np.ndarray) - """ - global oma_output_global - oma_output_global = None # Reset in case old data is present - result_ready.clear() - - config = load_config(config_path) - mqtt_client, selected_topic = setup_mqtt_client(config["sysID"], topic_index=0) - - mqtt_client.user_data_set({"topic": selected_topic, "qos": 0}) - mqtt_client.on_connect = _on_connect - mqtt_client.on_message = _on_message - mqtt_client.connect(config["sysID"]["host"], config["sysID"]["port"], keepalive=60) - mqtt_client.loop_start() - print("Waiting for OMA data...") - try: - while not result_ready.wait(timeout=0.1): - pass - except KeyboardInterrupt: - print("Cancel") - mqtt_client.loop_stop() - mqtt_client.disconnect() - raise SystemExit - - mqtt_client.loop_stop() - mqtt_client.disconnect() - - if oma_output_global is None: - raise RuntimeError("Failed to receive OMA data.") - - print("OMA data received. Running mode tracking...") - return run_mode_track(oma_output_global) diff --git a/src/methods/packages/mode_track.py b/src/methods/packages/mode_track.py deleted file mode 100644 index fe68c91..0000000 --- a/src/methods/packages/mode_track.py +++ /dev/null @@ -1,944 +0,0 @@ -"This file is taken from the DTaaS-platform" -import matplotlib.pyplot as plt -import matplotlib.tri as mtri -import numpy as np -import numpy.ma as ma -import copy - -# plt.close('all') -# Clustering function -def cluster_frequencies(frequencies, damping_ratios, mode_shapes, - frequencies_max_MO, cov_freq_max_MO, - damping_ratios_max_MO, cov_damping_max_MO, - mode_shapes_max_MO, tMAC, bound_multiplier=2): - """ - - - Parameters - ---------- - frequencies : TYPE - DESCRIPTION. - damping_ratios : TYPE - DESCRIPTION. - mode_shapes : TYPE - DESCRIPTION. - frequencies_max_MO : TYPE - DESCRIPTION. - cov_freq_max_MO : TYPE - DESCRIPTION. - damping_ratios_max_MO : TYPE - DESCRIPTION. - cov_damping_max_MO : TYPE - DESCRIPTION. - mode_shapes_max_MO : TYPE - DESCRIPTION. - tMAC : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - None. 
- - """ - - # Modify the index of frequency to sorting - - sorted_indices = np.argsort(frequencies_max_MO) - fn_sorted = frequencies_max_MO[sorted_indices] - damping_ratios_sorted = damping_ratios_max_MO[sorted_indices] - cov_fn_sorted = cov_freq_max_MO[sorted_indices] - cov_damping_sorted = cov_damping_max_MO[sorted_indices] - mode_shape_sorted = mode_shapes_max_MO[sorted_indices] - - fn_unique, unique_indices = np.unique(fn_sorted, return_index=True) - cov_fn_unique = cov_fn_sorted[unique_indices] - damping_ratios_unique = damping_ratios_sorted[unique_indices] - cov_damping_unique = cov_damping_sorted[unique_indices] - mode_shape_unique = mode_shape_sorted[unique_indices] - - # print(f'unsorted frequencies: {frequencies_max_MO}') - # print(f'unique frequencies: {fn_unique}') - # print(f'unsorted covariance: {cov_freq_max_MO}') - # print(f'unique covariance: {cov_fn_unique}') - - # frequencies = frequencies[::2] # This is 'S' as per algorithm - # mode_shapes = mode_shapes[::2, :, :] - - # print(f'Shape of frequencies: {frequencies.shape}') - - C_cluster = [] - Ip = [] - - # Mask to track ungrouped elements (initially all elements are ungrouped) - ungrouped_mask = np.ones_like(frequencies, dtype=bool) - - # Check each limit and save indices - for ip, (f_MxMO, fcov_MxMO, z_MxMO, zcov_MxMO) in enumerate(zip(fn_unique, - cov_fn_unique, damping_ratios_unique, cov_damping_unique)): - if np.isnan(f_MxMO): - continue - - # Confidence interval using the mean±2*standard_deviation - f_lower_bound = f_MxMO - bound_multiplier * np.sqrt(fcov_MxMO) - f_upper_bound = f_MxMO + bound_multiplier * np.sqrt(fcov_MxMO) - z_lower_bound = z_MxMO - bound_multiplier * np.sqrt(zcov_MxMO) - z_upper_bound = z_MxMO + bound_multiplier * np.sqrt(zcov_MxMO) - - # Find elements within the current limit that are still ungrouped - condition_mask = (frequencies >= f_lower_bound) & (frequencies <= f_upper_bound) & (damping_ratios >= z_lower_bound) & (damping_ratios <= z_upper_bound) & ungrouped_mask - indices = np.argwhere(condition_mask) # Get indices satisfying the condition - - # Initialization of Ip - Ip.append({ - "ip_index": ip, - "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound), - "indices": indices, - "f_values": frequencies[tuple(indices.T)], - "z_values": damping_ratios[tuple(indices.T)] - }) - - # for Ip_item in Ip: - # print(f'Ip values: {Ip_item["f_values"]}') - - - # declared for appending - updated_indices = np.empty((0, 2), dtype=int) - f_updated_values = [] - z_updated_values = [] - # print(f'ip : {ip}') - - - # Find duplicates and their indices - # print(f'Indices : {Ip[ip]["indices"]}') - model_order_id = Ip[ip]["indices"][:,1] - # print(f'model order id: {model_order_id}') - unique, counts = np.unique(model_order_id, return_counts=True) - duplicates = unique[counts > 1] # model order number with duplicate modes - # print(f'Duplicate : {duplicates}') - # Create a boolean mask for duplicate rows - is_duplicate_row = np.isin(model_order_id, duplicates) - # Filter indices with duplicate values - indices_Ipm = Ip[ip]["indices"][is_duplicate_row] # Rows with duplicates - # print(f'Ipm indices: {indices_Ipm}') - indices_Ipu = Ip[ip]["indices"][~is_duplicate_row] - # print(f'Ipu indices: {indices_Ipu}') - # Check if indices_Ipu is empty - if indices_Ipu.size > 0: - ip_for_Ipu = indices_Ipu[np.argmax(indices_Ipu[:, 1])] - # print(f'ip for Ipu : {ip_for_Ipu}') - else: - print("No unique mode issue in this step.") - - - if duplicates.size == 0: - print("All values are unique.") - if 
len(indices)>1:
-
-            for ii in indices:
-                target_mode_shape = mode_shapes[ii[0], ii[1], :]  # Extract mode shape from the 3D array
-                reference_mode_shape = mode_shape_unique[ip]
-
-                # print(f'print target_mode_shape: {target_mode_shape}')
-                # print(f'print reference_mode_shape: {reference_mode_shape}')
-
-                # Calculate MAC with the reference mode shape
-                mac_value = calculate_mac(reference_mode_shape, target_mode_shape)
-                # print(f'MAC value: {mac_value}')
-                # print(f'ip : {ip}')
-                # print(f'MAC : {mac_value}')
-                # Check the MAC value to include in C. Algorithm 2: step 2
-                if mac_value > tMAC:
-                    # print(f'updated indices: {updated_indices}')
-                    # print(f'new indices to be added: {ii}')
-                    updated_indices = np.vstack([updated_indices, ii])
-                    f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)])
-                    z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)])
-                    # print(f'updated values: {updated_values}')
-                    # Check if the cluster already exists
-                    existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None)
-                    if existing_cluster:
-                        # Update existing cluster
-                        existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii])
-                        existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)])
-                        existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)])
-                    else:
-                        # Create a new cluster
-                        C_cluster.append({
-                            "ip_index": ip,
-                            "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound),
-                            "indices": np.copy(updated_indices),
-                            "f_values": np.copy(f_updated_values),
-                            "z_values": np.copy(z_updated_values)
-                        })
-
-        else:
-            C_cluster.append({
-                "ip_index": ip,
-                "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound),
-                "indices": indices,
-                "f_values": frequencies[tuple(indices.T)],
-                "z_values": damping_ratios[tuple(indices.T)]
-            })
-
-        # Handle the duplicate model order for single mode
-        else:
-            if len(indices_Ipu)>1:
-                for ii in indices_Ipu:
-                    target_mode_shape = mode_shapes[ii[0], ii[1], :]  # Extract mode shape from the 3D array
-                    reference_mode_shape = mode_shapes[ip_for_Ipu[0], ip_for_Ipu[1], :]
-
-                    # print(f'print target_mode_shape: {target_mode_shape}')
-                    # print(f'print reference_mode_shape: {reference_mode_shape}')
-
-                    # Calculate MAC with the reference mode shape
-                    mac_value = calculate_mac(reference_mode_shape, target_mode_shape)
-                    # print(f'MAC value: {mac_value}')
-                    # print(f'ip : {ip}')
-                    # print(f'MAC : {mac_value}')
-                    # Check the MAC value to include in C. Algorithm 2: step 2
-                    if mac_value > tMAC:
-                        # print(f'updated indices: {updated_indices}')
-                        # print(f'new indices to be added: {ii}')
-                        updated_indices = np.vstack([updated_indices, ii])
-                        f_updated_values = np.append(f_updated_values, frequencies[tuple(ii.T)])
-                        z_updated_values = np.append(z_updated_values, damping_ratios[tuple(ii.T)])
-                        # print(f'updated values: {updated_values}')
-                        # Check if the cluster already exists
-                        existing_cluster = next((c for c in C_cluster if c["ip_index"] == ip), None)
-                        if existing_cluster:
-                            # Update existing cluster
-                            existing_cluster["indices"] = np.vstack([existing_cluster["indices"], ii])
-                            existing_cluster["f_values"] = np.append(existing_cluster["f_values"], frequencies[tuple(ii.T)])
-                            existing_cluster["z_values"] = np.append(existing_cluster["z_values"], damping_ratios[tuple(ii.T)])
-                        else:
-                            # print(f'Ipu indices: {indices_Ipu} and frequencies: {f_updated_values}')
-                            # Create a new cluster
-                            C_cluster.append({
-                                "ip_index": ip,
-                                "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound),
-                                "indices": np.copy(updated_indices),
-                                "f_values": np.copy(f_updated_values),
-                                "z_values": np.copy(z_updated_values)
-                            })
-
-            else:
-                C_cluster.append({
-                    "ip_index": ip,
-                    "confidence_interval": (f_lower_bound, f_upper_bound, z_lower_bound, z_upper_bound),
-                    "indices": indices,
-                    "f_values": frequencies[tuple(indices.T)],
-                    "z_values": damping_ratios[tuple(indices.T)]
-                })
-
-
-    # for Ip_item in C_cluster:
-    #     print(f'C_cluster values: {Ip_item["f_values"]}')
-
-
-    Ip_C_cluster = []
-    # Algorithm 2: step 3 [condition check]
-    for item1 in C_cluster:
-        # print(f'C_cluster item: {item1}')
-        # print(f'C_cluster value: {item1["values"]}')
-
-        for item2 in Ip:
-            if item1['ip_index'] != item2['ip_index']:
-                continue  # Skip the comparison if ip_index is not the same
-
-            if len(item1['f_values']) == len(item2['f_values']):
-                # print('For C and Ip - values have the same length. Proceeding to compare the values.')
-
-                # Compare the values
-                if np.all(item1['f_values'] != item2['f_values']):
-                    # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}')
-                    continue
-                else:
-                    print('Values are the same between C_cluster and Ip')
-
-            else:
-                # print('Values have different lengths between C_cluster and Ip.')
-                updated_indices2 = np.empty((0, 2), dtype=int)  # Reset to empty 2D array
-                f_updated_values2 = []
-                z_updated_values2 = []
-                for pp in item1['indices']:
-                    for kk in item2['indices']:
-                        reference_mode_shape = mode_shapes[pp[0], pp[1], :]
-                        target_mode_shape = mode_shapes[kk[0], kk[1], :]
-                        mac_value = calculate_mac(reference_mode_shape, target_mode_shape)
-                        if mac_value > tMAC:
-                            updated_indices2 = np.vstack([updated_indices2, kk])
-                            f_updated_values2 = np.append(f_updated_values2, frequencies[tuple(kk.T)])
-                            z_updated_values2 = np.append(z_updated_values2, damping_ratios[tuple(kk.T)])
-                            # print(f'newly added indices: {kk}')
-                            # print(f'newly added values: {frequencies[tuple(kk.T)]}')
-                Ip_C_cluster.append({
-                    "ip_index": item1['ip_index'],
-                    "indices": updated_indices2,
-                    "f_values": f_updated_values2,
-                    "z_values": z_updated_values2
-                })
-
-    # for Ip_item in Ip_C_cluster:
-    #     print(f'Ip_C_cluster values: {Ip_item["f_values"]}')
-    #     print(f'Ip_C_cluster indices: {Ip_item["indices"]}')
-
-    # Initialize C_cluster_finale as a deep copy of C_cluster
-    C_cluster_finale = copy.deepcopy(C_cluster)
-
-    # Add the points from Ip_C_cluster if they satisfy MAC conditions
-    # Algorithm 2: step 3 [addition of point]
-    for item1 in C_cluster:
-        for item2 in Ip_C_cluster:
-            if item1['ip_index'] != item2['ip_index']:
-                continue  # Skip the comparison if ip_index is not the same
-
-            # Combine values from both clusters
-            f_merged_values = np.concatenate((item1['f_values'], item2['f_values']))
-            z_merged_values = np.concatenate((item1['z_values'], item2['z_values']))
-            # Combine indices from both clusters
-            merged_indices = np.concatenate((item1['indices'], item2['indices']))
-
-            # Find the corresponding cluster in C_cluster_finale
-            for finale_item in C_cluster_finale:
-                if finale_item['ip_index'] == item1['ip_index']:
-                    # Update values and indices
-                    finale_item['f_values'] = f_merged_values
-                    finale_item['z_values'] = z_merged_values
-                    finale_item['indices'] = merged_indices
-                    break  # Exit the loop once the match is found
-
-    # for C_item in C_cluster_finale:
-    #     print(f'C_cluster values end: {C_item["values"]}')
-
-    # Algorithm 2: step 4
-    Ip_indices = np.vstack([item['indices'] for item in C_cluster])
-    # Make a copy of frequencies to represent unclustered frequencies
-    unclustered_frequencies = frequencies.copy()
-    unclustered_damping = damping_ratios.copy()
-    # Update the copied matrix to NaN at collected indices
-    for idx in Ip_indices:
-        unclustered_frequencies[tuple(idx)] = np.nan  # Set to NaN
-        unclustered_damping[tuple(idx)] = np.nan
-
-    # print(f'Unclustered frequencies: {unclustered_frequencies}')
-
-    # Find all indices in the frequencies matrix
-    all_indices = np.array(np.meshgrid(np.arange(frequencies.shape[0]), np.arange(frequencies.shape[1]))).T.reshape(-1, 2)
-
-    # Identify unclustered indices: exclude NaN and indices in clustered_indices
-    unclustered_indices = []
-    for idx in all_indices:
-        if not np.isnan(frequencies[tuple(idx)]) and not any((idx == Ip_indices).all(axis=1)):
-            unclustered_indices.append(idx)
-
-    unclustered_indices = np.array(unclustered_indices)
-    # print(f'Unclustered indices: {unclustered_indices}')
-
-    return C_cluster_finale, unclustered_frequencies, unclustered_damping, unclustered_indices
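The deleted `cluster_frequencies` above accumulates MAC-consistent poles into one dictionary per reference mode. As an editor's illustration (not part of the diff), a cluster record has the following shape; all numbers are invented:

```python
import numpy as np

# Hypothetical cluster record as built by cluster_frequencies (values invented):
cluster = {
    "ip_index": 0,  # index of the reference pole taken from the highest model order
    "confidence_interval": (1.95, 2.05, 0.008, 0.012),  # (f_lo, f_hi, z_lo, z_hi)
    "indices": np.array([[3, 18], [3, 19]]),  # (row, model order) positions in the pole matrices
    "f_values": np.array([1.98, 2.01]),       # natural frequencies in Hz
    "z_values": np.array([0.010, 0.011]),     # damping ratios
}
```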
-
-# MAC calculation function
-def calculate_mac(reference_mode, mode_shape):
-    """
-    Compute the Modal Assurance Criterion (MAC) between two mode shapes.
-
-    Parameters
-    ----------
-    reference_mode : np.ndarray
-        Complex reference mode-shape vector.
-    mode_shape : np.ndarray
-        Complex mode-shape vector compared against the reference.
-
-    Returns
-    -------
-    float
-        MAC value between 0 and 1; 1 means fully consistent shapes.
-    """
-    numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2
-    denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape)
-    return np.real(numerator / denominator)
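Since `calculate_mac` gates every clustering decision in this file, a quick sanity check (editor's sketch, not in the diff) demonstrates the two properties the gate relies on: the MAC is invariant to complex scaling of a shape, and orthogonal shapes score zero.

```python
import numpy as np

def calculate_mac(reference_mode, mode_shape):
    # Same formula as the deleted function above.
    numerator = np.abs(np.dot(reference_mode.conj().T, mode_shape)) ** 2
    denominator = np.dot(reference_mode.conj().T, reference_mode) * np.dot(mode_shape.conj().T, mode_shape)
    return np.real(numerator / denominator)

phi = np.array([1.0, 0.5 + 0.1j, -0.3])
print(calculate_mac(phi, 2.0j * phi))  # ~1.0: complex scaling does not change the MAC
print(calculate_mac(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0: orthogonal shapes
```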
-
-def clusterexpansion(C_clusters, unClustered_frequencies, unClustered_damping, cov_freq, cov_damping, mode_shapes, unClustered_indices, tMAC, bound_multiplier=2):
-    """
-    Expand existing clusters with still-unclustered poles that fall inside the
-    cluster confidence bounds and pass the MAC check.
-
-    Parameters
-    ----------
-    C_clusters : list of dict
-        Clusters produced by the initial clustering step.
-    unClustered_frequencies : np.ndarray
-        Frequency matrix with clustered entries set to NaN.
-    unClustered_damping : np.ndarray
-        Damping matrix with clustered entries set to NaN.
-    cov_freq : np.ndarray
-        Frequency variance matrix.
-    cov_damping : np.ndarray
-        Damping variance matrix.
-    mode_shapes : np.ndarray
-        3D array of mode shapes (pole, model order, DOF).
-    unClustered_indices : np.ndarray
-        Indices of poles not yet assigned to any cluster.
-    tMAC : float
-        MAC threshold for accepting a pole into a cluster.
-    bound_multiplier : int, optional
-        Standard-deviation multiplier for the confidence bounds. The default is 2.
-
-    Returns
-    -------
-    C_cluster_finale : list of dict
-        Clusters after expansion.
-    unclustered_frequencies_expanded : np.ndarray
-        Frequencies still unclustered after expansion.
-    unclustered_damping_expanded : np.ndarray
-        Damping ratios still unclustered after expansion.
-    unclustered_indices_expnaded : np.ndarray
-        Indices still unclustered after expansion.
-    """
-
-    # cov_freq = cov_freq[::2]
-    # mode_shapes = mode_shapes[::2, :, :]
-
-    # import pprint
-    # for cluster in C_clusters:
-    #     pprint.pprint(cluster)
-
-    Ip_plus = []
-
-    for cluster in C_clusters:
-
-        f_values = cluster['f_values']
-        z_values = cluster['z_values']
-        indices = cluster['indices']
-
-        # **Skip if the cluster is empty**
-        if len(f_values) == 0:
-            print("Skipping empty cluster...")
-            continue  # Move to the next cluster
-
-        # print("Covariance Array:", np.sqrt(cov_freq[tuple(indices.T)]))
-        # Calculate the lower and upper bounds for the current cluster
-        # print(f'f_values: {f_values}')
-        # print(f'cov_freq[tuple(indices.T): {cov_freq[tuple(indices.T)]}')
-        f_lower_bound = np.min(f_values - bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)]))  # Minimum of all points for frequencies
-        f_upper_bound = np.max(f_values + bound_multiplier * np.sqrt(cov_freq[tuple(indices.T)]))  # Maximum of all points for frequencies
-        z_lower_bound = np.min(z_values - bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)]))  # Minimum of all points for damping
-        z_upper_bound = np.max(z_values + bound_multiplier * np.sqrt(cov_damping[tuple(indices.T)]))  # Maximum of all points for damping
-
-        # print(f'Print cluster lower bound: {lower_bound}')
-        # print(f'Print cluster upper bound: {upper_bound}')
-
-        # Find elements within the current limit that are still ungrouped
-        condition_mask2 = (unClustered_frequencies >= f_lower_bound) & (unClustered_frequencies <= f_upper_bound) & (unClustered_damping >= z_lower_bound) & (unClustered_damping <= z_upper_bound)
-        # Get indices satisfying the condition
-        expanded_indices = np.argwhere(condition_mask2)
-
-        # Initialize lists to store updated indices and values
-        updated_indices3 = []
-        f_updated_values3 = []
-        z_updated_values3 = []
-
-        # Loop through the unclustered indices and append matching values to the cluster
-        for idx in expanded_indices:
-            freq_value = unClustered_frequencies[tuple(idx)]  # Get the frequency value at this index
-            damp_value = unClustered_damping[tuple(idx)]  # Get the damping value at this index
-            updated_indices3.append(idx)  # Append the index
-            f_updated_values3.append(freq_value)  # Append the frequency value
-            z_updated_values3.append(damp_value)  # Append the damping value
-
-        # Create a new cluster and append it to Ip_plus_cluster
-        Ip_plus.append({
-            "ip_index": cluster['ip_index'],  # Use the ip_index from the original cluster
-            "indices": np.array(updated_indices3),  # Updated indices
-            "f_values": np.array(f_updated_values3),  # Updated frequency values
-            "z_values": np.array(z_updated_values3)  # Updated damping values
-        })
-
-    Ip_plus_C = []
-    # Algorithm 2: step 3 [condition check]
-    for item1 in C_clusters:
-        # print(f'C_cluster item: {item1}')
-        # print(f'C_cluster value: {item1["values"]}')
-
-        for item2 in Ip_plus:
-            if item1['ip_index'] != item2['ip_index']:
-                continue  # Skip the comparison if ip_index is not the same
-
-            if len(item1['f_values']) == len(item2['f_values']):
-                # print('For C and Ip - values have the same length. Proceeding to compare the values.')
-
-                # Compare the values
-                if np.all(item1['f_values'] != item2['f_values']):
-                    # print(f'Values are different between C_cluster and Ip: {item1["values"]} vs {item2["values"]}')
-                    continue
-                else:
-                    print(f'Values are the same between C_cluster and Ip_plus: {item1["f_values"]}')
-
-            else:
-                # print('Values have different lengths between C_cluster and Ip.')
-                updated_indices4 = np.empty((0, 2), dtype=int)  # Reset to empty 2D array
-                f_updated_values4 = []
-                z_updated_values4 = []
-                for pp in item1['indices']:
-                    for kk in item2['indices']:
-                        reference_mode_shape = mode_shapes[pp[0], pp[1], :]
-                        target_mode_shape = mode_shapes[kk[0], kk[1], :]
-                        mac_value = calculate_mac(reference_mode_shape, target_mode_shape)
-                        if mac_value > tMAC:
-                            updated_indices4 = np.vstack([updated_indices4, kk])
-                            f_updated_values4 = np.append(f_updated_values4, unClustered_frequencies[tuple(kk.T)])
-                            z_updated_values4 = np.append(z_updated_values4, unClustered_damping[tuple(kk.T)])
-                            # print(f'newly added indices: {kk}')
-                            # print(f'newly added values: {frequencies[tuple(kk.T)]}')
-                Ip_plus_C.append({
-                    "ip_index": item1['ip_index'],
-                    "indices": updated_indices4,
-                    "f_values": f_updated_values4,
-                    "z_values": z_updated_values4
-                })
-
-    # Initialize C_cluster_finale as a deep copy of C_cluster
-    C_cluster_finale = copy.deepcopy(C_clusters)
-
-    # Add the points from Ip_C_cluster if they satisfy MAC conditions
-    # Algorithm 2: step 3 [addition of point]
-    for item1 in C_clusters:
-        for item2 in Ip_plus_C:
-            if item1['ip_index'] != item2['ip_index']:
-                continue  # Skip the comparison if ip_index is not the same
-
-            # Combine values from both clusters
-            f_merged_values2 = np.concatenate((item1['f_values'], item2['f_values']))  # concatenate frequencies
-            z_merged_values2 = np.concatenate((item1['z_values'], item2['z_values']))  # concatenate damping
-            # Combine indices from both clusters
-            merged_indices2 = np.concatenate((item1['indices'], item2['indices']))
-
-            # Find the corresponding cluster in C_cluster_finale
-            for finale_item in C_cluster_finale:
-                if finale_item['ip_index'] == item1['ip_index']:
-                    # Update values and indices
-                    finale_item['f_values'] = f_merged_values2
-                    finale_item['z_values'] = z_merged_values2
-                    finale_item['indices'] = merged_indices2
-                    break  # Exit the loop once the match is found
-
-    # Algorithm 2: step 4
-    # Filter out empty 'indices' arrays and check if there are any non-empty ones
-    valid_indices = [item['indices'] for item in C_clusters if item['indices'].size > 0]
-
-    if valid_indices:
-        # If there are valid indices, proceed with stacking
-        Ip_plus_indices = np.vstack(valid_indices)
-    else:
-        # If there are no valid indices, handle accordingly (e.g., set to empty or raise a warning)
-        # print("No valid indices to stack.")
-        Ip_plus_indices = np.array([])  # Or choose another fallback behavior
-    # Make a copy of frequencies to represent unclustered frequencies
-    unclustered_frequencies_expanded = unClustered_frequencies.copy()
-    unclustered_damping_expanded = unClustered_damping.copy()
-    # Update the copied matrix to NaN at collected indices
-    for idx in Ip_plus_indices:
-        unclustered_frequencies_expanded[tuple(idx)] = np.nan  # Set to NaN
-        unclustered_damping_expanded[tuple(idx)] = np.nan  # Set to NaN
-
-    # print(f'Unclustered frequencies: {unclustered_frequencies}')
-
-    # Find all indices in the frequencies matrix
-    all_indices = np.array(np.meshgrid(np.arange(unClustered_frequencies.shape[0]), np.arange(unClustered_frequencies.shape[1]))).T.reshape(-1, 2)
-
-    # Identify unclustered indices: exclude NaN and indices in clustered_indices
-    unclustered_indices_expnaded = []
-    for idx in all_indices:
-        # if not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)):
-        if Ip_plus_indices.size > 0 and not np.isnan(unClustered_frequencies[tuple(idx)]) and not any((idx == Ip_plus_indices).all(axis=1)):
-            unclustered_indices_expnaded.append(idx)
-
-    unclustered_indices_expnaded = np.array(unclustered_indices_expnaded)
-    # print(f'Unclustered indices expanded: {unclustered_indices_expnaded}')
-
-    return C_cluster_finale, unclustered_frequencies_expanded, unclustered_damping_expanded, unclustered_indices_expnaded
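The expansion step admits an unclustered pole only if it falls inside the cluster's plus/minus `bound_multiplier`-sigma box in both frequency and damping before the MAC test is applied. A minimal numeric sketch of that gate (editor's example, made-up numbers):

```python
import numpy as np

f_values = np.array([2.00, 2.02])   # cluster frequencies in Hz
f_var = np.array([1e-4, 4e-4])      # their variances (entries of cov_freq)
k = 2                               # bound_multiplier
f_lo = np.min(f_values - k * np.sqrt(f_var))  # 1.98
f_hi = np.max(f_values + k * np.sqrt(f_var))  # 2.06
candidate = 2.05
print(f_lo <= candidate <= f_hi)    # True -> candidate proceeds to the MAC check
```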
- - """ - # Sort clusters by their median if available, otherwise keep original order - clusters.sort(key=lambda cluster: np.median(cluster["values"]) if "values" in cluster and len(cluster["values"]) > 0 else float('inf')) - - # Create subplots (one for each cluster) - num_clusters = len(clusters) - fig, axs = plt.subplots(num_clusters, 1, figsize=(10, 5 * num_clusters), tight_layout=True) - - - if num_clusters == 1: - axs = [axs] # Ensure axs is always iterable - - for idx, (cluster, ax) in enumerate(zip(clusters, axs)): - cluster_values = cluster["f_values"] - cluster_indices = cluster["indices"] - cluster_cov = cov_freq[tuple(np.array(cluster_indices).T)] # Covariance for original cluster - - # Extract the second part of the cluster indices for plotting - model_orders = cluster_indices[:, 1] - - # Scatter plot the cluster values against model orders - ax.scatter(cluster_values, model_orders, label="Cluster Data") - - # Plot the cluster values with covariance as error bars - ax.errorbar( - cluster_values, - model_orders, # Use the second index for the vertical axis - xerr=(np.sqrt(cluster_cov)*bounds), # Error bars for x-values based on covariance - fmt='o', capsize=5, ecolor='red', label="± 2σ" - ) - - # Check if the 'median' key exists in the cluster dictionary - if 'median' in cluster: - median_value = cluster["median"] - if not np.isnan(median_value): # If median is not NaN, plot the vertical line - ax.axvline(median_value, color='blue', linestyle='--', label='Median') - - ax.set_title(f"Cluster {idx + 1}") - ax.set_xlabel("Frequency [Hz]") - ax.set_ylabel("Model Order") - ax.set_ylim(0, 21) - ax.legend() - ax.grid() - - plt.show() - - -def clean_clusters_by_median(clusters, cov_freq, bound_multiplier=2): - """ - - - Parameters - ---------- - clusters : TYPE - DESCRIPTION. - cov_freq : TYPE - DESCRIPTION. - bound_multiplier : TYPE, optional - DESCRIPTION. The default is 2. - - Returns - ------- - cleaned_clusters : TYPE - DESCRIPTION. 
- - """ - cleaned_clusters = [] - - for cluster_idx, cluster in enumerate(clusters): - # Extract values and indices from the cluster - f_cluster_values = np.array(cluster["f_values"]) - z_cluster_values = np.array(cluster["z_values"]) - cluster_indices = np.array(cluster["indices"]) - - # Extract covariance for each cluster element - f_cluster_cov = cov_freq[tuple(cluster_indices.T)] # Extract covariance for the given indices - - # Remove duplicates by using unique values and their indices - f_unique_values, unique_indices = np.unique(f_cluster_values, return_index=True) - f_unique_cov = f_cluster_cov[unique_indices] - z_unique = z_cluster_values[unique_indices] - unique_indices_2D = cluster_indices[unique_indices] - - # Update the original cluster with unique values and indices - cluster["f_values"] = f_unique_values - cluster["z_values"] = z_unique - cluster["indices"] = unique_indices_2D - - # Calculate the median of the unique values - median_value = np.nanmedian(f_unique_values) - - # Define bounds for filtering based on the bound_multiplier and covariance - lower_bound = f_unique_values - bound_multiplier * np.sqrt(f_unique_cov) - upper_bound = f_unique_values + bound_multiplier * np.sqrt(f_unique_cov) - - # Keep elements where the median lies within the bounds - mask = (median_value >= lower_bound) & (median_value <= upper_bound) - f_cleaned_values = f_unique_values[mask] - z_cleaned_values = z_unique[mask] - cleaned_indices = unique_indices_2D[mask] - - # Append the cleaned cluster to the result if there are enough values - if len(f_cleaned_values) > 1: # Keep clusters with more than one cleaned value - cleaned_clusters.append({ - "original_cluster": cluster, # Store the original cluster (now updated with unique values) - "f_values": f_cleaned_values, - "z_values": z_cleaned_values, - "indices": cleaned_indices, - "median": median_value, - "bound_multiplier": bound_multiplier, # Store the bound multiplier used - }) - - return cleaned_clusters - - -def mode_allingment(ssi_mode_track_res, mstab, tMAC): - print("DEBUG: oma_output inside mode_allingment:", type(ssi_mode_track_res), ssi_mode_track_res) - - # extract results - frequencies = ssi_mode_track_res['Fn_poles'] - cov_freq = ssi_mode_track_res['Fn_poles_cov'] - damping_ratios = ssi_mode_track_res['Xi_poles'] - cov_damping = ssi_mode_track_res['Xi_poles_cov'] - mode_shapes = ssi_mode_track_res['Phi_poles'] - bounds = 2 # standard deviation multiplier - - frequencies_max_MO = frequencies[:,-1] - cov_freq_max_MO = cov_freq[:,-1] - damping_ratios_max_MO = damping_ratios[:,-1] - cov_damping_max_MO = cov_damping[:,-1] - mode_shapes_max_MO = mode_shapes[:,-1,:] - - frequencies_copy = frequencies.copy() - - # Remove the complex conjugate entries - frequencies = frequencies[::2] # This is 'S' as per algorithm - damping_ratios = damping_ratios[::2] # This is 'S' as per algorithm - mode_shapes = mode_shapes[::2, :, :] - cov_freq = cov_freq[::2] - cov_damping = cov_damping[::2] - - frequency_coefficient_variation = np.sqrt(cov_freq)/frequencies - damping_coefficient_variation = np.sqrt(cov_damping)/damping_ratios - indices_frequency = frequency_coefficient_variation > 0.05 - indices_damping = damping_coefficient_variation > 0.5 - combined_indices = indices_frequency & indices_damping - frequencies[combined_indices] = np.nan - damping_ratios[combined_indices] = np.nan - cov_freq[combined_indices] = np.nan - cov_damping[combined_indices] = np.nan - - - # Initial clustering - C_clusters, unClustd_frequencies, unClustd_damping, unClustd_indices 
-
-
-def mode_allingment(ssi_mode_track_res, mstab, tMAC):
-    print("DEBUG: oma_output inside mode_allingment:", type(ssi_mode_track_res), ssi_mode_track_res)
-
-    # Extract results
-    frequencies = ssi_mode_track_res['Fn_poles']
-    cov_freq = ssi_mode_track_res['Fn_poles_cov']
-    damping_ratios = ssi_mode_track_res['Xi_poles']
-    cov_damping = ssi_mode_track_res['Xi_poles_cov']
-    mode_shapes = ssi_mode_track_res['Phi_poles']
-    bounds = 2  # standard deviation multiplier
-
-    frequencies_max_MO = frequencies[:, -1]
-    cov_freq_max_MO = cov_freq[:, -1]
-    damping_ratios_max_MO = damping_ratios[:, -1]
-    cov_damping_max_MO = cov_damping[:, -1]
-    mode_shapes_max_MO = mode_shapes[:, -1, :]
-
-    frequencies_copy = frequencies.copy()
-
-    # Remove the complex conjugate entries
-    frequencies = frequencies[::2]  # This is 'S' as per algorithm
-    damping_ratios = damping_ratios[::2]  # This is 'S' as per algorithm
-    mode_shapes = mode_shapes[::2, :, :]
-    cov_freq = cov_freq[::2]
-    cov_damping = cov_damping[::2]
-
-    frequency_coefficient_variation = np.sqrt(cov_freq) / frequencies
-    damping_coefficient_variation = np.sqrt(cov_damping) / damping_ratios
-    indices_frequency = frequency_coefficient_variation > 0.05
-    indices_damping = damping_coefficient_variation > 0.5
-    combined_indices = indices_frequency & indices_damping
-    frequencies[combined_indices] = np.nan
-    damping_ratios[combined_indices] = np.nan
-    cov_freq[combined_indices] = np.nan
-    cov_damping[combined_indices] = np.nan
-
-    # Initial clustering
-    C_clusters, unClustd_frequencies, unClustd_damping, unClustd_indices = cluster_frequencies(frequencies, damping_ratios,
-                                                                                               mode_shapes, frequencies_max_MO, cov_freq_max_MO,
-                                                                                               damping_ratios_max_MO, cov_damping_max_MO,
-                                                                                               mode_shapes_max_MO, tMAC, bound_multiplier=bounds)
-
-    # Expansion step
-    C_expanded, unClustd_frequencies_expanded, unClustd_damping_expanded, unClustd_indices_expanded = clusterexpansion(C_clusters, unClustd_frequencies, unClustd_damping,
-                                                                                                                       cov_freq, cov_damping, mode_shapes,
-                                                                                                                       unClustd_indices, tMAC, bound_multiplier=bounds)
-
-    last_ip_index = max(cluster['ip_index'] for cluster in C_expanded)
-
-    count = 0
-
-    # Loop until unClustd_indices contains only one index
-    while True:
-        # print(f"unClustd indices expanded size before: {unClustd_indices_expanded.size}")
-        # print(f'Loop counter: {count}')
-        count += 1
-        # Check the termination condition
-        if unClustd_indices_expanded.size <= 2:  # Stop if there are fewer than 2 indices
-            print("No more unclustered indices to process. Exiting ...")
-            break
-
-        # Get the highest column index from unClustd_indices
-        highest_column = np.max(unClustd_indices_expanded[:, 1])  # Assuming column index is in the second column
-
-        # Create a mask for the unclustered indices
-        mask1 = np.full(frequencies.shape, False)  # Initialize a boolean mask
-        mask1[tuple(unClustd_indices_expanded.T)] = True  # Set only unclustered indices to True
-        unClustd_frequencies = frequencies.copy()
-        unClustd_damping = damping_ratios.copy()
-        unClustd_frequencies[~mask1] = np.nan
-        unClustd_damping[~mask1] = np.nan
-        unClustd_cov_freq = cov_freq.copy()
-        unClustd_cov_damp = cov_damping.copy()
-        unClustd_cov_freq[~mask1] = np.nan  # Unclustered frequency variance matrix
-        unClustd_cov_damp[~mask1] = np.nan  # Unclustered damping variance matrix
-        unClustd_mode_shapes = mode_shapes.copy()
-
-        for ii in range(unClustd_mode_shapes.shape[2]):
-            slice_2d = unClustd_mode_shapes[:, :, ii]
-            slice_2d[~mask1] = np.nan
-            unClustd_mode_shapes[:, :, ii] = slice_2d  # Unclustered mode shape matrix
-
-        # Filter the data for the highest column
-        frequencies_max_MO = unClustd_frequencies_expanded[:, highest_column]
-        # print(f'Maximum model order: {highest_column}')
-        # print(f'MO frequencies: {frequencies_max_MO}')
-        damping_ratios_max_MO = unClustd_damping_expanded[:, highest_column]
-        # print(f'frequencies initialization: {frequencies_max_MO}')
-        cov_freq_max_MO = unClustd_cov_freq[:, highest_column]
-        cov_damp_max_MO = unClustd_cov_damp[:, highest_column]
-        mode_shapes_max_MO = unClustd_mode_shapes[:, highest_column, :]
-
-        # Call the cluster_frequencies function with updated parameters
-        C_cluster_loop, unClustd_frequencies_loop, unClustd_damping_loop, unClustd_indices_loop = cluster_frequencies(
-            unClustd_frequencies,
-            unClustd_damping,
-            unClustd_mode_shapes,
-            frequencies_max_MO,
-            cov_freq_max_MO,
-            damping_ratios_max_MO,
-            cov_damping_max_MO,
-            mode_shapes_max_MO,
-            tMAC,
-            bound_multiplier=bounds
-        )
-        print("Initial clustering done.")
-
-        # import pprint
-        # for cluster in C_clusters:
-        #     pprint.pprint(cluster)
-
-        if unClustd_indices_loop.size == 0:
-            print("No unclustered indices left. Exiting ...")
-            # Update the clusters with new 'ip_index' values
-            for cluster in C_cluster_loop:
-                # Update the ip_index for the new clusters (starting from last_ip_index + 1)
-                new_ip_index = last_ip_index + 1
-                cluster["ip_index"] = new_ip_index
-
-                # Append the updated cluster to the final list
-                C_expanded.append(cluster)
-
-                # Update last_ip_index to the newly assigned ip_index for the next iteration
-                last_ip_index = new_ip_index
-
-            # print('before break')
-            break
-            # print('after break')
-
-        print("Expansion started in loop.")
-        # Expansion step for each initial cluster
-        C_expanded_loop, unClustd_frequencies_expanded_loop, unClustd_damping_expanded_loop, unClustd_indices_expanded_loop = clusterexpansion(
-            C_cluster_loop,
-            unClustd_frequencies_loop,
-            unClustd_damping_loop,
-            cov_freq,
-            cov_damping,
-            mode_shapes,
-            unClustd_indices_loop,
-            tMAC,
-            bound_multiplier=bounds
-        )
-        print("Expansion clustering done.")
-
-        # Update the clusters with new 'ip_index' values
-        for cluster in C_expanded_loop:
-            # Update the ip_index for the new clusters (starting from last_ip_index + 1)
-            new_ip_index = last_ip_index + 1
-            cluster["ip_index"] = new_ip_index
-
-            # Append the updated cluster to the final list
-            C_expanded.append(cluster)
-
-            # Update last_ip_index to the newly assigned ip_index for the next iteration
-            last_ip_index = new_ip_index
-
-        # print("Expansion added to clustering.")
-
-        if unClustd_indices_expanded_loop.size == 0:
-            print("No unclustered indices left. Exiting ...")
-            break
-        # Update the unClustd_indices for the next iteration
-        unClustd_indices_expanded = unClustd_indices_expanded_loop[
-            unClustd_indices_expanded_loop[:, 1] != highest_column
-        ]
-
-        # Check if the size of unClustd_indices_expanded has become less than or equal to 2
-        if unClustd_indices_expanded.size <= 2:
-            print("Unclustered indices size <= 2. Stopping ...")
-            break
-
-    # Remove repetition during merge
-    for cluster in C_expanded:
-        # Get the current values
-        f_values = cluster['f_values']
-        indices = cluster['indices']
-        z_values = cluster['z_values']
-        # Find unique f_values and their indices
-        unique_f_values, unique_indices = np.unique(f_values, return_index=True)
-        cluster['f_values'] = unique_f_values
-        cluster['indices'] = indices[unique_indices]
-        cluster['z_values'] = z_values[unique_indices]
-
-    # # Visualize the initial clusters
-    # visualize_clusters(C_expanded, cov_freq, bounds)
-
-    # for cluster in C_expanded:
-    #     print(f"ip_index: {cluster['ip_index']}, f_values length: {len(cluster['f_values'])}")
-    #     print(f"Cluster confidence interval: {cluster['confidence_interval'][0:2]}")
-    #     print(f"Cluster shape: {len(cluster['f_values'])}")
-    #     # pprint.pprint(cluster)
-    #     # print(f"ip_index: {cluster['ip_index']}")
-    #     # print(f"indices shape: {cluster['indices'].shape}")
-    #     # print(f"f_values shape: {len(cluster['f_values'])}")
-
-    print('Cluster filter started')
-    # Filter clusters with less than 'mstab' elements
-    C_expanded_filtered = [cluster for cluster in C_expanded if cluster['indices'].shape[0] > mstab]
-    # Sort clusters by the lower bound of their confidence_interval (the first value in the tuple)
-    C_expanded_filtered.sort(key=lambda cluster: cluster['confidence_interval'][0])
-    print('Cluster filter finished')
-
-    # # Visualize the cluster filter by element numbers
-    # visualize_clusters(C_expanded_filtered, cov_freq, bounds)
-
-    # Cluster cleaning based on median
-    cleaned_clusters = clean_clusters_by_median(C_expanded_filtered, cov_freq, bound_multiplier=bounds)
-
-    # Remove duplicate clusters
-    seen = set()
-    uq_clusters = []
-    for d in cleaned_clusters:
-        f_values_tuple = tuple(d['f_values'])
-        if f_values_tuple not in seen:
-            seen.add(f_values_tuple)
-            uq_clusters.append(d)
-
-    for cluster in uq_clusters:
-        indices = cluster['indices']
-        mode_shapes_list = []
-
-        for idx in indices:
-            # Extract mode shapes using indices
-            mode_shape = mode_shapes[idx[0], idx[1], :]
-            mode_shapes_list.append(mode_shape)
-
-        # Add mode shapes to the dictionary
-        cluster['mode_shapes'] = np.array(mode_shapes_list)
-
-    uq_clusters_sorted = sorted(uq_clusters, key=lambda cluster: cluster["median"])
-
-    return uq_clusters_sorted
diff --git a/src/methods/packages/pyoma/ssiWrapper.py b/src/methods/packages/pyoma/ssiWrapper.py
index eadd783..c88bdc8 100644
--- a/src/methods/packages/pyoma/ssiWrapper.py
+++ b/src/methods/packages/pyoma/ssiWrapper.py
@@ -1,6 +1,8 @@
 import typing
 import logging
 
+import numpy as np
+
 from pyoma2.algorithms.data.result import SSIResult
 from pyoma2.algorithms.data.run_params import SSIRunParams
 from pyoma2.algorithms.base import BaseAlgorithm
@@ -106,21 +108,21 @@ def run(self) -> SSIResult:
         Fns, Xis, Phis, Fn_cov, Xi_cov, Phi_cov = gen.applymask(
             lista, mask7, Phis.shape[2]
         )
-
-
-        # Get the labels of the poles
-        Lab = gen.SC_apply(
-            Fns,
-            Xis,
-            Phis,
-            ordmin,
-            ordmax,
-            step,
-            sc["err_fn"],
-            sc["err_xi"],
-            sc["err_phi"],
-        )
+        # Infer minimum order
+        for ii in range(ordmin):
+            id = ii
+            nan_Matrix = np.empty(Fns.shape[0])
+            nan_Matrix[:] = np.nan
+            Fns[:, id] = nan_Matrix
+            Xis[:, id] = nan_Matrix
+            Fn_cov[:, id] = nan_Matrix
+            Xi_cov[:, id] = nan_Matrix
+            nan_Matrix = np.empty((Phis.shape[0], Phis.shape[2]))
+            nan_Matrix[:, :] = np.nan
+            Phis[:, id, :] = nan_Matrix
+            Phi_cov[:, id, :] = nan_Matrix
+
         return SSIResult(
             Obs=Obs,
             A=A,
@@ -130,7 +132,6 @@ def run(self) -> SSIResult:
             Fn_poles=Fns,
             Xi_poles=Xis,
             Phi_poles=Phis,
-            Lab=Lab,
             Fn_poles_cov=Fn_cov,
             Xi_poles_cov=Xi_cov,
             Phi_poles_cov=Phi_cov,
diff --git a/src/methods/packages/yafem-0.2.6-py3-none-any.whl b/src/methods/packages/yafem-0.2.6-py3-none-any.whl
deleted file mode 100644
index 0ad7e9f..0000000
Binary files a/src/methods/packages/yafem-0.2.6-py3-none-any.whl and /dev/null differ
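The new `ordmin` loop in `ssiWrapper.py` blanks out all pole data below the minimum model order. An equivalent vectorized formulation (editor's note, with dummy arrays standing in for the wrapper's float matrices):

```python
import numpy as np

n_poles, n_orders, n_dof, ordmin = 40, 20, 4, 2
Fns = np.random.rand(n_poles, n_orders)
Xis = np.random.rand(n_poles, n_orders)
Fn_cov = np.random.rand(n_poles, n_orders)
Xi_cov = np.random.rand(n_poles, n_orders)
Phis = np.random.rand(n_poles, n_orders, n_dof)
Phi_cov = np.random.rand(n_poles, n_orders, n_dof)

# NaN-fill every column below ordmin in one shot instead of a per-order loop.
for arr in (Fns, Xis, Fn_cov, Xi_cov):
    arr[:, :ordmin] = np.nan
for arr in (Phis, Phi_cov):
    arr[:, :ordmin, :] = np.nan
```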
""" + t1 = time.time() + loop = True while True: try: - time.sleep(0.5) - oma_output, timestamp = get_oma_results(sampling_period, aligner, fs) - print(f"OMA result: {oma_output}") - print(f"Timestamp: {timestamp}") + time.sleep(0.1) + t2 = time.time() + t_text = f"Waiting for data for {round(t2-t1,1)} seconds" + print(t_text,end="\r") + sysid_output, timestamp = get_sysid_results(sampling_period, aligner, fs) + - if oma_output: + if sysid_output: + print(f"Timestamp: {timestamp}") payload = { "timestamp": timestamp.isoformat(), - "OMA_output": convert_numpy_to_list(oma_output) + "sysid_output": convert_numpy_to_list(sysid_output) } try: message = json.dumps(payload) @@ -149,16 +149,20 @@ def publish_oma_results(sampling_period: int, aligner: Aligner, publish_client.reconnect() publish_client.publish(publish_topic, message, qos=1) - print(f"[{timestamp.isoformat()}] Published OMA result to {publish_topic}") + print(f"[{timestamp.isoformat()}] Published sysid result to {publish_topic}") + loop = True break - except Exception as e: - print(f"Failed to publish OMA result: {e}") + print(f"\nFailed to publish sysid result: {e}") + except KeyboardInterrupt: - print("Shutting down gracefully") - aligner.client.loop_stop() - aligner.client.disconnect() + print("\nShutting down gracefully") + aligner.mqtt_client.loop_stop() + aligner.mqtt_client.disconnect() publish_client.disconnect() + loop = False break except Exception as e: - print(f"Unexpected error: {e}") + print(f"\nUnexpected error: {e}") + + return loop diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py index c047e0b..4558044 100644 --- a/tests/integration/methods/test_sys_id.py +++ b/tests/integration/methods/test_sys_id.py @@ -3,21 +3,24 @@ from datetime import datetime from unittest.mock import MagicMock -from methods import sys_id +from methods import sysid + + def test_sysid(): # Define OMA parameters - oma_params = { + sysid_params = { "Fs": 100, # Sampling frequency in Hz "block_shift": 30, # Block shift parameter - "model_order": 20 # Model order + "model_order": 20, # Model order + "model_order_min": 1 # Lowest model order } # Load test data data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T # Perform system identification - sysid_output = sys_id.sysid(data, oma_params) + sysid_output = sysid.sysid(data, sysid_params) # Extract results using dictionary keys frequencies = sysid_output['Fn_poles'] @@ -25,7 +28,8 @@ def test_sysid(): damping_ratios = sysid_output['Xi_poles'] cov_damping = sysid_output['Xi_poles_cov'] mode_shapes = sysid_output['Phi_poles'] - poles_label = sysid_output['Lab'] + + # Load stored reference results stored_data = np.load('tests/integration/input_data/expected_sysid_output.npz') @@ -34,41 +38,39 @@ def test_sysid(): stored_damping_ratios = stored_data['damping_ratios'] stored_cov_damping = stored_data['cov_damping'] stored_mode_shapes = stored_data['mode_shapes'] - stored_poles_label = stored_data['poles_label'] - + tolerance = 0.4 assert np.allclose(frequencies, stored_frequencies, atol=tolerance, equal_nan=True), "Frequencies do not match!" assert np.allclose(cov_freq, stored_cov_freq, atol=tolerance, equal_nan=True), "Covariance frequencies do not match!" assert np.allclose(damping_ratios, stored_damping_ratios, atol=tolerance, equal_nan=True), "Damping ratios do not match!" - assert np.allclose(cov_damping, stored_cov_damping, atol=tolerance, equal_nan=True), "Covariance damping ratios do not match!" 
diff --git a/tests/integration/methods/test_sys_id.py b/tests/integration/methods/test_sys_id.py
index c047e0b..4558044 100644
--- a/tests/integration/methods/test_sys_id.py
+++ b/tests/integration/methods/test_sys_id.py
@@ -3,21 +3,24 @@
 from datetime import datetime
 from unittest.mock import MagicMock
 
-from methods import sys_id
+from methods import sysid
+
+
 def test_sysid():
     # Define OMA parameters
-    oma_params = {
+    sysid_params = {
         "Fs": 100,  # Sampling frequency in Hz
         "block_shift": 30,  # Block shift parameter
-        "model_order": 20  # Model order
+        "model_order": 20,  # Model order
+        "model_order_min": 1  # Lowest model order
     }
 
     # Load test data
     data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T
 
     # Perform system identification
-    sysid_output = sys_id.sysid(data, oma_params)
+    sysid_output = sysid.sysid(data, sysid_params)
 
     # Extract results using dictionary keys
     frequencies = sysid_output['Fn_poles']
@@ -25,7 +28,8 @@ def test_sysid():
     damping_ratios = sysid_output['Xi_poles']
     cov_damping = sysid_output['Xi_poles_cov']
     mode_shapes = sysid_output['Phi_poles']
-    poles_label = sysid_output['Lab']
+
 
     # Load stored reference results
     stored_data = np.load('tests/integration/input_data/expected_sysid_output.npz')
@@ -34,41 +38,39 @@ def test_sysid():
     stored_damping_ratios = stored_data['damping_ratios']
     stored_cov_damping = stored_data['cov_damping']
     stored_mode_shapes = stored_data['mode_shapes']
-    stored_poles_label = stored_data['poles_label']
-
+
     tolerance = 0.4
     assert np.allclose(frequencies, stored_frequencies, atol=tolerance, equal_nan=True), "Frequencies do not match!"
     assert np.allclose(cov_freq, stored_cov_freq, atol=tolerance, equal_nan=True), "Covariance frequencies do not match!"
     assert np.allclose(damping_ratios, stored_damping_ratios, atol=tolerance, equal_nan=True), "Damping ratios do not match!"
-    assert np.allclose(cov_damping, stored_cov_damping, atol=tolerance, equal_nan=True), "Covariance damping ratios do not match!"
+    assert np.allclose(cov_damping, stored_cov_damping, atol=tolerance*2, equal_nan=True), "Covariance damping ratios do not match!"
     assert np.allclose(mode_shapes, stored_mode_shapes, atol=tolerance, equal_nan=True), "Mode shapes do not match!"
-    assert np.array_equal(poles_label, stored_poles_label), "Pole labels do not match!"
-
-def test_sysid_full_flow_success():
+def test_oma_full_flow_success():
     """
     Simulates full OMA flow: aligned data → sysid → conversion to JSON-safe format.
     """
     # Simulate 600 samples, 3 channels (e.g., 1 min * 10 Hz)
     data = np.random.randn(3, 600)
 
-    oma_params = {
+    sysid_params = {
         "Fs": 100,
         "block_shift": 30,
-        "model_order": 20
+        "model_order": 6,
+        "model_order_min": 1
     }
 
-    oma_result = sys_id.sysid(data, oma_params)
+    sysid_result = sysid.sysid(data, sysid_params)
 
     # Check output structure
-    assert isinstance(oma_result, dict)
+    assert isinstance(sysid_result, dict)
     for key in ["Fn_poles", "Xi_poles", "Phi_poles"]:
-        assert key in oma_result
-        assert isinstance(oma_result[key], list) or isinstance(oma_result[key], np.ndarray)
+        assert key in sysid_result
+        assert isinstance(sysid_result[key], list) or isinstance(sysid_result[key], np.ndarray)
 
     # Convert to JSON-safe structure
-    converted = sys_id.convert_numpy_to_list(oma_result)
+    converted = sysid.convert_numpy_to_list(sysid_result)
     assert isinstance(converted, dict)
     assert isinstance(converted["Fn_poles"], list)
 
@@ -76,7 +78,7 @@ def test_get_oma_results_integration(mocker):
     from datetime import datetime
     import numpy as np
-    from methods import sys_id
+    from methods import sysid
 
     fs = 100  # sampling frequency
     mock_aligner = MagicMock()
@@ -88,24 +90,25 @@ def test_get_oma_results_integration(mocker):
     mock_aligner.extract.return_value = (mock_data, mock_timestamp)
 
-    oma_output, timestamp = sys_id.get_oma_results(number_of_minutes, mock_aligner, fs)
+    sysid_output, timestamp = sysid.get_sysid_results(number_of_minutes, mock_aligner, fs)
 
-    assert isinstance(oma_output, dict)
-    assert "Fn_poles" in oma_output
+    assert isinstance(sysid_output, dict)
+    assert "Fn_poles" in sysid_output
     assert timestamp == mock_timestamp
 
-def test_sysid_raises_on_empty_data():
+def test_oma_raises_on_empty_data():
     """
     SSI should raise an error if data is empty (simulating a low-data scenario).
     """
     data = np.empty((0, 3))  # No samples
-    oma_params = {
+    sysid_params = {
         "Fs": 10.0,
         "block_shift": 5,
-        "model_order": 6
+        "model_order": 6,
+        "model_order_min": 1
     }
 
     with pytest.raises(Exception):
-        sys_id.sysid(data, oma_params)
+        sysid.sysid(data, sysid_params)
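If the stored reference in `expected_sysid_output.npz` ever needs regenerating (for example after the `Lab` removal above), the key names the integration test loads imply a save call along these lines (editor's sketch; the project may have its own script for this):

```python
import numpy as np
from methods import sysid  # module name per the rename in this diff

data = np.loadtxt('tests/integration/input_data/Acc_4DOF.txt').T
sysid_output = sysid.sysid(data, {
    "Fs": 100, "block_shift": 30, "model_order": 20, "model_order_min": 1,
})
np.savez(
    'tests/integration/input_data/expected_sysid_output.npz',
    frequencies=sysid_output['Fn_poles'],
    cov_freq=sysid_output['Fn_poles_cov'],
    damping_ratios=sysid_output['Xi_poles'],
    cov_damping=sysid_output['Xi_poles_cov'],
    mode_shapes=sysid_output['Phi_poles'],
)
```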
""" data = np.empty((0, 3)) # No samples - oma_params = { + sysid_params = { "Fs": 10.0, "block_shift": 5, - "model_order": 6 + "model_order": 6, + "model_order_min": 1 } with pytest.raises(Exception): - sys_id.sysid(data, oma_params) + sysid.sysid(data, sysid_params) diff --git a/tests/unit/methods/test_sys_id_unit.py b/tests/unit/methods/test_sys_id_unit.py index 22efc4a..8e00fe4 100644 --- a/tests/unit/methods/test_sys_id_unit.py +++ b/tests/unit/methods/test_sys_id_unit.py @@ -3,10 +3,10 @@ from unittest.mock import MagicMock, patch from datetime import datetime import json -from methods.sys_id import ( +from methods.sysid import ( sysid, - get_oma_results, - publish_oma_results, + get_sysid_results, + publish_sysid_results, setup_client, ) from paho.mqtt.client import Client as MQTTClient @@ -18,24 +18,25 @@ def sample_data(): @pytest.fixture -def oma_params(): +def sysid_params(): return { "Fs": 100.0, "block_shift": 5, - "model_order": 6 + "model_order": 6, + "model_order_min": 1 } -def test_sysid_returns_expected_keys(sample_data, oma_params): - result = sysid(sample_data, oma_params) +def test_sysid_returns_expected_keys(sample_data, sysid_params): + result = sysid(sample_data, sysid_params) assert isinstance(result, dict) - expected_keys = {'Fn_poles', 'Fn_poles_cov', 'Xi_poles', 'Xi_poles_cov', 'Phi_poles', 'Lab'} + expected_keys = {'Fn_poles', 'Fn_poles_cov', 'Xi_poles', 'Xi_poles_cov', 'Phi_poles'} assert expected_keys.issubset(result.keys()) -def test_sysid_transposes_data_if_needed(oma_params): +def test_sysid_transposes_data_if_needed(sysid_params): data = np.random.randn(3, 600) # More columns than rows - result = sysid(data, oma_params) + result = sysid(data, sysid_params) assert isinstance(result, dict) assert "Fn_poles" in result @@ -46,7 +47,7 @@ def test_get_oma_results_success(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (mock_data, datetime.now()) - result, ts = get_oma_results(0.1, mock_aligner, fs) + result, ts = get_sysid_results(0.1, mock_aligner, fs) assert result is not None assert "Fn_poles" in result @@ -56,7 +57,7 @@ def test_get_oma_results_no_data(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (np.empty((0, 3)), datetime.now()) - result, ts = get_oma_results(1, mock_aligner, fs) + result, ts = get_sysid_results(1, mock_aligner, fs) assert result is None assert ts is None @@ -68,7 +69,7 @@ def test_get_oma_results_not_enough_samples(mocker): data = np.random.randn(100, 3) mock_aligner.extract.return_value = (data, datetime.now()) - result, ts = get_oma_results(10, mock_aligner, fs) # ask for too many samples + result, ts = get_sysid_results(10, mock_aligner, fs) # ask for too many samples assert result is None assert ts is None @@ -79,9 +80,9 @@ def test_get_oma_results_sysid_failure(mocker): mock_aligner = MagicMock() mock_aligner.extract.return_value = (np.random.randn(600, 3), datetime.now()) - mocker.patch("methods.sys_id.sysid", side_effect=Exception("fail")) + mocker.patch("methods.sysid.sysid", side_effect=Exception("fail")) - result, ts = get_oma_results(1, mock_aligner, fs) + result, ts = get_sysid_results(1, mock_aligner, fs) assert result is None assert ts is None @@ -95,13 +96,12 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): 'Xi_poles': np.array([[0.02, 0.03], [0.04, 0.05]]), 'Xi_poles_cov': np.array([[0.001, 0.001], [0.001, 0.001]]), 'Phi_poles': np.array([[1.0, 0.0], [0.0, 1.0]]), - 'Lab': ['mode1', 'mode2'] } - mocker.patch("methods.sys_id.time.sleep", 
return_value=None) + mocker.patch("methods.sysid.time.sleep", return_value=None) mocker.patch( - "methods.sys_id.get_oma_results", + "methods.sysid.get_sysid_results", side_effect=[ (None, None), (dummy_result, datetime(2024, 1, 1)) @@ -109,7 +109,7 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): ) mocker.patch( - "methods.sys_id.convert_numpy_to_list", + "methods.sysid.convert_numpy_to_list", return_value={k: v.tolist() if hasattr(v, "tolist") else v for k, v in dummy_result.items()} ) @@ -118,7 +118,7 @@ def test_publish_oma_results_retries_and_publishes_once(mocker): aligner = MagicMock() aligner.client = MagicMock() - publish_oma_results(0.1, aligner, mock_client, "test/topic", fs) + publish_sysid_results(0.1, aligner, mock_client, "test/topic", fs) assert mock_client.publish.called assert mock_client.publish.call_count == 1 @@ -132,10 +132,10 @@ def test_setup_client_with_multiple_topics(mocker): "topics": ["topic1", "topic2"] } - extract_mock = mocker.patch("methods.sys_id.extract_fs_from_metadata", return_value=123.0) + extract_mock = mocker.patch("methods.sysid.extract_fs_from_metadata", return_value=123.0) mock_mqtt_client = MagicMock() - mocker.patch("methods.sys_id.setup_mqtt_client", return_value=(mock_mqtt_client, None)) + mocker.patch("methods.sysid.setup_mqtt_client", return_value=(mock_mqtt_client, None)) client, fs = setup_client(mqtt_config)