From 680b5a76fc98586208a6358b07ed52c1bcd7a755 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 25 Jan 2024 11:01:04 -0600 Subject: [PATCH 01/10] Primer commit From e43ea82c1f5846ba3132bcb3cb949f0d2afbf10e Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 25 Jan 2024 11:16:08 -0600 Subject: [PATCH 02/10] primer commit intentando subir archivos al repositorio del maestro. --- Hello.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 Hello.txt diff --git a/Hello.txt b/Hello.txt new file mode 100644 index 0000000..655ab99 --- /dev/null +++ b/Hello.txt @@ -0,0 +1 @@ +Hello world of git :) \ No newline at end of file From 1eabfbc7c1322dcf05a8aec26d34d686c05294d1 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 25 Jan 2024 11:18:38 -0600 Subject: [PATCH 03/10] =?UTF-8?q?Estructura=20de=20Python=20seg=C3=BAn=20P?= =?UTF-8?q?EP8.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Estructura.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 Estructura.py diff --git a/Estructura.py b/Estructura.py new file mode 100644 index 0000000..4eeb336 --- /dev/null +++ b/Estructura.py @@ -0,0 +1,50 @@ +""" +Este módulo es un ejemplo que sigue las pautas de PEP 8. +""" + +import os +import sys +import requests +from flask import Flask + + +class MyClass: + """ + Clase de ejemplo que sigue las convenciones de nombres CamelCase. + """ + + def __init__(self, name): + """ + Inicializador de la clase. + """ + self.name = name + + def get_name(self): + """ + Método que devuelve el nombre de la instancia. + """ + pass + + +def add_numbers(a, b): + """ + Función de ejemplo que suma dos números. + + :param a: Primer número + :param b: Segundo número + :return: Suma de a y b + """ + return a + b + + +def main(): + """ + Función principal de ejecución. + + Esta función muestra cómo estructurar un programa principal. 
+ """ + pass + + +if __name__ == "__main__": + main() From b44f4292283edf2c7e4a535427be73dd30e231f4 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 7 Mar 2024 10:00:37 -0600 Subject: [PATCH 04/10] EDIT --- Estructura.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Estructura.py b/Estructura.py index 4eeb336..a714130 100644 --- a/Estructura.py +++ b/Estructura.py @@ -4,8 +4,7 @@ import os import sys -import requests -from flask import Flask + class MyClass: From 80a325b5d5c01311f2bbcc51d7717b0a2f8f768e Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 7 Mar 2024 10:24:00 -0600 Subject: [PATCH 05/10] =?UTF-8?q?Creaci=C3=B3n=20algoritmos=20de=20busqued?= =?UTF-8?q?a=20bfs=20y=20dfs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- BFS_DFS.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 BFS_DFS.py diff --git a/BFS_DFS.py b/BFS_DFS.py new file mode 100644 index 0000000..561a9f1 --- /dev/null +++ b/BFS_DFS.py @@ -0,0 +1,34 @@ +"""Programa en python para algotimos de +de busqueda por amplitud (bfs) y por +profundidad (dfs)""" + +from collections import defaultdict + +"""Esta clase reprenta un grafo dirigido +que hace uso de la representación de listas +de adyacencia""" + +class grafo: + #Constructor + def __init__(self): + #Diccionario default que almacenara los elementos del grafo + self.grafo = defaultdict(list) + + def vertices(self, u, v): + self.grafo[u].append(v) + + def BFS (self, s): + #Marca todos los vertices como no visitados + visited = [False]*(max(self.grafo)+1) + #Creación de una cola: + cola = [] + + """Marca ell nodo de origen como vistado + y lo añade a la cola:""" + cola.append(s) + visited[s] = True + + while cola: + #Se elimina un vertice de la cola y se imprime + s = cola.pop(0) + print(s, end='') \ No newline at end of file From 09b738fac6b76e677f3c888011a6aaf301b3cfa6 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Mon, 18 Mar 2024 16:01:50 -0600 Subject: [PATCH 06/10] Algoritmos BFS y DFS terminados --- BFS_DFS.py | 111 ++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 88 insertions(+), 23 deletions(-) diff --git a/BFS_DFS.py b/BFS_DFS.py index 561a9f1..47221e1 100644 --- a/BFS_DFS.py +++ b/BFS_DFS.py @@ -3,32 +3,97 @@ profundidad (dfs)""" from collections import defaultdict +import time """Esta clase reprenta un grafo dirigido que hace uso de la representación de listas de adyacencia""" -class grafo: - #Constructor +class Graph: def __init__(self): - #Diccionario default que almacenara los elementos del grafo - self.grafo = defaultdict(list) - - def vertices(self, u, v): - self.grafo[u].append(v) - - def BFS (self, s): - #Marca todos los vertices como no visitados - visited = [False]*(max(self.grafo)+1) - #Creación de una cola: - cola = [] - - """Marca ell nodo de origen como vistado - y lo añade a la cola:""" - cola.append(s) - visited[s] = True - - while cola: - #Se elimina un vertice de la cola y se imprime - s = cola.pop(0) - print(s, end='') \ No newline at end of file + # Utilizamos defaultdict para crear una lista de adyacencia + self.graph = defaultdict(list) + + def add_edge(self, u, v): + # Función para agregar una arista al grafo + self.graph[u].append(v) + + def bfs(self, s): + # Función para recorrido BFS (Breadth First Search) + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + queue = [] # Inicializamos una cola para el BFS + + visited[s] = True # Marcamos el vértice inicial como visitado + 
queue.append(s) # Añadimos el vértice inicial a la cola + + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + + while queue: # Mientras la cola no esté vacía + s = queue.pop(0) # Obtenemos el primer elemento de la cola + print(s, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[s]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + visited[i] = True # Marcamos el vértice como visitado + queue.append(i) # Añadimos el vértice a la cola + + end_time = time.time() # Medimos el tiempo de finalización del algoritmo + print("\nTiempo de ejecución de BFS:", end_time - start_time, "segundos") + + def dfs_util(self, v, visited): + # Función de utilidad para el recorrido DFS (Depth First Search) + visited[v] = True # Marcamos el vértice actual como visitado + print(v, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[v]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + self.dfs_util(i, visited) # Llamamos recursivamente a la función DFS_util + + def dfs(self): + # Función para recorrido DFS + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + for i in range(len(self.graph)): + if not visited[i]: # Si el vértice no ha sido visitado + self.dfs_util(i, visited) # Llamamos a la función de utilidad DFS_util + end_time = time.time() # Medimos el tiempo de finalización del algoritmo + print("\nTiempo de ejecución de DFS:", end_time - start_time, "segundos") + +# Grafo para BFS +g_bfs = Graph() +g_bfs.add_edge(0, 1) +g_bfs.add_edge(0, 2) +g_bfs.add_edge(1, 2) +g_bfs.add_edge(2, 0) +g_bfs.add_edge(2, 3) +g_bfs.add_edge(3, 5) +g_bfs.add_edge(3, 3) +g_bfs.add_edge(3, 4) +g_bfs.add_edge(4, 2) +g_bfs.add_edge(4, 4) +g_bfs.add_edge(4, 5) +g_bfs.add_edge(5, 1) +g_bfs.add_edge(5, 5) +print("Recorrido de la búsqueda por anchura:") +g_bfs.bfs(5) # Realizamos el recorrido BFS empezando desde el vértice 5 +print("\n") + +# Grafo para DFS +g_dfs = Graph() +g_dfs.add_edge(0, 1) +g_dfs.add_edge(0, 2) +g_dfs.add_edge(1, 1) +g_dfs.add_edge(1, 2) +g_dfs.add_edge(2, 2) +g_dfs.add_edge(2, 3) +g_dfs.add_edge(3, 3) +g_dfs.add_edge(3, 4) +g_dfs.add_edge(4, 3) +g_dfs.add_edge(4, 4) +g_dfs.add_edge(4, 5) +g_dfs.add_edge(5, 4) +g_dfs.add_edge(5, 5) +print("El recorrido a profundidad es el siguiente:") +g_dfs.dfs() # Realizamos el recorrido DFS From 63d59b9ce4bf59da0c305f4b15a838a1de9b1604 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Mon, 18 Mar 2024 16:01:50 -0600 Subject: [PATCH 07/10] =?UTF-8?q?Creaci=C3=B3n=20de=20carpeta=20con=20acti?= =?UTF-8?q?vidad?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ALGORITMOS DE BUSQUEDA/BFS_DFS.py | 99 +++++++++++++++++++++++++++++++ BFS_DFS.py | 34 ----------- 2 files changed, 99 insertions(+), 34 deletions(-) create mode 100644 ALGORITMOS DE BUSQUEDA/BFS_DFS.py delete mode 100644 BFS_DFS.py diff --git a/ALGORITMOS DE BUSQUEDA/BFS_DFS.py b/ALGORITMOS DE BUSQUEDA/BFS_DFS.py new file mode 100644 index 0000000..47221e1 --- /dev/null +++ b/ALGORITMOS DE BUSQUEDA/BFS_DFS.py @@ -0,0 +1,99 @@ +"""Programa en python para algotimos de +de busqueda por amplitud (bfs) y por +profundidad (dfs)""" + +from collections import defaultdict +import time + +"""Esta clase reprenta un grafo dirigido +que hace uso de la representación de listas +de 
adyacencia""" + +class Graph: + def __init__(self): + # Utilizamos defaultdict para crear una lista de adyacencia + self.graph = defaultdict(list) + + def add_edge(self, u, v): + # Función para agregar una arista al grafo + self.graph[u].append(v) + + def bfs(self, s): + # Función para recorrido BFS (Breadth First Search) + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + queue = [] # Inicializamos una cola para el BFS + + visited[s] = True # Marcamos el vértice inicial como visitado + queue.append(s) # Añadimos el vértice inicial a la cola + + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + + while queue: # Mientras la cola no esté vacía + s = queue.pop(0) # Obtenemos el primer elemento de la cola + print(s, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[s]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + visited[i] = True # Marcamos el vértice como visitado + queue.append(i) # Añadimos el vértice a la cola + + end_time = time.time() # Medimos el tiempo de finalización del algoritmo + print("\nTiempo de ejecución de BFS:", end_time - start_time, "segundos") + + def dfs_util(self, v, visited): + # Función de utilidad para el recorrido DFS (Depth First Search) + visited[v] = True # Marcamos el vértice actual como visitado + print(v, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[v]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + self.dfs_util(i, visited) # Llamamos recursivamente a la función DFS_util + + def dfs(self): + # Función para recorrido DFS + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + for i in range(len(self.graph)): + if not visited[i]: # Si el vértice no ha sido visitado + self.dfs_util(i, visited) # Llamamos a la función de utilidad DFS_util + end_time = time.time() # Medimos el tiempo de finalización del algoritmo + print("\nTiempo de ejecución de DFS:", end_time - start_time, "segundos") + +# Grafo para BFS +g_bfs = Graph() +g_bfs.add_edge(0, 1) +g_bfs.add_edge(0, 2) +g_bfs.add_edge(1, 2) +g_bfs.add_edge(2, 0) +g_bfs.add_edge(2, 3) +g_bfs.add_edge(3, 5) +g_bfs.add_edge(3, 3) +g_bfs.add_edge(3, 4) +g_bfs.add_edge(4, 2) +g_bfs.add_edge(4, 4) +g_bfs.add_edge(4, 5) +g_bfs.add_edge(5, 1) +g_bfs.add_edge(5, 5) +print("Recorrido de la búsqueda por anchura:") +g_bfs.bfs(5) # Realizamos el recorrido BFS empezando desde el vértice 5 +print("\n") + +# Grafo para DFS +g_dfs = Graph() +g_dfs.add_edge(0, 1) +g_dfs.add_edge(0, 2) +g_dfs.add_edge(1, 1) +g_dfs.add_edge(1, 2) +g_dfs.add_edge(2, 2) +g_dfs.add_edge(2, 3) +g_dfs.add_edge(3, 3) +g_dfs.add_edge(3, 4) +g_dfs.add_edge(4, 3) +g_dfs.add_edge(4, 4) +g_dfs.add_edge(4, 5) +g_dfs.add_edge(5, 4) +g_dfs.add_edge(5, 5) +print("El recorrido a profundidad es el siguiente:") +g_dfs.dfs() # Realizamos el recorrido DFS diff --git a/BFS_DFS.py b/BFS_DFS.py deleted file mode 100644 index 561a9f1..0000000 --- a/BFS_DFS.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Programa en python para algotimos de -de busqueda por amplitud (bfs) y por -profundidad (dfs)""" - -from collections import defaultdict - -"""Esta clase reprenta un grafo dirigido -que hace uso de la representación de listas -de adyacencia""" - -class grafo: - #Constructor - def __init__(self): - #Diccionario default que 
almacenara los elementos del grafo - self.grafo = defaultdict(list) - - def vertices(self, u, v): - self.grafo[u].append(v) - - def BFS (self, s): - #Marca todos los vertices como no visitados - visited = [False]*(max(self.grafo)+1) - #Creación de una cola: - cola = [] - - """Marca ell nodo de origen como vistado - y lo añade a la cola:""" - cola.append(s) - visited[s] = True - - while cola: - #Se elimina un vertice de la cola y se imprime - s = cola.pop(0) - print(s, end='') \ No newline at end of file From a14d9abe64a561cc9bdcc8cc32cac0fb7f8871ac Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Mon, 18 Mar 2024 16:30:10 -0600 Subject: [PATCH 08/10] =?UTF-8?q?ALGORITMOS=20BFS=20Y=20DFS=20A=C3=91ADIDO?= =?UTF-8?q?S=20AL=20REPO?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ALGORITMOS BFS Y DFS/BFS_DFS.py | 99 +++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 ALGORITMOS BFS Y DFS/BFS_DFS.py diff --git a/ALGORITMOS BFS Y DFS/BFS_DFS.py b/ALGORITMOS BFS Y DFS/BFS_DFS.py new file mode 100644 index 0000000..78dfb2a --- /dev/null +++ b/ALGORITMOS BFS Y DFS/BFS_DFS.py @@ -0,0 +1,99 @@ +"""Programa en python para algotimos de +de busqueda por amplitud (bfs) y por +profundidad (dfs)""" + +from collections import defaultdict +import time + +"""Esta clase reprenta un grafo dirigido +que hace uso de la representación de listas +de adyacencia""" + +class Graph: + def __init__(self): + # Utilizamos defaultdict para crear una lista de adyacencia + self.graph = defaultdict(list) + + def add_edge(self, u, v): + # Función para agregar una arista al grafo + self.graph[u].append(v) + + def bfs(self, s): + # Función para recorrido BFS (Breadth First Search) + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + queue = [] # Inicializamos una cola para el BFS + + visited[s] = True # Marcamos el vértice inicial como visitado + queue.append(s) # Añadimos el vértice inicial a la cola + + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + + while queue: # Mientras la cola no esté vacía + s = queue.pop(0) # Obtenemos el primer elemento de la cola + print(s, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[s]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + visited[i] = True # Marcamos el vértice como visitado + queue.append(i) # Añadimos el vértice a la cola + + end_time = time.time() # Medimos el tiempo de finalización del algoritmo + print("\nTiempo de ejecución de BFS: {:.10} segundos".format(end_time - start_time)) + + def dfs_util(self, v, visited): + # Función de utilidad para el recorrido DFS (Depth First Search) + visited[v] = True # Marcamos el vértice actual como visitado + print(v, end=" ") # Imprimimos el vértice actual + + # Recorremos todos los vértices adyacentes al vértice actual + for i in self.graph[v]: + if not visited[i]: # Si el vértice adyacente no ha sido visitado + self.dfs_util(i, visited) # Llamamos recursivamente a la función DFS_util + + def dfs(self): + # Función para recorrido DFS + visited = [False] * (len(self.graph)) # Marcamos todos los vértices como no visitados + start_time = time.time() # Medimos el tiempo de inicio del algoritmo + for i in range(len(self.graph)): + if not visited[i]: # Si el vértice no ha sido visitado + self.dfs_util(i, visited) # Llamamos a la función de utilidad DFS_util + end_time = time.time() # Medimos el tiempo de finalización 
del algoritmo + print("\nTiempo de ejecución de DFS: {:.10f} segundos".format(end_time - start_time)) + +# Grafo para BFS +g_bfs = Graph() +g_bfs.add_edge(0, 1) +g_bfs.add_edge(0, 2) +g_bfs.add_edge(1, 2) +g_bfs.add_edge(2, 0) +g_bfs.add_edge(2, 3) +g_bfs.add_edge(3, 5) +g_bfs.add_edge(3, 3) +g_bfs.add_edge(3, 4) +g_bfs.add_edge(4, 2) +g_bfs.add_edge(4, 4) +g_bfs.add_edge(4, 5) +g_bfs.add_edge(5, 1) +g_bfs.add_edge(5, 5) +print("Recorrido de la búsqueda por anchura:") +g_bfs.bfs(5) # Realizamos el recorrido BFS empezando desde el vértice 5 +print("\n") + +# Grafo para DFS +g_dfs = Graph() +g_dfs.add_edge(0, 1) +g_dfs.add_edge(0, 2) +g_dfs.add_edge(1, 1) +g_dfs.add_edge(1, 2) +g_dfs.add_edge(2, 2) +g_dfs.add_edge(2, 3) +g_dfs.add_edge(3, 3) +g_dfs.add_edge(3, 4) +g_dfs.add_edge(4, 3) +g_dfs.add_edge(4, 4) +g_dfs.add_edge(4, 5) +g_dfs.add_edge(5, 4) +g_dfs.add_edge(5, 5) +print("El recorrido a profundidad es el siguiente:") +g_dfs.dfs() # Realizamos el recorrido DFS \ No newline at end of file From 18da71f9a960742be5ef0655849b5a2652b4e0b4 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Wed, 20 Mar 2024 15:09:27 -0600 Subject: [PATCH 09/10] =?UTF-8?q?A=C3=B1adir=20los=20algoritmos=20del=20bf?= =?UTF-8?q?s=20y=20dfs=20al=20repo?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ALGORITMOS BFS Y DFS/BFS_DFS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ALGORITMOS BFS Y DFS/BFS_DFS.py b/ALGORITMOS BFS Y DFS/BFS_DFS.py index 78dfb2a..e77dc12 100644 --- a/ALGORITMOS BFS Y DFS/BFS_DFS.py +++ b/ALGORITMOS BFS Y DFS/BFS_DFS.py @@ -61,7 +61,7 @@ def dfs(self): end_time = time.time() # Medimos el tiempo de finalización del algoritmo print("\nTiempo de ejecución de DFS: {:.10f} segundos".format(end_time - start_time)) -# Grafo para BFS +# Grafo utilizado para BFS g_bfs = Graph() g_bfs.add_edge(0, 1) g_bfs.add_edge(0, 2) From 83016ec495e1cb8adade1f7ba6ea8fcedbe1e2b6 Mon Sep 17 00:00:00 2001 From: AJGHZ Date: Thu, 16 May 2024 11:10:32 -0600 Subject: [PATCH 10/10] =?UTF-8?q?BFS=20PARALELO=20A=C3=91ADIDO=20AL=20REPO?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- PROYECTO_PARALELA24A/BFSParalelo.py | 75 +++++ .../BFSParaleloExplicaci\303\263n.txt" | 71 +++++ mpi4py2.py | 288 ++++++++++++++++++ 3 files changed, 434 insertions(+) create mode 100644 PROYECTO_PARALELA24A/BFSParalelo.py create mode 100644 "PROYECTO_PARALELA24A/BFSParaleloExplicaci\303\263n.txt" create mode 100644 mpi4py2.py diff --git a/PROYECTO_PARALELA24A/BFSParalelo.py b/PROYECTO_PARALELA24A/BFSParalelo.py new file mode 100644 index 0000000..8802910 --- /dev/null +++ b/PROYECTO_PARALELA24A/BFSParalelo.py @@ -0,0 +1,75 @@ +from mpi4py import MPI +import time +import os + +# Inicialización de MPI +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +size = comm.Get_size() + +# Función BFS +def BFS(graph, frontier_nodes, bfs_tree, end_node): + start_time = time.time() + # Terminar cuando no haya más nodos en la frontera o se encuentre el nodo final + if len(frontier_nodes) == 0 or any(node == end_node for node in frontier_nodes): + end_time = time.time() #Tiempo de finalización + execution_time = end_time - start_time #Calcular el tiempo de ejecución + print(f"Rango: {rank}. 
Tiempo de ejecución: {execution_time} segundos.")
+        return bfs_tree
+    else:
+        # Calcular la siguiente frontera: vecinos que aún no aparecen en bfs_tree
+        next_nodes = set()
+        for n in frontier_nodes:
+            for ne in graph[n]:
+                if ne not in bfs_tree:
+                    # Escribir en bfs_tree el puntero hacia atrás (el nodo del que proviene)
+                    bfs_tree[ne] = n
+                    next_nodes.add(ne)
+
+        # Recursión sobre la nueva frontera
+        return BFS(graph, next_nodes, bfs_tree, end_node)
+
+# Función para obtener el camino de un nodo a su raíz en el árbol
+def TREE_PATH(node, bfs_tree):
+    if bfs_tree[node] == node:
+        return [node]
+    else:
+        return [node] + TREE_PATH(bfs_tree[node], bfs_tree)
+
+if rank == 0:
+    # Grafo de ejemplo
+    graph = {
+        'A': ['B', 'C'],
+        'B': ['A', 'D', 'E'],
+        'C': ['A', 'F'],
+        'D': ['B'],
+        'E': ['B', 'F'],
+        'F': ['C', 'E']
+    }
+
+    # Nodo inicial y nodo final
+    start_node = 'A'
+    end_node = 'F'
+
+    # Inicializar bfs_tree con el nodo inicial apuntando a sí mismo
+    bfs_tree = {start_node: start_node}
+
+    start_time = time.time()
+    # Ejecutar BFS en el nodo raíz (nodo inicial)
+    bfs_tree = BFS(graph, {start_node}, bfs_tree, end_node)
+    end_time = time.time()
+    execution_time = end_time - start_time
+    print(f"Rango: {rank}. Tiempo de ejecución: {execution_time} segundos.")
+    # Imprimir el árbol BFS resultante
+    print("BFS Tree:", bfs_tree)
+
+# Sincronizar todos los procesos antes de finalizar
+comm.Barrier()
+
+print(f"Rango actual: {rank} de un total de {size} procesos")
\ No newline at end of file
diff --git "a/PROYECTO_PARALELA24A/BFSParaleloExplicaci\303\263n.txt" "b/PROYECTO_PARALELA24A/BFSParaleloExplicaci\303\263n.txt"
new file mode 100644
index 0000000..ca1f7a2
--- /dev/null
+++ "b/PROYECTO_PARALELA24A/BFSParaleloExplicaci\303\263n.txt"
@@ -0,0 +1,71 @@
+"""
+    Pseudocódigo:
+    INICIO
+    CREAR COLA Q
+    CREAR GRAFO
+    PARA CADA NODO EN GRAFO:
+        VISITADO[NODO] = FALSE
+    INICIA RECORRIDO
+    MIENTRAS:
+        VISITADO[NODO] = TRUE
+        HAZ:
+            Q.INSERTAR(VALOR_NODO)
+            MIENTRAS !Q.EMPTY():
+                NODO_ACTUAL = EXTRAER VALOR
+                PROCESAR(NODO_ACTUAL)
+                PROCESO EN PARALELO:
+                    PARA CADA HILO:
+                        SI !Q.EMPTY():
+                            NODO_ACTUAL = Q.EXTRAER()
+                            PROCESAR(NODO_ACTUAL):
+                                PARA CADA NODO_ADYACENTE AL NODO_ACTUAL:
+                                    SI VISITADO[NODO_ADYACENTE] = FALSE:
+                                        PROCESO EN PARALELO:
+                                            VISITADO[NODO_ADYACENTE] = TRUE
+                                            Q.INSERTAR(NODO_ADYACENTE)
+"""
+
+"""
+    EL PROCESO EN PARALELO SE UTILIZA PARA ASEGURAR QUE LOS HILOS MODIFIQUEN
+    LA ESTRUCTURA DE LOS DATOS COMPARTIDOS (ACCIONES DE RECORRIDO, VALIDACIÓN E INSERCIÓN)
+    PARA EVITAR LAS CONDICIONES DE CARRERA Y GARANTIZAR QUE LA ACTUALIZACIÓN DE LOS DATOS SE
+    HAGA DE MANERA CORRECTA.
+"""
+
+"""
+    Comando para ejecutar determinado número de procesos:
+    mpiexec -n ? python C:\Users\AldoG\Documents\GitHub\CuTonala_2024_A\BFSParalelo.py
+"""
+
+USOS DE ESTE ALGORITMO EN LA ACTUALIDAD:
+Tecnología y redes sociales: En plataformas como Facebook,
+LinkedIn o Twitter, BFS se utiliza para recomendar amigos o
+conexiones basadas en la red de contactos existente.
+También se emplea en la propagación de información o contenido viral.
+ +Transporte y logística: En la planificación de rutas de transporte, +BFS puede utilizarse para encontrar la ruta más corta entre dos ubicaciones, +minimizando el tiempo y los costos de transporte. + +Telecomunicaciones: En la optimización de redes de comunicación, +BFS se aplica para encontrar la ruta más eficiente para +transmitir datos entre nodos, minimizando la latencia y +maximizando el ancho de banda disponible. + +Finanzas: En el análisis de riesgos y la gestión de carteras, +BFS puede ayudar a identificar las conexiones entre diferentes +activos financieros y evaluar el impacto potencial de un evento en toda la red. + +Manufactura: En la cadena de suministro y gestión de inventario, +BFS puede utilizarse para optimizar el flujo de materiales y +minimizar los tiempos de espera en la producción. + +Salud: En bioinformática y genómica, BFS se utiliza para +analizar redes de interacción de proteínas o genes, +ayudando a identificar relaciones funcionales y biomarcadores relevantes +para enfermedades específicas. + +Seguridad cibernética: En la detección de amenazas y el análisis de +vulnerabilidades, BFS puede utilizarse para modelar y +analizar la propagación de malware o la propagación de ataques en una red informática. \ No newline at end of file diff --git a/mpi4py2.py b/mpi4py2.py new file mode 100644 index 0000000..88c8212 --- /dev/null +++ b/mpi4py2.py @@ -0,0 +1,288 @@ +# -*- coding: utf-8 -*- +""" +================================ +Evaluate the mpy4py functionality +================================ + +MPI for Python supports convenient, pickle-based communication +of generic Python object as well as fast, near C-speed, +direct array data communication of buffer-provider objects +(e.g., NumPy arrays). +https://materials.jeremybejarano.com/MPIwithPython/overview.html + + +Running Python scripts with MPI +Usage: + $ mpiexec -n 4 python mpi4py_basics.py + +THe Sphinx docstring format: + +'''[Summary] + +:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal] +:type [ParamName]: [ParamType](, optional) +... +:raises [ErrorType]: [ErrorDescription] +... +:return: [ReturnDescription] +:rtype: [ReturnType] +'''' +""" +from mpi4py2 import MPI +import numpy +import sys + + +class MPI4PY_UTILS: + """Utilities for MPI4PY library""" + + def __init__(self) -> None: + """Then we need to create a communicator, an abstract concept which refers to a world where a predefined number + of processes can communicate. These processes can arise from processors which are physically at different + locations connected through a network (typically a cluster). It can also refer to a number of multiple + processes that can be generated in a single PC or laptop (e.g. 4 core i7 processors can enable spawning + of 8 processors thanks to hyperthreading). Once a communicator is created, mpi automatically identifies + them with an number called rank, which starts from 0. Keep in mind that the number of processors is nowhere + explicitly specified in the code. + + Virtual topologies (Cartcomm, Graphcomm and Distgraphcomm classes, which are specializations of the + Intracomm class) are fully supported. New instances can be obtained from intracommunicator instances + with factory methods Intracomm. Create_cart and Intracomm.Create_graph. + """ + # The two predefined intracommunicator instances are available: + # COMM_SELF and COMM_WORLD. From them, new communicators can be created as needed. 
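+        # Illustrative sketch only (not executed in this module): a derived communicator
+        # could be created from COMM_WORLD, for example by splitting the processes into
+        # two groups according to rank parity:
+        #     sub_comm = MPI.COMM_WORLD.Split(color=MPI.COMM_WORLD.Get_rank() % 2)
+        #     sub_comm.Free()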
+ self.comm = MPI.COMM_WORLD # create default communicator + self.rank = self.comm.Get_rank() # Determines the rank of the calling process in the communicator. + self.size = self.comm.Get_size() # Returns the number of processes in the communicator. + # It will return the same number to every process. + + def simple_hello_world(self): + """As tradition has it, we will introduce you to MPI programming using a variation on the standard hello world + program: your first MPI python program will be the Hello World program for multiple processes. + The source code is as follows. + + First, the mpiexec program is launched. This is the program which starts MPI, a wrapper around whatever + program you to pass into it. The -n 5 option specifies the desired number of processes. In our case, + 8 processes are run, each one being an instance of python. To each of the 5 instances of python, + we pass the argument hello.py which is the name of our program’s text file, located in the current + directory. Each of the five instances of python then opens the .py file and runs the same program. + The difference in each process’s execution environment is that the processes are given different ranks + in the communicator. Because of this, each process prints a different number when it executes. + + Intracommunicators are the most commonly used form of communicator in MPI. + Each intracommunicator contains a set of processes, each of which is identified by its “rank” within + the communicator. The ranks are numbered 0 through Size-1. Any process in the communicator can send a + message to another process within the communicator or receive a message from any other process + in the communicator. Intracommunicators also support a variety of collective operations that involve + all of the processes in the communicator. Most MPI communication occurs within intracommunicators. + Intercommunicators provide a sophisticated method of implementing complex communications, + but very few MPI programs require them. + The Hierarchy of Communicators + Intercom + ----COMM + Intracom + Cartcomm + Distgraphcomm + Graphcomm + """ + # The above command will execute five python processes which can all communicate with each other. + # When each program runs, it will print hello, and tell you its rank: + # Example usage and output: + # (venv) i@is-MacBook-Pro mpi4py_examples % mpirun -np 8 python3 mpi4py_basics.py + # Hello wolrd from process 5 + # Hello wolrd from process 2 + # Hello wolrd from process 0 + # Hello wolrd from process 4 + # Hello wolrd from process 3 + # Hello wolrd from process 1 + # Hello wolrd from process 7 + # Hello wolrd from process 6 + print(f"Hello wolrd from process {self.rank}") + + def seperate_codes(self): + """ + When an MPI program is run, each process receives the same code. However, each process is assigned a + different rank. This allows us to embed a seperate code for each process into one file. + In the following code, all processes are given the same two numbers. However, though there is only one file, + 3 processes are given completely different instructions for what to do with them. 
+ Process 0 sums them, process 1 multiplies them, and process 2 takes the maximum of them: + """ + a = 6.0 + b = 3.0 + if self.rank == 0: # Will be printed if the rank 0 is detected + print(a + b) + if self.rank == 1: # Will be printed if the rank 1 is detected + print(a * b) + if self.rank == 2: # Will be printed if the rank 2 is detected + print(max(a,b)) + + def pass_random_draw(self): + """ + As mentioned in earlier, the simplest message passing involves two processes: a sender and a receiver. + Let us begin by demonstrating a program designed for two processes. One will draw a random number and then + send it to the other. We will do this using the routines Comm.Send and Comm.Recv: + """ + rand_num = numpy.zeros(1) + if self.rank == 1: + rand_num = numpy.random.random_sample(1) + print(f"Process {self.rank} drew the number {rand_num[0]}") + self.comm.Send(rand_num, dest=0) + + if self.rank == 0: + print(f"Process {self.rank} before receiving has the number {rand_num[0]}") + self.comm.Recv(rand_num, source=1) + print(f"Process {self.rank} received the number {rand_num[0]}") + + def send(self, buffer, dest): + """ + Comm.Send(buf, dest = 0, tag = 0) + Performs a basic send. This send is a point-to-point communication. + It sends information from exactly one process to exactly one other process. + """ + return MPI.COMM_WORLD.Send(buffer, dest) + + def receive(self, buffer, source): + """ + Comm.Recv(buf, source = 0, tag = 0, Status status = None)¶ + Basic point-to-point receive of data + Parameters + Comm (MPI comm): communicator we wish to query + buf (choice): initial address of receive buffer (choose receipt location) + source (integer): rank of source + tag (integer): message tag + status (Status): status of object + """ + return MPI.COMM_WORLD.Recv(buffer, source) + + def send_and_receive(self): + """ + Comm.Recv(buf, source = 0, tag = 0, Status status = None)¶ + Basic point-to-point receive of data + Parameters + Comm (MPI comm): communicator we wish to query + buf (choice): initial address of receive buffer (choose receipt location) + source (integer): rank of source + tag (integer): message tag + status (Status): status of object + + Send and Recv are referred to as blocking functions. + That is, if a process calls Recv, it will sit idle until it has received a message from a corresponding + Send before it will proceeed. See the appendix for the corresponding non-blocking functions Isend and + Irecv (I stands for immediate). In essence, Irecv will return immediately. + If it did not receive its message it will indicate to the system that it will be receiving a message, + proceed beyond the Irecv to do other useful work, and then check back later to see if the message has arrived. + This can be used to dramatically improve performance. + + Tip: + On a Recv you do not always need to specify the source. Instead, you can allow the calling process to accept + a message from any process that happend to be sending to the receiving process. + This is done by setting source to a predefined MPI constant, source=ANY_SOURCE + (note that you would first need to import this with + from mpi4py.MPI import ANY_SOURCE or use the syntax source=MPI.ANY_SOURCE). 
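+
+        Illustrative sketch (an assumption for this docstring, not code exercised by this
+        module): the non-blocking counterparts mentioned above could be used as
+
+            buf = numpy.zeros(1)
+            if MPI.COMM_WORLD.Get_rank() == 0:
+                request = MPI.COMM_WORLD.Isend(numpy.ones(1), dest=1)
+                request.Wait()   # block only when the send must have completed
+            elif MPI.COMM_WORLD.Get_rank() == 1:
+                request = MPI.COMM_WORLD.Irecv(buf, source=0)
+                request.Wait()   # buf holds the received data after Wait()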
+ """ + data_frame = numpy.arange(3) + if self.rank == 0: + print(f"Sending data frame: {data_frame}") + self.send(data_frame, dest=1) + else: + print(f"Receiving data frame: {data_frame}") + self.receive(data_frame, source=0) + + def trap_serial(self, a, b, n): + """ + Now that we understand basic communication in MPI, we will proceed by parallelizing our first + algorithm-numerical integration using the “trapezoid rule.” + Early on in most calculus classes, students learn to estimate integrals using the trapezoid rule. + A range to be integrated is divided into many vertical slivers, and each sliver is approximated with + a trapezoid. The area of each trapezoid is computed, and then all their areas are added together. + https://materials.jeremybejarano.com/MPIwithPython/pointToPoint.html#the-trapezoidal-rule + + python mpi4py_basics.py 0.0 1.0 10000 + """ + integral = -(self.support_function(a) + self.support_function(b))/2.0 + # n+1 endpoints, but n trapazoids + for x in numpy.arange(a, b, n+1): + integral = integral + self.support_function(x) + integral = integral * (b-a)/n + return integral + + @staticmethod + def support_function(x): + """Support method to multiplication""" + return x*x + + def trap_parallel_serial(self, a, b, n): + """The parallel approach to trapezoidal integral estimation starts by dividing the original range among the + processors. Each process will get a group of one or more trapezoids to calculate area over. Now, notice how + we decided to implement to division of trapezoids among the processes. The processors individually calculate + their own ranges to work on. Although this is a small detail, it is fairly important. We could have written + the algorithm such that process 0 divides up the work for the other processors, then each processor calculates + its area, and finally a sum is computed. However, this would introduce an unnecessary bottleneck: all + processes with rank greater than 0 would be waiting for its data range to arrive. By having each process + calculate its own range, we gain a large speedup. + Once the integrals are calculated, they are summed up onto process 0. + Each process with a rank higher than 0 sends it's integral to process 0. The first parameter to the + Send command is an array storing the information your program wishes to send. + At the same time, process 0 receives the data from any process. This is what the tag ANY_SOURCE means. + It tells MPI to not worry about the sender, but rather to just accept data as it comes. + When Comm.Recv is called, the process must wait for a message to be sent to it. + If multiple processes are sending a message to the process with Comm.Send, the program will + call Comm.Recv multiple times - once for each message. The for-loop essentially accomplishes this. + MPI has two mechanisms specifically designed to partition the message space: tags and communicators. + The tag parameter is there in the case that two messages with the same size and datatype are + sent to the same process. In that case, the program would not necessarily be able to tell apart the data. + So the programmer can attach different tags that he or she defines to the sent data to keep them straight. + mpiexec -n 4 python mpi4py_basics.py 0.0 1.0 10007 + mpirun -n 4 python mpi4py_basics.py 0.0 1.0 10007 + """ + #h is the step size. 
n is the total number of trapezoids
+        h = (b-a)/n
+        #local_n is the number of trapezoids each process will calculate
+        #note that size must divide n
+        local_n = n // self.size
+
+        #we calculate the interval that each process handles
+        #local_a is the starting point and local_b is the endpoint
+        local_a = a + self.rank*local_n*h
+        local_b = local_a + local_n*h
+
+        #initializing variables. mpi4py requires that we pass numpy objects.
+        integral = numpy.zeros(1)
+        recv_buffer = numpy.zeros(1)
+
+        # perform local computation. Each process integrates its own interval
+        integral[0] = self.trap_serial(local_a, local_b, local_n)
+
+        # communication
+        # root node receives results from all processes and sums them
+        if self.rank == 0:
+            total = integral[0]
+            for i in range(1, self.size):
+                self.comm.Recv(recv_buffer, source=MPI.ANY_SOURCE)
+                total += recv_buffer[0]
+        else:
+            # all other processes send their result to the root process (rank 0)
+            self.comm.Send(integral, dest=0)
+
+        # root process prints results
+        if self.rank == 0:
+            print("With n =", n, "trapezoids, our estimate of the integral from", a, "to", b, "is", total)
+
+
+if __name__ == "__main__":
+    instance = MPI4PY_UTILS()
+    instance.simple_hello_world()
+    #instance.seperate_codes()
+    #instance.pass_random_draw()
+    #instance.send_and_receive()
+    a = float(sys.argv[1])
+    b = float(sys.argv[2])
+    n = int(sys.argv[3])
+
+    # Executes with: python mpi4py_basics.py 0.0 1.0 10000
+    #integral_1 = instance.trap_serial(a, b, n)
+    #if integral_1:
+    #    print("With n =", n, "trapezoids, our estimate of the integral from", a, "to", b, "is", integral_1)
+
+    # Executes with: mpirun -n 4 python mpi4py_basics.py 0.0 1.0 10007
+    integral = instance.trap_parallel_serial(a, b, n)
\ No newline at end of file
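
A closing note on trap_parallel_serial above: the explicit Recv loop on rank 0 is the textbook point-to-point pattern, and the same gather-and-add step is often expressed with an MPI collective instead. The sketch below only illustrates that alternative under stated assumptions (mpi4py and numpy installed, the number of slices divisible by the number of ranks); the function and variable names here are hypothetical and do not come from the patches.

from mpi4py import MPI
import numpy


def f(x):
    # Same integrand used throughout the module: f(x) = x^2
    return x * x


def parallel_trapezoid(a, b, n):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    h = (b - a) / n                   # width of one trapezoid
    local_n = n // size               # trapezoids handled by this rank (assumes size divides n)
    local_a = a + rank * local_n * h  # left edge of this rank's sub-interval

    # Local trapezoid-rule estimate over [local_a, local_a + local_n*h]
    x = numpy.linspace(local_a, local_a + local_n * h, local_n + 1)
    fx = f(x)
    local_integral = h * (fx[0] / 2.0 + fx[1:-1].sum() + fx[-1] / 2.0)

    # A collective sum replaces the explicit Send/Recv loop: every rank contributes,
    # and only the root (rank 0) receives the final value.
    total = comm.reduce(local_integral, op=MPI.SUM, root=0)
    if rank == 0:
        print("With n =", n, "trapezoids, the estimate of the integral from", a, "to", b, "is", total)


if __name__ == "__main__":
    # Run with, for example: mpirun -n 4 python <this file> 
    parallel_trapezoid(0.0, 1.0, 10000)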