-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
118 lines (90 loc) · 3.72 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import sys
sys.path.append('..')
import matplotlib.pyplot as plt
import numpy as np
from mylib.particle import Particle, PriorityQueue
from mylib.cell import Cell
from mylib.neigbour_search import neighbor_search_periodic
from mylib.treebuild import build_tree
from mylib.kernel import monoghan_kernel
if __name__ == "__main__":
    fig, axis = plt.subplots()
    NO_PARTICLES = 10_000

    # Build the particle set in ONE allocation. The previous np.append-in-a-loop
    # version copied the whole array on every iteration (O(n^2) total work).
    A: np.ndarray = np.array(
        [Particle(np.random.rand(2)) for _ in range(NO_PARTICLES)],
        dtype=object,
    )

    # Root cell spans the unit square; build_tree partitions A in place.
    root = Cell(
        regionLowerBound=[0.0, 0.0],
        regionHigherBound=[1.0, 1.0],
        lower_index=0,
        upper_index=len(A) - 1,
    )
    build_tree(A, root, 0)

    K = 32  # number of neighbours used for each density estimate

    ###########################################################################
    # Parallel density estimation
    ###########################################################################
    import multiprocessing
    from functools import reduce

    def worker(chunk):
        """Compute the SPH density of every particle in *chunk*.

        Returns three parallel lists ``(x_coords, y_coords, densities)``.
        Results must be returned explicitly: the chunks are processed in
        child processes, so mutations of the Particle objects (``rho``,
        ``priority_queue``) never propagate back to the parent's copy of A.
        """
        local_x_coords = []
        local_y_coords = []
        local_densities = []
        for particle in chunk:
            particle.priority_queue = PriorityQueue(K)
            neighbor_search_periodic(
                particle.priority_queue, root, A, particle.r, [1, 1]
            )
            # Queue key holds the largest squared neighbour distance ->
            # smoothing length H for the kernel.
            H = np.sqrt(particle.priority_queue.key())
            sum_of_mass = 0
            for i in range(K):
                # Entries store negated squared distances; undo the negation.
                R = np.sqrt(-particle.priority_queue._queue[i].key)
                mass = particle.priority_queue._queue[i].mass
                # rho contribution of this neighbour (Monaghan kernel;
                # a top-hat kernel was used in an earlier revision).
                sum_of_mass += mass * monoghan_kernel(R, H)
            particle.rho = sum_of_mass
            local_x_coords.append(particle.r[0])
            local_y_coords.append(particle.r[1])
            local_densities.append(particle.rho)
        return local_x_coords, local_y_coords, local_densities

    def my_reducer(first_tuple, second_tuple):
        """Merge two (x, y, rho) triples; extends *first_tuple*'s lists in place."""
        x_coords, y_coords, densities = first_tuple
        x_coords.extend(second_tuple[0])
        y_coords.extend(second_tuple[1])
        densities.extend(second_tuple[2])
        return x_coords, y_coords, densities

    chunk_size = NO_PARTICLES // 8
    chunks = [A[i:i + chunk_size] for i in range(0, len(A), chunk_size)]

    # NOTE(review): worker/my_reducer are defined inside the __main__ guard,
    # so this relies on the "fork" start method (Linux default). Under "spawn"
    # (Windows / macOS default) the children re-import the module and cannot
    # find these functions -- confirm the target platform.
    # Context manager guarantees the pool is terminated and joined, which the
    # previous version never did.
    with multiprocessing.Pool() as pool:
        results = pool.map(worker, chunks)

    # Unpack the per-chunk results into three flat lists.
    x_coords, y_coords, densities = reduce(my_reducer, results)
    ###########################################################################
    # END Parallelization
    ###########################################################################

    plt.scatter(x_coords, y_coords, s=4, c=densities, cmap="magma")
    plt.axis("equal")
    plt.colorbar()
    # plt.savefig(f"dens-{NO_PARTICLES}.png")
    plt.show()