/
bitonic.c
289 lines (243 loc) · 9.71 KB
/
bitonic.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
/*
ECSE420 Parallel Assignment 4
Dec 1, 2012
Adrian Lee
260272188
References:
Assignment 4 Supplement - Parallel Bitonic Sort Algorithm
Sorting Algorithms - http://www-users.cs.umn.edu/~karypis/parbook/Algorithms/pchap9.pdf
Bitonic Sorting - http://www.thi.informatik.uni-frankfurt.de/~klauck/PDA07/Bitonic%20Sorting.pdf
*/
#include <stdio.h> // Printf
#include <time.h> // Timer
#include <math.h> // Logarithm
#include <stdlib.h> // Malloc
#include "mpi.h" // MPI Library
#include "bitonic.h"
#define MASTER 0 // Who should do the final processing?
#define OUTPUT_NUM 10 // Number of elements to display in output
// Globals
// Not ideal for them to be here though
double timer_start;
double timer_end;
int process_rank;
int num_processes;
int * array;
int array_size;
///////////////////////////////////////////////////
// Main
///////////////////////////////////////////////////
int main(int argc, char * argv[]) {
    int i, j;
    // Initialization, get # of processes & this PID/rank
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &num_processes);
    MPI_Comm_rank(MPI_COMM_WORLD, &process_rank);
    // Validate the command line before dereferencing argv[1]
    if (argc < 2) {
        if (process_rank == MASTER) {
            fprintf(stderr, "Usage: %s <total_element_count>\n", argv[0]);
        }
        MPI_Finalize();
        return 1;
    }
    // Parse once instead of calling atoi() on every loop iteration
    int total_elements = atoi(argv[1]);
    // Initialize Array for Storing Random Numbers (each rank holds one chunk)
    array_size = total_elements / num_processes;
    if (array_size < 1) {
        if (process_rank == MASTER) {
            fprintf(stderr, "Need at least one element per process\n");
        }
        MPI_Finalize();
        return 1;
    }
    array = (int *) malloc(array_size * sizeof(int));
    if (array == NULL) {
        fprintf(stderr, "Rank %d: out of memory\n", process_rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    // Generate Random Numbers for Sorting (within each process).
    // Less overhead than MASTER distributing numbers to each slave.
    // Offset the seed by rank so all processes don't draw the same sequence.
    srand((unsigned) time(NULL) + (unsigned) process_rank);
    for (i = 0; i < array_size; i++) {
        array[i] = rand() % total_elements;
    }
    // Blocks until all processes have finished generating
    MPI_Barrier(MPI_COMM_WORLD);
    // Begin Parallel Bitonic Sort Algorithm from Assignment Supplement.
    // Hypercube dimension; assumes num_processes is a power of two.
    int dimensions = (int)(log2(num_processes));
    // Start Timer before starting first sort operation (first iteration)
    if (process_rank == MASTER) {
        printf("Number of Processes spawned: %d\n", num_processes);
        timer_start = MPI_Wtime();
    }
    // Each rank first sorts its local chunk sequentially
    qsort(array, array_size, sizeof(int), ComparisonFunc);
    // Bitonic sort: stage i runs compare-exchange steps j = i down to 0
    for (i = 0; i < dimensions; i++) {
        for (j = i; j >= 0; j--) {
            // (window_id is even AND jth bit of process is 0)
            // OR (window_id is odd AND jth bit of process is 1)
            if (((process_rank >> (i + 1)) % 2 == 0 && (process_rank >> j) % 2 == 0) || ((process_rank >> (i + 1)) % 2 != 0 && (process_rank >> j) % 2 != 0)) {
                CompareLow(j);
            } else {
                CompareHigh(j);
            }
        }
    }
    // Blocks until all processes have finished sorting
    MPI_Barrier(MPI_COMM_WORLD);
    if (process_rank == MASTER) {
        timer_end = MPI_Wtime();
        printf("Displaying sorted array (only 10 elements for quick verification)\n");
        // Print a sample of MASTER's local chunk (~OUTPUT_NUM entries).
        // Clamp the stride to 1 so small chunks don't modulo by zero.
        int stride = array_size / OUTPUT_NUM;
        if (stride == 0) {
            stride = 1;
        }
        for (i = 0; i < array_size; i++) {
            if ((i % stride) == 0) {
                printf("%d ", array[i]);
            }
        }
        printf("\n\n");
        printf("Time Elapsed (Sec): %f\n", timer_end - timer_start);
    }
    // Reset the state of the heap from Malloc
    free(array);
    // Done
    MPI_Finalize();
    return 0;
}
///////////////////////////////////////////////////
// Comparison Function
///////////////////////////////////////////////////
// qsort comparator for int: negative if a < b, zero if equal, positive if a > b.
// Uses (x > y) - (x < y) instead of x - y, because the subtraction overflows
// (undefined behavior / wrong sign) for operands of large opposite magnitude,
// e.g. INT_MIN vs INT_MAX.
int ComparisonFunc(const void * a, const void * b) {
    int x = *(const int *) a;
    int y = *(const int *) b;
    return (x > y) - (x < y);
}
///////////////////////////////////////////////////
// Compare Low
///////////////////////////////////////////////////
// Compare-exchange step j, low side: after this call, this rank holds the
// array_size SMALLEST values of the union of its chunk and its partner's.
// Protocol (must mirror CompareHigh): send max, recv min, send partition,
// recv partition. Assumes array is sorted ascending on entry; leaves it sorted.
void CompareLow(int j) {
    int i, min;
    // Partner differs from us only at bit j of the rank (hypercube neighbor)
    int partner = process_rank ^ (1 << j);
    // Exchange boundary values: our max for the partner's min
    MPI_Send(&array[array_size - 1], 1, MPI_INT, partner, 0, MPI_COMM_WORLD);
    MPI_Recv(&min, 1, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    // Slot 0 of each buffer carries the element count; payload is slots 1..count
    int * buffer_send = malloc((array_size + 1) * sizeof(int));
    int * buffer_recieve = malloc((array_size + 1) * sizeof(int));
    // Values greater than the partner's min may belong on the high side.
    // The array is sorted ascending, so they form a SUFFIX: scan from the top
    // (the original scanned from index 0 and broke immediately, missing them).
    int send_counter = 0;
    for (i = array_size - 1; i >= 0 && array[i] > min; i--) {
        send_counter++;
        buffer_send[send_counter] = array[i];
    }
    buffer_send[0] = send_counter;
    // +1 so the count header in slot 0 travels with the payload
    MPI_Send(buffer_send, send_counter + 1, MPI_INT, partner, 0, MPI_COMM_WORLD);
    // Receive the partner's candidates (values smaller than our max)
    MPI_Recv(buffer_recieve, array_size + 1, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    int recv_counter = buffer_recieve[0];
    // Merge: keep the array_size smallest of (our chunk + received values).
    // Values the partner did NOT send are >= our max, so they can never rank
    // among the smallest array_size of the union.
    int total = array_size + recv_counter;
    int * merged = malloc(total * sizeof(int));
    for (i = 0; i < array_size; i++) {
        merged[i] = array[i];
    }
    for (i = 0; i < recv_counter; i++) {
        merged[array_size + i] = buffer_recieve[i + 1];
    }
    qsort(merged, total, sizeof(int), ComparisonFunc);
    for (i = 0; i < array_size; i++) {
        array[i] = merged[i];
    }
    // Reset the state of the heap from Malloc
    free(merged);
    free(buffer_send);
    free(buffer_recieve);
}
///////////////////////////////////////////////////
// Compare High
///////////////////////////////////////////////////
// Compare-exchange step j, high side: after this call, this rank holds the
// array_size LARGEST values of the union of its chunk and its partner's.
// Protocol (must mirror CompareLow): recv max, send min, recv partition,
// send partition. Assumes array is sorted ascending on entry; leaves it sorted.
void CompareHigh(int j) {
    int i, max;
    // Partner differs from us only at bit j of the rank (hypercube neighbor)
    int partner = process_rank ^ (1 << j);
    // Exchange boundary values: receive the partner's max, send our min
    MPI_Recv(&max, 1, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Send(&array[0], 1, MPI_INT, partner, 0, MPI_COMM_WORLD);
    // Slot 0 of each buffer carries the element count; payload is slots 1..count
    int * buffer_send = malloc((array_size + 1) * sizeof(int));
    int * buffer_recieve = malloc((array_size + 1) * sizeof(int));
    // Values smaller than the partner's max may belong on the low side.
    // The array is sorted ascending, so they form a prefix: scan from index 0.
    int send_counter = 0;
    for (i = 0; i < array_size && array[i] < max; i++) {
        send_counter++;
        buffer_send[send_counter] = array[i];
    }
    buffer_send[0] = send_counter;
    // Receive the partner's candidates (values greater than our min)
    MPI_Recv(buffer_recieve, array_size + 1, MPI_INT, partner, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    int recv_counter = buffer_recieve[0];
    // +1 so the count header in slot 0 travels with the payload (the original
    // sent only send_counter items, truncating the last buffered element)
    MPI_Send(buffer_send, send_counter + 1, MPI_INT, partner, 0, MPI_COMM_WORLD);
    // Merge: keep the array_size largest of (our chunk + received values).
    // Values the partner did NOT send are <= our min, so they can never rank
    // among the largest array_size of the union.
    int total = array_size + recv_counter;
    int * merged = malloc(total * sizeof(int));
    for (i = 0; i < array_size; i++) {
        merged[i] = array[i];
    }
    for (i = 0; i < recv_counter; i++) {
        merged[array_size + i] = buffer_recieve[i + 1];
    }
    qsort(merged, total, sizeof(int), ComparisonFunc);
    for (i = 0; i < array_size; i++) {
        array[i] = merged[total - array_size + i];
    }
    // Reset the state of the heap from Malloc
    free(merged);
    free(buffer_send);
    free(buffer_recieve);
}