-
Notifications
You must be signed in to change notification settings - Fork 7
/
Ckmeans.1d.dp.cpp
409 lines (325 loc) · 11.2 KB
/
Ckmeans.1d.dp.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/*
Ckmeans_1d_dp.cpp -- Performs 1-D k-means by a dynamic programming
approach that is guaranteed to be optimal.
Joe Song
Computer Science Department
New Mexico State University
joemsong@cs.nmsu.edu
Haizhou Wang
Computer Science Department
New Mexico State University
hwang@cs.nmsu.edu
Created: May 19, 2007
Updated: September 3, 2009
Updated: September 6, 2009. Handle special cases when K is too big or array
contains identical elements. Added number of clusters selection by the
MCLUST package.
Updated: Oct 6, 2009 by Haizhou Wang Convert this program from R code into
C++ code.
Updated: Oct 20, 2009 by Haizhou Wang, add interface to this function, so that
it could be called directly in R
Updated: March 20, 2014. Joe Song.
1. When the number of clusters is not uniquely specified, the code now
automatically select an optimal number of clusters by Bayesian
information criterion.
2. Reduced unnecessary sorting performed on the input vector in
kmeans_1d_dp().
Updated: February 8, 2015.
1. Cleaned up the code by removing commented sections (Haizhou Wang)
2. Speed up the code slightly as suggested by a user (Joe Song)
3. Throw exceptions for fatal errors (Joe Song)
Updated: May 3, 2016
1. Changed from 1-based to 0-based C implementation (MS)
2. Optimized the code by reducing overhead. See 22% reduction in runtime to
cluster one million points into two clusters. (MS)
3. Removed the result class ClusterResult
Updated: May 7, 2016
1. Substantial runtime reduction. Added code to check for an upper bound
for the sum of within cluster square distances. This reduced the runtime
by half when clustering 100000 points (from standard normal distribution)
into 10 clusters.
2. Eliminated the unnecessary calculation of (n-1) elements in the dynamic
programming matrix that are not needed for the final result. This
resulted in enormous reduction in run time when the number of cluster
is 2: assigning one million points into two clusters took half
a second on an iMac with a 2.93 GHz Intel Core i7 processor.
Updated: May 9, 2016
1. Added an alternative way to fill in the dynamic programming
matrix by i (columns in the matrix) to achieve speedup
Updated: May 21, 2016
1. Moved the weighted solutions to new files. They will be updated separately
Updated: 2016-08-28
1. Removed usage of lambda functions in C++ code for compatibility with
older versions of C++ compilers.
2. Shifted the values of x by median to improve numerical stability.
*/
#include "Ckmeans.1d.dp.h"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <vector>
// #define DEBUG
// Within-segment sum of squared deviations of x[j..i] (inclusive),
// computed in O(1) from prefix sums:
//   sum_x[i]    = x[0] + ... + x[i]
//   sum_x_sq[i] = x[0]^2 + ... + x[i]^2
// The result is clamped at zero to guard against negative round-off.
inline double ssq(const size_t j, const size_t i,
                  const std::vector<double> & sum_x,
                  const std::vector<double> & sum_x_sq)
{
  double result;
  if(j == 0) {
    // Segment starts at the first element: mean is sum_x[i] / (i+1).
    result = sum_x_sq[i] - sum_x[i] * sum_x[i] / (i + 1);
  } else {
    const double mean = (sum_x[i] - sum_x[j - 1]) / (i - j + 1);
    result = sum_x_sq[i] - sum_x_sq[j - 1] - (i - j + 1) * mean * mean;
  }
  return (result < 0) ? 0 : result;
}
// Fill row k of the DP matrices S and J for columns imin..imax by divide
// and conquer: solve the middle column i first, then recurse on the two
// halves. S[k][i] is minimized over candidate split points j (start index
// of the last cluster) as
//   S[k][i] = min_j { S[k-1][j-1] + ssq(j, i) }
// and J[k][i] records the minimizing j. The pruning below relies on the
// optimal split position being nondecreasing along a row of J.
void fill_row_k(int imin, int imax, int k,
std::vector<std::vector<double>> & S,
std::vector<std::vector<size_t>> & J,
const std::vector<double> & sum_x,
const std::vector<double> & sum_x_sq)
{
if(imin > imax) {
return;
}
const int N = S[0].size();
// Middle column; its solved split bounds both recursive halves.
int i = (imin + imax) / 2;
#ifdef DEBUG
std::cout << std::endl << " i=" << i << ":" << std::endl;
#endif
// Initialization of S[k][i]:
// start from candidate j = i, i.e. the last cluster is the single point x[i].
S[k][i] = S[k - 1][i - 1];
J[k][i] = i;
int jlow=k; // the lower end for j
// Monotonicity bound: the split for column i cannot lie left of the split
// already found for the nearest smaller solved column in this row...
if(imin > k) {
jlow = std::max(jlow, (int)J[k][imin-1]);
}
// ...nor left of the split in the previous row at the same column.
jlow = std::max(jlow, (int)J[k-1][i]);
int jhigh = i - 1; // the upper end for j
// Nor right of the split of the nearest larger solved column in this row.
if(imax < N-1) {
jhigh = std::min(jhigh, (int)J[k][imax+1]);
}
#ifdef DEBUG
std::cout << " j-=" << jlow << ", j+=" << jhigh << ":" << std::endl;
#endif
// Scan candidate splits from high to low, tightening jlow as we go.
for(int j=jhigh; j>=jlow; --j) {
// compute s(j,i)
double sji = ssq(j, i, sum_x, sum_x_sq);
// MS May 11, 2016 Added:
// Early exit: ssq(j,i) never decreases as j decreases, and
// S[k-1][jlow-1] is the cheapest predecessor cost still reachable, so
// once this lower bound cannot beat the current best, no smaller j can.
if(sji + S[k-1][jlow-1] >= S[k][i]) break;
// Examine the lower bound of the cluster border
// compute s(jlow, i)
double sjlowi = ssq(jlow, i, sum_x, sum_x_sq);
double SSQ_jlow = sjlowi + S[k-1][jlow-1];
if(SSQ_jlow < S[k][i]) {
// shrink the lower bound
S[k][i] = SSQ_jlow;
J[k][i] = jlow;
}
jlow ++;
double SSQ_j = sji + S[k - 1][j - 1];
if(SSQ_j < S[k][i]) {
S[k][i] = SSQ_j;
J[k][i] = j;
}
}
#ifdef DEBUG
std::cout << std::endl << // " k=" << k << ": " <<
"\t" << S[k][i] << "\t" << J[k][i];
#endif
#ifdef DEBUG
std::cout << std::endl;
#endif
// Recurse on the two halves; the solved J[k][i] narrows their j-ranges.
fill_row_k(imin, i-1, k, S, J, sum_x, sum_x_sq);
fill_row_k(i+1, imax, k, S, J, sum_x, sum_x_sq);
}
// Build the dynamic programming tables for optimal 1-D k-means.
//
// x: values to cluster; assumed sorted in nondecreasing order
//    (kmeans_1d_dp() sorts before calling this).
// S: K x N matrix. S[k][i] is the minimum sum of within-cluster squared
//    distances when x[0..i] is partitioned into k+1 clusters.
// J: K x N backtrack matrix. J[k][i] is the index of the first point of
//    the last cluster in that optimal partition.
// All indices in this program are 0-based.
void fill_dp_matrix(const std::vector<double> & x,
                    std::vector< std::vector< double > > & S,
                    std::vector< std::vector< size_t > > & J)
{
  const int K = S.size();
  const int N = S[0].size();

  // Shift every value by the median so prefix sums stay small, which
  // improves numerical stability.
  const double shift = x[N / 2];

  std::vector<double> sum_x(N), sum_x_sq(N);

  for(int i = 0; i < N; ++i) {
    const double v = x[i] - shift;
    if(i == 0) {
      sum_x[0] = v;
      sum_x_sq[0] = v * v;
    } else {
      sum_x[i] = sum_x[i - 1] + v;
      sum_x_sq[i] = sum_x_sq[i - 1] + v * v;
    }
    // Row k = 0: one cluster covering x[0..i].
    S[0][i] = ssq(0, i, sum_x, sum_x_sq);
    J[0][i] = 0;
  }

  for(int k = 1; k < K; ++k) {
    int imin;
    if(k < K - 1) {
      imin = std::max(1, k);
    } else {
      // Only S[K-1][N-1] is ever read, so skip columns 0..N-2 of the
      // last row entirely.
      imin = N - 1;
    }
#ifdef DEBUG
    std::cout << std::endl << "k=" << k << ":";
#endif
    fill_row_k(imin, N - 1, k, S, J, sum_x, sum_x_sq);
  }
}
// Recover cluster assignments from the backtrack matrix J and compute
// per-cluster statistics on the (sorted) data x.
//
// x        -- sorted data values, length N
// J        -- K x N backtrack matrix from fill_dp_matrix(); J[k][r] is the
//             index of the leftmost point of cluster k when x[r] is the
//             rightmost point of cluster k
// cluster  -- out: length-N array of 0-based cluster indices per point
// centers  -- out: length-K array of cluster means
// withinss -- out: length-K array of within-cluster sums of squared
//             distances
// count    -- out: length-K array of cluster sizes
void backtrack(const std::vector<double> & x,
               const std::vector< std::vector< size_t > > & J,
               int* cluster, double* centers, double* withinss, int* count)
{
  const size_t K = J.size();
  const size_t N = J[0].size();
  size_t cluster_right = N - 1;
  size_t cluster_left;

  // Backtrack the clusters, right to left, from the DP matrix.
  for(int k = K - 1; k >= 0; --k) {
    cluster_left = J[k][cluster_right];
    const size_t n_k = cluster_right - cluster_left + 1;

    double sum = 0.0;
    for(size_t i = cluster_left; i <= cluster_right; ++i) {
      cluster[i] = k;
      sum += x[i];
    }
    centers[k] = sum / n_k;

    // FIX: initialize before accumulating. The original used += on
    // withinss[k] without ever setting it, silently relying on the caller
    // passing zero-initialized storage.
    withinss[k] = 0.0;
    for(size_t i = cluster_left; i <= cluster_right; ++i) {
      withinss[k] += (x[i] - centers[k]) * (x[i] - centers[k]);
    }

    count[k] = (int) n_k;

    if(k > 0) {
      cluster_right = cluster_left - 1;
    }
  }
}
// Backtrack only the cluster sizes from the DP matrix J, without computing
// assignments or statistics. x is accepted for signature symmetry with the
// full backtrack() overload; its values are not read here.
void backtrack(const std::vector<double> & x,
               const std::vector< std::vector< size_t > > & J,
               std::vector<size_t> & count)
{
  const size_t K = J.size();
  const size_t N = J[0].size();
  size_t right = N - 1;

  // Walk the clusters from the last to the first.
  for(int k = K - 1; k >= 0; --k) {
    const size_t left = J[k][right];
    count[k] = right - left + 1;
    if(k > 0) {
      right = left - 1;
    }
  }
}
// Count the number of distinct adjacent runs in [first, last). When the
// range is sorted this equals the number of unique values; an empty range
// yields 0.
template <class ForwardIterator>
size_t numberOfUnique(ForwardIterator first, ForwardIterator last)
{
  if (first == last) {
    return 0;
  }
  size_t distinct = 1;
  ForwardIterator prev = first;
  for (ForwardIterator cur = std::next(first); cur != last; ++cur, ++prev) {
    if (*cur != *prev) {
      ++distinct;
    }
  }
  return distinct;
}
// Entry point: optimally cluster the N values in x into between Kmin and
// Kmax groups by dynamic programming, writing results into caller-provided
// arrays:
//   cluster  -- length N; 0-based cluster index per point, in the ORIGINAL
//               (unsorted) order of x
//   centers  -- length >= Kmax; mean of each cluster
//   withinss -- length >= Kmax; within-cluster sum of squared distances.
//               NOTE(review): backtrack() accumulates into withinss with +=
//               without zeroing it first -- callers appear expected to pass
//               zero-initialized storage; confirm at the call sites.
//   size     -- length >= Kmax; points (or total weight) per cluster
// y, when non-NULL and not constant, supplies per-point weights and routes
// the computation through the weighted variants.
void kmeans_1d_dp(const double *x, const size_t N, const double *y,
size_t Kmin, size_t Kmax,
int* cluster, double* centers,
double* withinss, int* size)
{
// Input:
// x -- an array of double precision numbers, not necessarily sorted
// Kmin -- the minimum number of clusters expected
// Kmax -- the maximum number of clusters expected
// NOTE: All vectors in this program is considered starting at position 0.
std::vector<double> x_sorted(N);
std::vector<double> y_sorted;
auto is_equally_weighted(true);
// order[i] maps sorted position i back to the original index of x.
std::vector<size_t> order(N);
//Number generation using lambda function, not supported by all g++:
//std::size_t n(0);
//std::generate(order.begin(), order.end(), [&]{ return n++; });
for(size_t i=0; i<order.size(); ++i) {
order[i] = i;
}
// Sort the index of x in increasing order of x
// Sorting using lambda function, not supported by all g++ versions:
// std::sort(order.begin(), order.end(),
// [&](size_t i1, size_t i2) { return x[i1] < x[i2]; } );
// Function-object comparator kept for pre-C++11 compiler compatibility.
struct CompareIndex {
const double * m_x;
CompareIndex(const double * x) : m_x(x) {}
bool operator() (size_t i, size_t j) { return (m_x[i] < m_x[j]);}
} compi(x);
std::sort(order.begin(), order.end(), compi);
for(size_t i=0; i<order.size(); ++i) {
x_sorted[i] = x[order[i]];
}
// check to see if unequal weight is provided
// A constant weight vector is treated as the unweighted case.
if(y != NULL) {
is_equally_weighted = true;
for(size_t i=1; i<N; ++i) {
if(y[i] != y[i-1]) {
is_equally_weighted = false;
break;
}
}
}
if(! is_equally_weighted) {
y_sorted.resize(N);
for(size_t i=0; i<order.size(); ++i) {
y_sorted[i] = y[order[i]];
}
}
// More clusters than distinct values is meaningless; cap Kmax at nUnique.
const size_t nUnique = numberOfUnique(x_sorted.begin(), x_sorted.end());
Kmax = nUnique < Kmax ? nUnique : Kmax;
if(nUnique > 1) { // The case when not all elements are equal.
std::vector< std::vector< double > > S( Kmax, std::vector<double>(N) );
std::vector< std::vector< size_t > > J( Kmax, std::vector<size_t>(N) );
size_t Kopt;
// Fill in dynamic programming matrix
if(is_equally_weighted) {
fill_dp_matrix(x_sorted, S, J);
// Choose an optimal number of levels between Kmin and Kmax
Kopt = select_levels(x_sorted, J, Kmin, Kmax);
} else {
fill_weighted_dp_matrix(x_sorted, y_sorted, S, J);
// Choose an optimal number of levels between Kmin and Kmax
Kopt = select_levels_weighted(x_sorted, y_sorted, J, Kmin, Kmax);
}
// Drop the unused rows so backtrack() sees exactly Kopt clusters.
if (Kopt < Kmax) { // Reform the dynamic programming matrix S and J
J.erase(J.begin() + Kopt, J.end());
}
std::vector<int> cluster_sorted(N);
// Backtrack to find the clusters beginning and ending indices
if(is_equally_weighted) {
backtrack(x_sorted, J, &cluster_sorted[0], centers, withinss, size);
} else {
backtrack_weighted(x_sorted, y_sorted, J, &cluster_sorted[0], centers, withinss, size);
}
for(size_t i = 0; i < N; ++i) {
// Obtain clustering on data in the original order
cluster[order[i]] = cluster_sorted[i];
}
} else { // A single cluster that contains all elements
for(size_t i=0; i<N; ++i) {
cluster[i] = 0;
}
centers[0] = x[0];
withinss[0] = 0.0;
// In the weighted case this stores total weight N*y[0] truncated to int.
// NOTE(review): double-to-int truncation for fractional weights -- verify
// this matches the R interface's expectation.
size[0] = N * (is_equally_weighted ? 1 : y[0]);
}
} //end of kmeans_1d_dp()