(function(f){if(typeof exports==="object"&&typeof module!=="undefined"){module.exports=f()}else if(typeof define==="function"&&define.amd){define([],f)}else{var g;if(typeof window!=="undefined"){g=window}else if(typeof global!=="undefined"){g=global}else if(typeof self!=="undefined"){g=self}else{g=this}g.ss = f()}})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
'use strict';
// # simple-statistics
//
// A simple, literate statistics system.
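//
// For example, once the bundle is loaded as `ss`:
//   ss.mean([1, 2, 3]); //= 2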
var ss = module.exports = {};
// Linear Regression
ss.linearRegression = require(17);
ss.linearRegressionLine = require(18);
ss.standardDeviation = require(43);
ss.rSquared = require(32);
ss.mode = require(25);
ss.min = require(23);
ss.max = require(20);
ss.sum = require(45);
ss.quantile = require(30);
ss.quantileSorted = require(31);
ss.iqr = ss.interquartileRange = require(15);
ss.medianAbsoluteDeviation = ss.mad = require(19);
ss.chunk = require(7);
ss.shuffle = require(40);
ss.shuffleInPlace = require(41);
ss.sample = require(34);
ss.ckmeans = require(8);
ss.sortedUniqueCount = require(42);
ss.sumNthPowerDeviations = require(46);
// sample statistics
ss.sampleCovariance = require(36);
ss.sampleCorrelation = require(35);
ss.sampleVariance = require(39);
ss.sampleStandardDeviation = require(38);
ss.sampleSkewness = require(37);
// measures of centrality
ss.geometricMean = require(13);
ss.harmonicMean = require(14);
ss.mean = ss.average = require(21);
ss.median = require(22);
ss.rootMeanSquare = ss.rms = require(33);
ss.variance = require(49);
ss.tTest = require(47);
ss.tTestTwoSample = require(48);
// ss.jenks = require('./src/jenks');
// Classifiers
ss.bayesian = require(2);
ss.perceptron = require(27);
// Distribution-related methods
ss.epsilon = require(10); // We make ε available to the test suite.
ss.factorial = require(12);
ss.bernoulliDistribution = require(3);
ss.binomialDistribution = require(4);
ss.poissonDistribution = require(28);
ss.chiSquaredGoodnessOfFit = require(6);
// Normal distribution
ss.zScore = require(50);
ss.cumulativeStdNormalProbability = require(9);
ss.standardNormalTable = require(44);
ss.errorFunction = ss.erf = require(11);
ss.inverseErrorFunction = require(16);
ss.probit = require(29);
ss.mixin = require(24);
},{"10":10,"11":11,"12":12,"13":13,"14":14,"15":15,"16":16,"17":17,"18":18,"19":19,"2":2,"20":20,"21":21,"22":22,"23":23,"24":24,"25":25,"27":27,"28":28,"29":29,"3":3,"30":30,"31":31,"32":32,"33":33,"34":34,"35":35,"36":36,"37":37,"38":38,"39":39,"4":4,"40":40,"41":41,"42":42,"43":43,"44":44,"45":45,"46":46,"47":47,"48":48,"49":49,"50":50,"6":6,"7":7,"8":8,"9":9}],2:[function(require,module,exports){
'use strict';
/**
* [Bayesian Classifier](http://en.wikipedia.org/wiki/Naive_Bayes_classifier)
*
* This is a naïve Bayesian classifier that takes
* singly-nested objects.
*
* @class
* @example
* var bayes = new BayesianClassifier();
* bayes.train({
* species: 'Cat'
* }, 'animal');
* var result = bayes.score({
* species: 'Cat'
* })
* // result
* // {
* // animal: 1
* // }
*/
function BayesianClassifier() {
// The number of items that are currently
// classified in the model
this.totalCount = 0;
// Every item classified in the model
this.data = {};
}
/**
* Train the classifier with a new item, which has a single
* dimension of JavaScript literal keys and values.
*
* @param {Object} item an object with singly-deep properties
* @param {string} category the category this item belongs to
* @return {undefined} adds the item to the classifier
*/
BayesianClassifier.prototype.train = function(item, category) {
// If the data object doesn't have any values
// for this category, create a new object for it.
if (!this.data[category]) {
this.data[category] = {};
}
// Iterate through each key in the item.
for (var k in item) {
var v = item[k];
// Initialize the nested object `data[category][k][item[k]]`
// with an object of keys that equal 0.
if (this.data[category][k] === undefined) {
this.data[category][k] = {};
}
if (this.data[category][k][v] === undefined) {
this.data[category][k][v] = 0;
}
// And increment the key for this key/value combination.
this.data[category][k][item[k]]++;
}
// Increment the number of items classified
this.totalCount++;
};
/**
* Generate a score of how well this item matches all
* possible categories based on its attributes
*
* @param {Object} item an item in the same format as was passed to train
* @returns {Object} of probabilities that this item belongs to a
* given category.
*/
BayesianClassifier.prototype.score = function(item) {
// Initialize an empty array of odds per category.
var odds = {}, category;
// Iterate through each key in the item,
// then iterate through each category that has been used
// in previous calls to `.train()`
for (var k in item) {
var v = item[k];
for (category in this.data) {
// Create an empty object for storing key - value combinations
// for this category.
if (odds[category] === undefined) { odds[category] = {}; }
// If this category has no data for this property, it counts
// for nothing; but if it does have the property we're looking
// for in the item to categorize, it counts based on how often
// that key/value combination was seen versus the whole population.
if (this.data[category][k]) {
odds[category][k + '_' + v] = (this.data[category][k][v] || 0) / this.totalCount;
} else {
odds[category][k + '_' + v] = 0;
}
}
}
// Set up a new object that will contain sums of these odds by category
var oddsSums = {};
for (category in odds) {
// Tally all of the odds for each category-combination pair -
// the non-existence of a category does not add anything to the
// score.
for (var combination in odds[category]) {
if (oddsSums[category] === undefined) {
oddsSums[category] = 0;
}
oddsSums[category] += odds[category][combination];
}
}
return oddsSums;
};
module.exports = BayesianClassifier;
},{}],3:[function(require,module,exports){
'use strict';
var binomialDistribution = require(4);
/**
* The [Bernoulli distribution](http://en.wikipedia.org/wiki/Bernoulli_distribution)
* is the discrete probability
* distribution of a random variable which takes value 1 with success
* probability `p` and value 0 with failure
* probability `q` = 1 - `p`. It can be used, for example, to represent the
* toss of a coin, where "1" is defined to mean "heads" and "0" is defined
* to mean "tails" (or vice versa). It is
* a special case of a Binomial Distribution
* where `n` = 1.
*
* @param {number} p input value, between 0 and 1 inclusive
* @returns {number} value of bernoulli distribution at this point
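* @example
* // with p = 0.3, failure (0) has probability 0.7 and success (1) has 0.3
* bernoulliDistribution(0.3); //= { 0: 0.7, 1: 0.3 }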
*/
function bernoulliDistribution(p) {
// Check that `p` is a valid probability (0 ≤ p ≤ 1)
if (p < 0 || p > 1 ) { return null; }
return binomialDistribution(1, p);
}
module.exports = bernoulliDistribution;
},{"4":4}],4:[function(require,module,exports){
'use strict';
var epsilon = require(10);
var factorial = require(12);
/**
* The [Binomial Distribution](http://en.wikipedia.org/wiki/Binomial_distribution) is the discrete probability
* distribution of the number of successes in a sequence of n independent yes/no experiments, each of which yields
* success with probability `probability`. Such a success/failure experiment is also called a Bernoulli experiment or
* Bernoulli trial; when trials = 1, the Binomial Distribution is a Bernoulli Distribution.
*
* @param {number} trials number of trials to simulate
* @param {number} probability probability of success in each trial, between 0 and 1 inclusive
* @returns {Object} output: an object mapping each outcome to its probability
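* @example
* // four fair coin flips: the probability of seeing 0 through 4 heads
* binomialDistribution(4, 0.5); //= { 0: 0.0625, 1: 0.25, 2: 0.375, 3: 0.25, 4: 0.0625 }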
*/
function binomialDistribution(trials, probability) {
// Check that `probability` is a valid probability (0 ≤ p ≤ 1),
// and that `trials` is a strictly positive integer.
if (probability < 0 || probability > 1 ||
trials <= 0 || trials % 1 !== 0) {
return null;
}
// We initialize `x`, the random variable, and `cumulativeProbability`,
// an accumulator for the cumulative distribution function, to 0.
// `cells` is the object we'll return, mapping each outcome `x`
// to its probability. We iterate until the cumulative probability
// is within `epsilon` of 1.0.
var x = 0,
cumulativeProbability = 0,
cells = {};
// This algorithm iterates through each potential outcome,
// until the `cumulativeProbability` is very close to 1, at
// which point we've defined the vast majority of outcomes
do {
// a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)
cells[x] = factorial(trials) /
(factorial(x) * factorial(trials - x)) *
(Math.pow(probability, x) * Math.pow(1 - probability, trials - x));
cumulativeProbability += cells[x];
x++;
// when the cumulativeProbability is nearly 1, we've calculated
// the useful range of this distribution
} while (cumulativeProbability < 1 - epsilon);
return cells;
}
module.exports = binomialDistribution;
},{"10":10,"12":12}],5:[function(require,module,exports){
'use strict';
/**
* **Percentage Points of the χ2 (Chi-Squared) Distribution**
*
* The [χ2 (Chi-Squared) Distribution](http://en.wikipedia.org/wiki/Chi-squared_distribution) is used in the common
* chi-squared tests for goodness of fit of an observed distribution to a theoretical one, the independence of two
* criteria of classification of qualitative data, and in confidence interval estimation for a population standard
* deviation of a normal distribution from a sample standard deviation.
*
* Values from Appendix 1, Table III of William W. Hines & Douglas C. Montgomery, "Probability and Statistics in
* Engineering and Management Science", Wiley (1980).
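*
* @example
* // the critical value for 1 degree of freedom at the 0.05 significance level
* chiSquaredDistributionTable[1][0.05]; //= 3.84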
*/
var chiSquaredDistributionTable = {
1: { 0.995: 0.00, 0.99: 0.00, 0.975: 0.00, 0.95: 0.00, 0.9: 0.02, 0.5: 0.45, 0.1: 2.71, 0.05: 3.84, 0.025: 5.02, 0.01: 6.63, 0.005: 7.88 },
2: { 0.995: 0.01, 0.99: 0.02, 0.975: 0.05, 0.95: 0.10, 0.9: 0.21, 0.5: 1.39, 0.1: 4.61, 0.05: 5.99, 0.025: 7.38, 0.01: 9.21, 0.005: 10.60 },
3: { 0.995: 0.07, 0.99: 0.11, 0.975: 0.22, 0.95: 0.35, 0.9: 0.58, 0.5: 2.37, 0.1: 6.25, 0.05: 7.81, 0.025: 9.35, 0.01: 11.34, 0.005: 12.84 },
4: { 0.995: 0.21, 0.99: 0.30, 0.975: 0.48, 0.95: 0.71, 0.9: 1.06, 0.5: 3.36, 0.1: 7.78, 0.05: 9.49, 0.025: 11.14, 0.01: 13.28, 0.005: 14.86 },
5: { 0.995: 0.41, 0.99: 0.55, 0.975: 0.83, 0.95: 1.15, 0.9: 1.61, 0.5: 4.35, 0.1: 9.24, 0.05: 11.07, 0.025: 12.83, 0.01: 15.09, 0.005: 16.75 },
6: { 0.995: 0.68, 0.99: 0.87, 0.975: 1.24, 0.95: 1.64, 0.9: 2.20, 0.5: 5.35, 0.1: 10.65, 0.05: 12.59, 0.025: 14.45, 0.01: 16.81, 0.005: 18.55 },
7: { 0.995: 0.99, 0.99: 1.25, 0.975: 1.69, 0.95: 2.17, 0.9: 2.83, 0.5: 6.35, 0.1: 12.02, 0.05: 14.07, 0.025: 16.01, 0.01: 18.48, 0.005: 20.28 },
8: { 0.995: 1.34, 0.99: 1.65, 0.975: 2.18, 0.95: 2.73, 0.9: 3.49, 0.5: 7.34, 0.1: 13.36, 0.05: 15.51, 0.025: 17.53, 0.01: 20.09, 0.005: 21.96 },
9: { 0.995: 1.73, 0.99: 2.09, 0.975: 2.70, 0.95: 3.33, 0.9: 4.17, 0.5: 8.34, 0.1: 14.68, 0.05: 16.92, 0.025: 19.02, 0.01: 21.67, 0.005: 23.59 },
10: { 0.995: 2.16, 0.99: 2.56, 0.975: 3.25, 0.95: 3.94, 0.9: 4.87, 0.5: 9.34, 0.1: 15.99, 0.05: 18.31, 0.025: 20.48, 0.01: 23.21, 0.005: 25.19 },
11: { 0.995: 2.60, 0.99: 3.05, 0.975: 3.82, 0.95: 4.57, 0.9: 5.58, 0.5: 10.34, 0.1: 17.28, 0.05: 19.68, 0.025: 21.92, 0.01: 24.72, 0.005: 26.76 },
12: { 0.995: 3.07, 0.99: 3.57, 0.975: 4.40, 0.95: 5.23, 0.9: 6.30, 0.5: 11.34, 0.1: 18.55, 0.05: 21.03, 0.025: 23.34, 0.01: 26.22, 0.005: 28.30 },
13: { 0.995: 3.57, 0.99: 4.11, 0.975: 5.01, 0.95: 5.89, 0.9: 7.04, 0.5: 12.34, 0.1: 19.81, 0.05: 22.36, 0.025: 24.74, 0.01: 27.69, 0.005: 29.82 },
14: { 0.995: 4.07, 0.99: 4.66, 0.975: 5.63, 0.95: 6.57, 0.9: 7.79, 0.5: 13.34, 0.1: 21.06, 0.05: 23.68, 0.025: 26.12, 0.01: 29.14, 0.005: 31.32 },
15: { 0.995: 4.60, 0.99: 5.23, 0.975: 6.27, 0.95: 7.26, 0.9: 8.55, 0.5: 14.34, 0.1: 22.31, 0.05: 25.00, 0.025: 27.49, 0.01: 30.58, 0.005: 32.80 },
16: { 0.995: 5.14, 0.99: 5.81, 0.975: 6.91, 0.95: 7.96, 0.9: 9.31, 0.5: 15.34, 0.1: 23.54, 0.05: 26.30, 0.025: 28.85, 0.01: 32.00, 0.005: 34.27 },
17: { 0.995: 5.70, 0.99: 6.41, 0.975: 7.56, 0.95: 8.67, 0.9: 10.09, 0.5: 16.34, 0.1: 24.77, 0.05: 27.59, 0.025: 30.19, 0.01: 33.41, 0.005: 35.72 },
18: { 0.995: 6.26, 0.99: 7.01, 0.975: 8.23, 0.95: 9.39, 0.9: 10.87, 0.5: 17.34, 0.1: 25.99, 0.05: 28.87, 0.025: 31.53, 0.01: 34.81, 0.005: 37.16 },
19: { 0.995: 6.84, 0.99: 7.63, 0.975: 8.91, 0.95: 10.12, 0.9: 11.65, 0.5: 18.34, 0.1: 27.20, 0.05: 30.14, 0.025: 32.85, 0.01: 36.19, 0.005: 38.58 },
20: { 0.995: 7.43, 0.99: 8.26, 0.975: 9.59, 0.95: 10.85, 0.9: 12.44, 0.5: 19.34, 0.1: 28.41, 0.05: 31.41, 0.025: 34.17, 0.01: 37.57, 0.005: 40.00 },
21: { 0.995: 8.03, 0.99: 8.90, 0.975: 10.28, 0.95: 11.59, 0.9: 13.24, 0.5: 20.34, 0.1: 29.62, 0.05: 32.67, 0.025: 35.48, 0.01: 38.93, 0.005: 41.40 },
22: { 0.995: 8.64, 0.99: 9.54, 0.975: 10.98, 0.95: 12.34, 0.9: 14.04, 0.5: 21.34, 0.1: 30.81, 0.05: 33.92, 0.025: 36.78, 0.01: 40.29, 0.005: 42.80 },
23: { 0.995: 9.26, 0.99: 10.20, 0.975: 11.69, 0.95: 13.09, 0.9: 14.85, 0.5: 22.34, 0.1: 32.01, 0.05: 35.17, 0.025: 38.08, 0.01: 41.64, 0.005: 44.18 },
24: { 0.995: 9.89, 0.99: 10.86, 0.975: 12.40, 0.95: 13.85, 0.9: 15.66, 0.5: 23.34, 0.1: 33.20, 0.05: 36.42, 0.025: 39.36, 0.01: 42.98, 0.005: 45.56 },
25: { 0.995: 10.52, 0.99: 11.52, 0.975: 13.12, 0.95: 14.61, 0.9: 16.47, 0.5: 24.34, 0.1: 34.28, 0.05: 37.65, 0.025: 40.65, 0.01: 44.31, 0.005: 46.93 },
26: { 0.995: 11.16, 0.99: 12.20, 0.975: 13.84, 0.95: 15.38, 0.9: 17.29, 0.5: 25.34, 0.1: 35.56, 0.05: 38.89, 0.025: 41.92, 0.01: 45.64, 0.005: 48.29 },
27: { 0.995: 11.81, 0.99: 12.88, 0.975: 14.57, 0.95: 16.15, 0.9: 18.11, 0.5: 26.34, 0.1: 36.74, 0.05: 40.11, 0.025: 43.19, 0.01: 46.96, 0.005: 49.65 },
28: { 0.995: 12.46, 0.99: 13.57, 0.975: 15.31, 0.95: 16.93, 0.9: 18.94, 0.5: 27.34, 0.1: 37.92, 0.05: 41.34, 0.025: 44.46, 0.01: 48.28, 0.005: 50.99 },
29: { 0.995: 13.12, 0.99: 14.26, 0.975: 16.05, 0.95: 17.71, 0.9: 19.77, 0.5: 28.34, 0.1: 39.09, 0.05: 42.56, 0.025: 45.72, 0.01: 49.59, 0.005: 52.34 },
30: { 0.995: 13.79, 0.99: 14.95, 0.975: 16.79, 0.95: 18.49, 0.9: 20.60, 0.5: 29.34, 0.1: 40.26, 0.05: 43.77, 0.025: 46.98, 0.01: 50.89, 0.005: 53.67 },
40: { 0.995: 20.71, 0.99: 22.16, 0.975: 24.43, 0.95: 26.51, 0.9: 29.05, 0.5: 39.34, 0.1: 51.81, 0.05: 55.76, 0.025: 59.34, 0.01: 63.69, 0.005: 66.77 },
50: { 0.995: 27.99, 0.99: 29.71, 0.975: 32.36, 0.95: 34.76, 0.9: 37.69, 0.5: 49.33, 0.1: 63.17, 0.05: 67.50, 0.025: 71.42, 0.01: 76.15, 0.005: 79.49 },
60: { 0.995: 35.53, 0.99: 37.48, 0.975: 40.48, 0.95: 43.19, 0.9: 46.46, 0.5: 59.33, 0.1: 74.40, 0.05: 79.08, 0.025: 83.30, 0.01: 88.38, 0.005: 91.95 },
70: { 0.995: 43.28, 0.99: 45.44, 0.975: 48.76, 0.95: 51.74, 0.9: 55.33, 0.5: 69.33, 0.1: 85.53, 0.05: 90.53, 0.025: 95.02, 0.01: 100.42, 0.005: 104.22 },
80: { 0.995: 51.17, 0.99: 53.54, 0.975: 57.15, 0.95: 60.39, 0.9: 64.28, 0.5: 79.33, 0.1: 96.58, 0.05: 101.88, 0.025: 106.63, 0.01: 112.33, 0.005: 116.32 },
90: { 0.995: 59.20, 0.99: 61.75, 0.975: 65.65, 0.95: 69.13, 0.9: 73.29, 0.5: 89.33, 0.1: 107.57, 0.05: 113.14, 0.025: 118.14, 0.01: 124.12, 0.005: 128.30 },
100: { 0.995: 67.33, 0.99: 70.06, 0.975: 74.22, 0.95: 77.93, 0.9: 82.36, 0.5: 99.33, 0.1: 118.50, 0.05: 124.34, 0.025: 129.56, 0.01: 135.81, 0.005: 140.17 }
};
module.exports = chiSquaredDistributionTable;
},{}],6:[function(require,module,exports){
'use strict';
var mean = require(21);
var chiSquaredDistributionTable = require(5);
/**
* The [χ2 (Chi-Squared) Goodness-of-Fit Test](http://en.wikipedia.org/wiki/Goodness_of_fit#Pearson.27s_chi-squared_test)
* uses a measure of goodness of fit which is the sum of differences between observed and expected outcome frequencies
* (that is, counts of observations), each squared and divided by the number of observations expected given the
* hypothesized distribution. The resulting χ2 statistic, `chiSquared`, can be compared to the chi-squared distribution
* to determine the goodness of fit. In order to determine the degrees of freedom of the chi-squared distribution, one
* takes the total number of observed frequencies and subtracts the number of estimated parameters. The test statistic
* follows, approximately, a chi-square distribution with (k − c) degrees of freedom, where `k` is the number of non-empty
* cells and `c` is the number of estimated parameters for the distribution plus one.
*
* @param {Array<number>} data
* @param {Function} distributionType a function that returns a point in a distribution:
* for instance, binomial, bernoulli, or poisson
* @param {number} significance
* @returns {number} chi squared goodness of fit
* @example
* // Data from Poisson goodness-of-fit example 10-19 in William W. Hines & Douglas C. Montgomery,
* // "Probability and Statistics in Engineering and Management Science", Wiley (1980).
* var data1019 = [
* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
* 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
* 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
* 2, 2, 2, 2, 2, 2, 2, 2, 2,
* 3, 3, 3, 3
* ];
* ss.chiSquaredGoodnessOfFit(data1019, ss.poissonDistribution, 0.05); //= false
*/
function chiSquaredGoodnessOfFit(data, distributionType, significance) {
// Estimate the parameter of the hypothesized distribution from the sample data, as a mean.
var inputMean = mean(data),
// Calculated value of the χ2 statistic.
chiSquared = 0,
// Degrees of freedom, calculated as (number of class intervals -
// number of hypothesized distribution parameters estimated - 1)
degreesOfFreedom,
// Number of hypothesized distribution parameters estimated, expected to be supplied in the distribution test.
// Lose one degree of freedom for estimating `lambda` from the sample data.
c = 1,
// The hypothesized distribution.
// Generate the hypothesized distribution.
hypothesizedDistribution = distributionType(inputMean),
observedFrequencies = [],
expectedFrequencies = [],
k;
// Create an array holding a histogram from the sample data, of
// the form `{ value: numberOfOccurrences }`
for (var i = 0; i < data.length; i++) {
if (observedFrequencies[data[i]] === undefined) {
observedFrequencies[data[i]] = 0;
}
observedFrequencies[data[i]]++;
}
// The histogram we created might be sparse - there might be gaps
// between values. So we iterate through the histogram, making
// sure that instead of undefined, gaps have 0 values.
for (i = 0; i < observedFrequencies.length; i++) {
if (observedFrequencies[i] === undefined) {
observedFrequencies[i] = 0;
}
}
// Create an array holding a histogram of expected data given the
// sample size and hypothesized distribution.
for (k in hypothesizedDistribution) {
if (k in observedFrequencies) {
expectedFrequencies[k] = hypothesizedDistribution[k] * data.length;
}
}
// Working backward through the expected frequencies, collapse classes
// if less than three observations are expected for a class.
// This transformation is applied to the observed frequencies as well.
for (k = expectedFrequencies.length - 1; k >= 0; k--) {
if (expectedFrequencies[k] < 3) {
expectedFrequencies[k - 1] += expectedFrequencies[k];
expectedFrequencies.pop();
observedFrequencies[k - 1] += observedFrequencies[k];
observedFrequencies.pop();
}
}
// Iterate through the squared differences between observed & expected
// frequencies, accumulating the `chiSquared` statistic.
for (k = 0; k < observedFrequencies.length; k++) {
chiSquared += Math.pow(
observedFrequencies[k] - expectedFrequencies[k], 2) /
expectedFrequencies[k];
}
// Calculate degrees of freedom for this test and look it up in the
// `chiSquaredDistributionTable` in order to
// accept or reject the goodness-of-fit of the hypothesized distribution.
degreesOfFreedom = observedFrequencies.length - c - 1;
return chiSquaredDistributionTable[degreesOfFreedom][significance] < chiSquared;
}
module.exports = chiSquaredGoodnessOfFit;
},{"21":21,"5":5}],7:[function(require,module,exports){
'use strict';
/**
* Split an array into chunks of a specified size. This function
* has the same behavior as [PHP's array_chunk](http://php.net/manual/en/function.array-chunk.php)
* function, and thus will insert smaller-sized chunks at the end if
* the input size is not divisible by the chunk size.
*
* `sample` is expected to be an array, and `chunkSize` a number.
* The `sample` array can contain any kind of data.
*
* @param {Array} sample any array of values
* @param {number} chunkSize size of each output array
* @returns {Array<Array>} a chunked array
* @example
* console.log(chunk([1, 2, 3, 4], 2)); // [[1, 2], [3, 4]]
*/
function chunk(sample, chunkSize) {
// a list of result chunks, as arrays in an array
var output = [];
// `chunkSize` must be greater than zero - otherwise the loop below,
// in which we call `start += chunkSize`, will loop infinitely.
// So, we'll detect and return null in that case to indicate
// invalid input.
if (chunkSize <= 0) {
return null;
}
// `start` is the index at which `.slice` will start selecting
// new array elements
for (var start = 0; start < sample.length; start += chunkSize) {
// for each chunk, slice that part of the array and add it
// to the output. The `.slice` function does not change
// the original array.
output.push(sample.slice(start, start + chunkSize));
}
return output;
}
module.exports = chunk;
},{}],8:[function(require,module,exports){
'use strict';
var sortedUniqueCount = require(42),
numericSort = require(26);
/**
* Create a new column x row matrix.
*
* @private
* @param {number} columns
* @param {number} rows
* @return {Array<Array<number>>} matrix
* @example
* makeMatrix(10, 10);
*/
function makeMatrix(columns, rows) {
var matrix = [];
for (var i = 0; i < columns; i++) {
var column = [];
for (var j = 0; j < rows; j++) {
column.push(0);
}
matrix.push(column);
}
return matrix;
}
/**
* Ckmeans clustering is an improvement on heuristic-based clustering
* approaches like Jenks. The algorithm was developed in
* [Haizhou Wang and Mingzhou Song](http://journal.r-project.org/archive/2011-2/RJournal_2011-2_Wang+Song.pdf)
* as a [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming) approach
* to the problem of clustering numeric data into groups with the least
* within-group sum-of-squared-deviations.
*
* Minimizing the difference within groups - what Wang & Song refer to as
* `withinss`, or within sum-of-squares, means that groups are optimally
* homogeneous within and the data is split into representative groups.
* This is very useful for visualization, where you may want to represent
* a continuous variable in discrete color or style groups. This function
* can provide groups that emphasize differences between data.
*
* Being a dynamic approach, this algorithm is based on two matrices that
* store incrementally-computed values for squared deviations and backtracking
* indexes.
*
* Unlike the [original implementation](https://cran.r-project.org/web/packages/Ckmeans.1d.dp/index.html),
* this implementation does not include any code to automatically determine
* the optimal number of clusters: this information needs to be explicitly
* provided.
*
* ### References
* _Ckmeans.1d.dp: Optimal k-means Clustering in One Dimension by Dynamic
* Programming_ Haizhou Wang and Mingzhou Song ISSN 2073-4859
*
* from The R Journal Vol. 3/2, December 2011
* @param {Array<number>} data input data, as an array of number values
* @param {number} nClusters number of desired classes. This cannot be
* greater than the number of values in the data array.
* @returns {Array<Array<number>>} clustered input
* @example
* ckmeans([-1, 2, -1, 2, 4, 5, 6, -1, 2, -1], 3);
* // The input, clustered into groups of similar numbers.
* //= [[-1, -1, -1, -1], [2, 2, 2], [4, 5, 6]]);
*/
function ckmeans(data, nClusters) {
if (nClusters > data.length) {
throw new Error('Cannot generate more classes than there are data values');
}
var sorted = numericSort(data),
// we'll use this as the maximum number of clusters
uniqueCount = sortedUniqueCount(sorted);
// if all of the input values are identical, there's one cluster
// with all of the input in it.
if (uniqueCount === 1) {
return [sorted];
}
// named 'D' originally
var matrix = makeMatrix(nClusters, sorted.length),
// named 'B' originally
backtrackMatrix = makeMatrix(nClusters, sorted.length);
// This is a dynamic programming way to solve the problem of minimizing
// within-cluster sum of squares. It's similar to linear regression
// in this way, and this calculation incrementally computes the
// sum of squares that are later read.
// The outer loop iterates through clusters, from 0 to nClusters.
for (var cluster = 0; cluster < nClusters; cluster++) {
// At the start of each loop, the mean starts as the first element
var firstClusterMean = sorted[0];
for (var sortedIdx = Math.max(cluster, 1);
sortedIdx < sorted.length;
sortedIdx++) {
if (cluster === 0) {
// Increase the running sum of squares calculation by this
// new value
var squaredDifference = Math.pow(
sorted[sortedIdx] - firstClusterMean, 2);
matrix[cluster][sortedIdx] = matrix[cluster][sortedIdx - 1] +
(sortedIdx / (sortedIdx + 1)) * squaredDifference;
// We're computing a running mean by taking the previous
// mean value, multiplying it by the number of elements
// seen so far, and then dividing it by the number of
// elements total.
var newSum = sortedIdx * firstClusterMean + sorted[sortedIdx];
firstClusterMean = newSum / (sortedIdx + 1);
} else {
var sumSquaredDistances = 0,
meanXJ = 0;
for (var j = sortedIdx; j >= cluster; j--) {
sumSquaredDistances += (sortedIdx - j) /
(sortedIdx - j + 1) *
Math.pow(sorted[j] - meanXJ, 2);
meanXJ = (sorted[j] + (sortedIdx - j) * meanXJ) /
(sortedIdx - j + 1);
if (j === sortedIdx) {
matrix[cluster][sortedIdx] = sumSquaredDistances;
backtrackMatrix[cluster][sortedIdx] = j;
if (j > 0) {
matrix[cluster][sortedIdx] += matrix[cluster - 1][j - 1];
}
} else {
if (j === 0) {
if (sumSquaredDistances <= matrix[cluster][sortedIdx]) {
matrix[cluster][sortedIdx] = sumSquaredDistances;
backtrackMatrix[cluster][sortedIdx] = j;
}
} else if (sumSquaredDistances + matrix[cluster - 1][j - 1] < matrix[cluster][sortedIdx]) {
matrix[cluster][sortedIdx] = sumSquaredDistances + matrix[cluster - 1][j - 1];
backtrackMatrix[cluster][sortedIdx] = j;
}
}
}
}
}
}
// The real work of Ckmeans clustering happens in the matrix generation:
// the generated matrices encode all possible clustering combinations, and
// once they're generated we can solve for the best clustering groups
// very quickly.
var clusters = [],
clusterRight = backtrackMatrix[0].length - 1;
// Backtrack the clusters from the dynamic programming matrix. This
// starts at the bottom-right corner of the matrix (if the top-left is 0, 0),
// and moves the cluster target with the loop.
for (cluster = backtrackMatrix.length - 1; cluster >= 0; cluster--) {
var clusterLeft = backtrackMatrix[cluster][clusterRight];
// fill the cluster from the sorted input by taking a slice of the
// array. the backtrack matrix makes this easy - it stores the
// indexes where the cluster should start and end.
clusters[cluster] = sorted.slice(clusterLeft, clusterRight + 1);
if (cluster > 0) {
clusterRight = clusterLeft - 1;
}
}
return clusters;
}
module.exports = ckmeans;
},{"26":26,"42":42}],9:[function(require,module,exports){
'use strict';
var standardNormalTable = require(44);
/**
* **[Cumulative Standard Normal Probability](http://en.wikipedia.org/wiki/Standard_normal_table)**
*
* Since probability tables cannot be
* printed for every normal distribution, as there are an infinite variety
* of normal distributions, it is common practice to convert a normal to a
* standard normal and then use the standard normal table to find probabilities.
*
* You can use `.5 + .5 * errorFunction(x / Math.sqrt(2))` to calculate the probability
* instead of looking it up in a table.
*
* @param {number} z
* @returns {number} cumulative standard normal probability
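* @example
* // at the mean of the distribution, z = 0, half of the values lie below
* cumulativeStdNormalProbability(0); //= 0.5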
*/
function cumulativeStdNormalProbability(z) {
// Calculate the position of this value.
var absZ = Math.abs(z),
// Each row begins with a different
// significant digit: 0.5, 0.6, 0.7, and so on. Each value in the table
// corresponds to a range of 0.01 in the input values, so the value is
// multiplied by 100.
index = Math.min(Math.round(absZ * 100), standardNormalTable.length - 1);
// The index we calculate must be in the table as a positive value,
// but we still pay attention to whether the input is positive
// or negative, and flip the output value as a last step.
if (z >= 0) {
return standardNormalTable[index];
} else {
// due to floating-point arithmetic, values in the table with
// 4 significant figures can nevertheless end up as repeating
// fractions when they're computed here.
return +(1 - standardNormalTable[index]).toFixed(4);
}
}
module.exports = cumulativeStdNormalProbability;
},{"44":44}],10:[function(require,module,exports){
'use strict';
/**
* We use `ε`, epsilon, as a stopping criterion when we want to iterate
* until we're "close enough". Epsilon is a very small number: for
* simple statistics, that number is **0.0001**
*
* This is used in calculations like the binomialDistribution, in which
* the process of finding a value is [iterative](https://en.wikipedia.org/wiki/Iterative_method):
* it progresses until it is close enough.
*
* Below is an example of using epsilon in [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent),
* where we're trying to find a local minimum of a function's derivative,
* given by the `fDerivative` method.
*
* @example
* // From calculation, we expect that the local minimum occurs at x=9/4
* var x_old = 0;
* // The algorithm starts at x=6
* var x_new = 6;
* var stepSize = 0.01;
*
* function fDerivative(x) {
* return 4 * Math.pow(x, 3) - 9 * Math.pow(x, 2);
* }
*
* // The loop runs until the difference between the previous
* // value and the current value is smaller than epsilon - a rough
* // measure of 'close enough'
* while (Math.abs(x_new - x_old) > ss.epsilon) {
* x_old = x_new;
* x_new = x_old - stepSize * fDerivative(x_old);
* }
*
* console.log('Local minimum occurs at', x_new);
*/
var epsilon = 0.0001;
module.exports = epsilon;
},{}],11:[function(require,module,exports){
'use strict';
/**
* **[Gaussian error function](http://en.wikipedia.org/wiki/Error_function)**
*
* The `errorFunction(x/(sd * Math.sqrt(2)))` is the probability that a value in a
* normal distribution with standard deviation sd is within x of the mean.
*
* This function returns a numerical approximation to the exact value.
*
* @param {number} x input
* @return {number} error estimation
* @example
* errorFunction(1); //= 0.8427
*/
function errorFunction(x) {
var t = 1 / (1 + 0.5 * Math.abs(x));
var tau = t * Math.exp(-Math.pow(x, 2) -
1.26551223 +
1.00002368 * t +
0.37409196 * Math.pow(t, 2) +
0.09678418 * Math.pow(t, 3) -
0.18628806 * Math.pow(t, 4) +
0.27886807 * Math.pow(t, 5) -
1.13520398 * Math.pow(t, 6) +
1.48851587 * Math.pow(t, 7) -
0.82215223 * Math.pow(t, 8) +
0.17087277 * Math.pow(t, 9));
if (x >= 0) {
return 1 - tau;
} else {
return tau - 1;
}
}
module.exports = errorFunction;
},{}],12:[function(require,module,exports){
'use strict';
/**
* A [Factorial](https://en.wikipedia.org/wiki/Factorial), usually written n!, is the product of all positive
* integers less than or equal to n. Often factorial is implemented
* recursively, but this iterative approach is significantly faster
* and simpler.
*
* @param {number} n input
* @returns {number} factorial: n!
* @example
* console.log(factorial(5)); // 120
*/
function factorial(n) {
// factorial is mathematically undefined for negative numbers
if (n < 0 ) { return null; }
// typically you'll expand the factorial function going down, like
// 5! = 5 * 4 * 3 * 2 * 1. This is going in the opposite direction,
// counting from 2 up to the number in question, and since anything
// multiplied by 1 is itself, the loop only needs to start at 2.
var accumulator = 1;
for (var i = 2; i <= n; i++) {
// for each number up to and including the number `n`, multiply
// the accumulator by that number.
accumulator *= i;
}
return accumulator;
}
module.exports = factorial;
},{}],13:[function(require,module,exports){
'use strict';
/**
* The [Geometric Mean](https://en.wikipedia.org/wiki/Geometric_mean) is
* a mean function that is more useful for numbers in different
* ranges.
*
* This is the nth root of the input numbers multiplied by each other.
*
* The geometric mean is often useful for
* **[proportional growth](https://en.wikipedia.org/wiki/Geometric_mean#Proportional_growth)**: given
* growth rates for multiple years, like _80%, 16.66% and 42.85%_, a simple
* mean will incorrectly estimate an average growth rate, whereas a geometric
* mean will correctly estimate a growth rate that, over those years,
* will yield the same end value.
*
* This runs in `O(n)`, linear time with respect to the length of the array.
*
* @param {Array<number>} x input array
* @returns {number} geometric mean
* @example
* var growthRates = [1.80, 1.166666, 1.428571];
* var averageGrowth = geometricMean(growthRates);
* var averageGrowthRates = [averageGrowth, averageGrowth, averageGrowth];
* var startingValue = 10;
* var startingValueMean = 10;
* growthRates.forEach(function(rate) {
* startingValue *= rate;
* });
* averageGrowthRates.forEach(function(rate) {
* startingValueMean *= rate;
* });
* startingValueMean === startingValue;
*/
function geometricMean(x) {
// The mean of no numbers is null
if (x.length === 0) { return null; }
// the starting value.
var value = 1;
for (var i = 0; i < x.length; i++) {
// the geometric mean is only valid for positive numbers
if (x[i] <= 0) { return null; }
// repeatedly multiply the value by each number
value *= x[i];
}
return Math.pow(value, 1 / x.length);
}
module.exports = geometricMean;
},{}],14:[function(require,module,exports){
'use strict';
/**
* The [Harmonic Mean](https://en.wikipedia.org/wiki/Harmonic_mean) is
* a mean function typically used to find the average of rates.
* This mean is calculated by taking the reciprocal of the arithmetic mean
* of the reciprocals of the input numbers.
*
* This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):
* a method of finding a typical or central value of a set of numbers.
*
* This runs in `O(n)`, linear time with respect to the length of the array.
*
* @param {Array<number>} x input
* @returns {number} harmonic mean
* @example
* ss.harmonicMean([2, 3]) //= 2.4
*/
function harmonicMean(x) {
// The mean of no numbers is null
if (x.length === 0) { return null; }
var reciprocalSum = 0;
for (var i = 0; i < x.length; i++) {
// the harmonic mean is only valid for positive numbers
if (x[i] <= 0) { return null; }
reciprocalSum += 1 / x[i];
}
// divide n by the reciprocal sum
return x.length / reciprocalSum;
}
module.exports = harmonicMean;
},{}],15:[function(require,module,exports){
'use strict';
var quantile = require(30);
/**
* The [Interquartile range](http://en.wikipedia.org/wiki/Interquartile_range) is
* a measure of statistical dispersion, or how scattered, spread, or
* concentrated a distribution is. It's computed as the difference between
* the third quartile and first quartile.
*
* @param {Array<number>} sample
* @returns {number} interquartile range: the span between lower and upper quartile,
* 0.25 and 0.75
* @example
* interquartileRange([0, 1, 2, 3]); //= 2
*/
function interquartileRange(sample) {
// We can't derive quantiles from an empty list
if (sample.length === 0) { return null; }
// Interquartile range is the span between the upper quartile,
// at `0.75`, and lower quartile, `0.25`
return quantile(sample, 0.75) - quantile(sample, 0.25);
}
module.exports = interquartileRange;
},{"30":30}],16:[function(require,module,exports){
'use strict';
/**
* The Inverse [Gaussian error function](http://en.wikipedia.org/wiki/Error_function)
* returns a numerical approximation to the value that would have caused
* `errorFunction()` to return x.
*
* @param {number} x value of error function
* @returns {number} estimated inverted value
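* @example
* // since errorFunction(1) is approximately 0.8427, the inverse
* // recovers a value close to 1 (this is a numerical approximation)
* inverseErrorFunction(0.8427); //≈ 1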
*/
function inverseErrorFunction(x) {
var a = (8 * (Math.PI - 3)) / (3 * Math.PI * (4 - Math.PI));
var inv = Math.sqrt(Math.sqrt(
Math.pow(2 / (Math.PI * a) + Math.log(1 - x * x) / 2, 2) -
Math.log(1 - x * x) / a) -
(2 / (Math.PI * a) + Math.log(1 - x * x) / 2));
if (x >= 0) {
return inv;
} else {
return -inv;
}
}
module.exports = inverseErrorFunction;
},{}],17:[function(require,module,exports){
'use strict';
/**
* [Simple linear regression](http://en.wikipedia.org/wiki/Simple_linear_regression)
* is a simple way to find a fitted line
* between a set of coordinates. This algorithm finds the slope and y-intercept of a regression line
* using the least sum of squares.
*
* @param {Array<Array<number>>} data an array of two-element arrays,
* like `[[0, 1], [2, 3]]`
* @returns {Object} object containing slope and intercept of regression line
* @example
* linearRegression([[0, 0], [1, 1]]); // { m: 1, b: 0 }
*/
function linearRegression(data) {
var m, b;
// Store data length in a local variable to reduce
// repeated object property lookups
var dataLength = data.length;
// if there's only one point, arbitrarily choose a slope of 0
// and a y-intercept of whatever the y of the initial point is
if (dataLength === 1) {
m = 0;
b = data[0][1];
} else {
// Initialize our sums and scope the `m` and `b`
// variables that define the line.
var sumX = 0, sumY = 0,
sumXX = 0, sumXY = 0;