-
Notifications
You must be signed in to change notification settings - Fork 3
/
madlib_the_sql.tex
1501 lines (1300 loc) · 111 KB
/
madlib_the_sql.tex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
% THIS IS AN EXAMPLE DOCUMENT FOR VLDB 2012
% based on ACM SIGPROC-SP.TEX VERSION 2.7
% Modified by Gerald Weber <gerald@cs.auckland.ac.nz>
% Removed the requirement to include *bbl file in here. (AhmetSacan, Sep2012)
% Fixed the equation on page 3 to prevent line overflow. (AhmetSacan, Sep2012)
\documentclass{vldb}
\usepackage{times}
\usepackage{graphicx}
\usepackage{balance} % for \balance command ON LAST PAGE (only there!)
\usepackage{alltt,algo}
\usepackage{url}
\usepackage{ctable} % Nicer tables, e.g., \toprule
\usepackage{verbatim} % Also provides the comment environment
\usepackage{dcolumn} % Decimal point alignment in tables
\usepackage{xcolor} % for color comments
\usepackage[
pdftitle={Introducing MADlib (MAD Skills, the SQL)},
pdfauthor={}
]{hyperref}
\usepackage{pgfplotstable} % Generating pretty-printed tables and plots
\makeatletter
% The vldb style file is an anachronism. So no need to worry about hacks anyway.
% We redefined the listing environment by package minted here. (If we did not,
% we would also get an error because \aboveparskip is undefined.)
% Copied from vldb.csl
\@ifundefined{code}{\newcounter {code}}{} % this is for LaTeX2e
\def\fps@code{tbp}
\def\ftype@code{1}
\def\ext@code{lof}
\def\fnum@code{Listing \thecode}
\def\code{\@float{code}}
\let\endcode\end@float
\@namedef{code*}{\@dblfloat{code}}
\@namedef{endcode*}{\end@dblfloat}
\makeatother
\usepackage{minted}
% BEGIN Layout
\newcommand{\otoprule}{\midrule[\heavyrulewidth]}
\newcolumntype{+}{>{\global\let\currentrowstyle\relax}}
\newcolumntype{^}{>{\currentrowstyle}}
\newcommand{\rowstyle}[1]{\gdef\currentrowstyle{#1}%
#1\ignorespaces
}
% END Layout
% BEGIN Convenience commands
% BEGIN Listing environments
\newminted{cpp}{mathescape,
linenos,
numbersep=5pt,
fontsize=\scriptsize,
framesep=2mm}
\newminted{python}{mathescape,
linenos,
numbersep=5pt,
fontsize=\scriptsize,
framesep=2mm}
\newminted{sql}{mathescape,
numbersep=5pt,
tabsize=4,
fontsize=\scriptsize,
framesep=2mm}
% END Listing environments
% BEGIN COMMENTS
\newcommand{\jmh}[1]{{\textcolor{red}{#1 -- jmh}}}
\newcommand{\fs}[1]{{\textcolor{orange}{#1 -- fs}}}
\newcommand{\ksn}[1]{{\textcolor{blue}{#1 -- ksn}}}
% END COMMENTS
% END Convenience commands
% BEGIN Mathematical Definitions
% BEGIN Set Symbols
\newcommand{\setsymbol}[1]{\mathbb{#1}}
\makeatletter
\newcommand{\N}{\@ifstar{\setsymbol{N}_0}{\setsymbol{N}}}
\makeatother
\newcommand{\R}{\setsymbol{R}}
% END Set Symbols
\renewcommand{\vec}[1]{\ensuremath{\boldsymbol{#1}}}
% END Mathematical Definitions
\graphicspath{{FIGS/}}
\begin{document}
% ****************** TITLE ****************************************
\title{Introducing MADlib\\{\Large (MAD Skills, the SQL)}}
\numberofauthors{11} % in this sample file, there are a *total*
% of EIGHT authors. SIX appear on the 'first-page' (for formatting
% reasons) and the remaining two appear in the \additionalauthors section.
\author{Joseph M. Hellerstein\\{\small U.C. Berkeley} \and
Christopher R\'e\\{\small U. Wisconsin} \and
Florian Schoppmann\\{\small Greenplum} \and
Daisy Zhe Wang\\{\small U. Florida}
% \and
% Gavin Sherry\\{\small Greenplum} \and
% Caleb Welton\\{\small Greenplum}
% Eugene Fratkin\\{\small Greenplum} \and
% Aleks Gorajek\\{\small Greenplum} \and
% Steven Hillion\\{\small Alpine Data Labs} \and
% Luke Lonergan\\{\small Greenplum} \and
% Kee Siong Ng\\{\small Greenplum} \and
}
\maketitle
\begin{abstract}
MADlib is a free, open-source library of in-database analytic methods.
It provides an evolving suite of SQL-based algorithms for machine
learning, data mining and statistics that run at scale within a
database engine, with no need for data import/export to other tools.
The goal is for MADlib to eventually serve a role for scalable
database systems that is similar to the CRAN library for R: a
community repository of statistical methods, this time written with
scale and parallelism in mind.
In this paper we introduce the MADlib project, including the
background that led to its beginnings, and the motivation for its
open-source nature. We provide an overview of the library's
architecture and design patterns, and provide a description
of various statistical methods in that context. We include raw performance and speedup results of a core design pattern from one of those methods over the Greenplum parallel DBMS on a modest-sized test cluster. We then report on two
initial efforts at incorporating academic research into MADlib, which is one of the
project's goals.
MADlib is freely
available in beta \fs{Should we call it beta? In the version string `v0.3.0' and on the web page, we have removed `beta'.} form at \url{http://madlib.net}, and the project is open
for contributions of both new methods, and ports to additional
database platforms.
\end{abstract}
\section{Introduction:\\From Warehousing to Science}
\noindent
Until fairly recently, large databases were used mainly for accounting
purposes in enterprises, supporting financial record-keeping,
reporting and analysis at various levels of granularity. {\em Data
Warehousing} was the name given to industry practices for these
database workloads. Accounting, by definition, involves significant
care and attention to detail. Data Warehousing practices followed suit
by encouraging careful and comprehensive database design, and by following exacting policies regarding the quality of data loaded into the database.
Attitudes toward large databases have been changing quickly in the
past decade, as the focus of large database usage has shifted from
accountancy to analytics. The need for correct accounting and data
warehousing practice has not gone away, but it is becoming a
shrinking fraction of the volume---and the value---of large-scale data
management. The emerging trend focuses on the use of a wide range of
potentially noisy data to support predictive analytics, managed by \ksn{"managed by" statistical models ... sounds really odd.}
statistical models and algorithms for analysis. {\em Data Science} is a
name that is gaining currency for the industry practices evolving
around these workloads.
Data scientists make use of database engines in a very different way than traditional data warehousing professionals. Rather than carefully
designing global schemas and ``repelling'' data until it is integrated,
they load data into private schemas in whatever form is convenient. Rather than focusing on simple OLAP-style drill-down reports, they implement rich statistical
models and algorithms in the database, using extensible SQL as a
language for orchestrating data movement between disk, memory, and multiple parallel machines. In short, for data scientists a DBMS is a scalable analytics runtime---one that is conveniently compatible with the database systems widely used for transactions and accounting.
In 2008, a group of us from the database industry, consultancy,
academia, and end-user analytics got together to describe this usage
pattern as we observed it in the field. We dubbed it {\bf MAD}, an acronym for the {\em Magnetic} (as opposed to
repellent) aspect of the platform, the {\em Agile} design patterns used for
modeling, loading and iterating on data, and the {\em Deep} statistical models and
algorithms being used. The ``MAD Skills'' paper that resulted described
this pattern, and included a number of non-trivial analytics
techniques implemented as simple SQL scripts~\cite{madskills}.
After the publication of the paper, it became clear that there was
significant interest not only in its design aspects, but
also in the actual SQL implementations of statistical methods. This
interest came from every constituency involved: customers were
requesting it of consultants and vendors, and academics were
increasingly publishing papers on the topic. What was missing was a
software framework to focus the energy of the community, and connect
the various interested constituencies \fs{repeated word}. This led to the design of
{\bf MADlib}, the subject of this paper.
\subsection*{Introducing MADlib}
MADlib is a library of analytic methods that can be installed and
executed within a relational database engine that supports extensible
SQL. A snapshot of the current contents of MADlib including methods and
ports is provided in Table~\ref{tab:methods} and Table~\ref{tab:ports}. This set of methods and ports is intended to grow over time.
The methods in MADlib are designed for the shared-nothing, ``scale-out''
parallelism offered by modern parallel database engines, ensuring that
computation is done close to the data. The core functionality is
written in declarative SQL statements, which orchestrate massively
parallel data movement. Single-node inner loops take advantage of SQL
extensibility to call out to high-performance math libraries in
user-defined scalar and aggregate functions. At the highest level,
tasks that require iteration and/or structure definition are coded in
Python driver code, which is used only to kick off the data-rich
computations that happen within the parallel database engine.
MADlib is hosted publicly at github, and readers are encouraged to
browse the code and documentation via the MADlib website
\url{http://madlib.net}. The initial MADlib codebase reflects contributions
from both industry (Greenplum) and academia (UC Berkeley, the
University of Wisconsin, and the University of Florida). Code
management and Quality Assurance efforts have been contributed by
Greenplum. At this time, the project is ready to consider
contributions from additional parties, including both new methods and
ports to new platforms.
\begin{table}
\begin{tabular}{|l|l|}
\hline
{\bf Category} & {\bf Method} \\ \hline\hline
Supervised Learning
& Linear Regression\\
& Logistic Regression \\
& Naive Bayes Classification\\
& Decision Trees (C4.5)\\
& Support Vector Machines\\ \hline
Unsupervised Learning
& k-Means Clustering\\
& SVD Matrix Factorization\\
& Latent Dirichlet Allocation\\
& Association Rules\\ \hline
Descriptive Statistics
& Count-Min Sketch\\
& Flajolet-Martin Sketch\\
& Data Profiling\\
& Quantiles\\ \hline
Support Modules
& Sparse Vectors\\
& Array Operations\\
& Conjugate Gradient Optimization \\\hline
\end{tabular}
\caption{Current MADlib methods. (We may want to rate these with markers for maturity. Also have to decide what to include here from UW and UF.)}
\label{tab:methods}
\end{table}
\begin{table*}
\begin{center}
\begin{tabular}{|p{1.25in}|p{1.75in}|p{3.5in}|}
\hline
{\bf DBMS} & {\bf R\&D Access} & {\bf Notes}\\ \hline
PostgreSQL& Open source & Single-node only. \\ \hline
Greenplum Database & ``Community Edition'' available free for non-production use & Shared-nothing parallel DBMS for clusters and multicore. \\ \hline
\end{tabular}
\end{center}
\caption{Current MADlib ports. \jmh{This isn't very useful. Worth pointing out somewhere that GP Community Edition is free and not crippleware.} \ksn{A two line table probably doesn't deserve to take up so much space. We can say this in two sentences.}}
\label{tab:ports}
\end{table*}
\section{Goals of the Project}
The primary goal of the MADlib open-source project is to accelerate
innovation and technology transfer in the Data Science community via a shared
library of scalable in-database analytics, much as the CRAN library
serves the statistics community~\cite{cran}. Unlike CRAN, which is
customized to the R analytics engine, we hope that MADlib's grounding
in standard SQL will lead to community ports to a
variety of parallel database engines. \fs{True in theory, but we do currently use a lot of PG and GP specific SQL and extensibility. On the SQL layer, few abstraction efforts have been undertaken (essentially just the m4 preprocessor). At this point, we certainly do not have an edge over R in terms of portability. (While I agree that this is what the C++ AL is for, why we are trying to port pgSQL to Python for driver code, etc.)}
In addition to its primary goal, MADlib can serve a number of other
purposes for the community. As a standard and relatively mature
open-source platform, it enables apples-to-apples comparisons that can
be deeper than the traditional TPC benchmarks. For example, the DBMS
backend can be held constant, so that two algorithms for the same task
(e.g., entity extraction) can be compared for runtime and answer
quality. Similarly, as MADlib is ported to more platforms, an
algorithm can be held constant, and two backend DBMS engines can be
compared for performance. This latter comparison has been notoriously
difficult in the past, due to the closed-source, black-box nature of
``data mining'' and analytic toolkits that were not only customized to
specific platforms, but also lacked transparency into their analytic
algorithms. \fs{While I'd love this to be true, I don't see that MADlib would be more of an apple-to-apples comparison than using existing benchmarks. Performance would again largely depend on the quality of the port, i.e., how well the port adapts to vendor-specific implementations. Currently, we are using lots of PG/GP specific SQL. In fact, much code is unfortunately still written in pgSQL. (I don't like it, but that's how it is.) Not even user-defined aggregates---arguably our most fundamental building block---is standard SQL.}
\subsection{Why Databases?}
For decades, statistical packages like SAS, Matlab and R have been the key tools for deep analytics, and the practices surrounding these tools have been elevated into widely-used traditional methodologies.
% DELETED -- better not to pick fights with specific vendors. -JMH
% SAS in particular has a
% large customer base, and its proposed analytics methodologies have
% become engrained in the modern enterprise. Nevertheless, usurpers loom
% on the horizon.
One standard analytics methodology advocated in this domain
is called SEMMA: Sample, Explore, Modify, Model, Assess. The ``EMMA''
portion of this cycle identifies a set of fundamental tasks
that an analyst needs to perform, but the first, ``S'' step makes less and less sense in many settings today. The costs of computation and storage are increasingly cheap, and entire data sets can often be processed efficiently by a cluster of computers. Meanwhile, competition for extracting value from data has become increasingly refined. Consider fiercely competitive application domains like online advertising or politics. It is of course important to target ``typical'' people (customers, voters) that would be captured by sampling the database. Because SEMMA is standard practice, optimizing for a sample provides no real competitive advantage. Winning today requires extracting advantages in the long tail of ``special interests'', a practice known as ``microtargeting'', ``hypertargeting'' or ``narrowcasting''. In that context, the first step of SEMMA essentially defeats the remaining four steps, leading to simplistic, generalized decision-making that may not translate well to small populations in the tail of the distribution. \fs{That explains why you do not want to weed out anything \emph{before} storing the data. But what is the reason for doing large-scale machine learning \emph{after} you have identified you microtarget? Concretely: Why prefer logistic regression over 1~billion rows to 1~million rows? If the model is reasonable, shouldn't the difference always be insignificant?}
% throws away the very
% competitive advantage that customers hope to get from acquiring their
% valuable data. Sampling decouples the human intelligence in
% modeling from where these insights are put to use (on the entire
% data). This decoupling makes analysts less effective: they must guess
% about the statistical and performance robustness of their model. If
% they guess incorrectly, they may not know about it for weeks or
% months. (SAID BETTER BY PEOPLE WHO GET TO REGULARLY TALK TO
% CUSTOMERS)
Driven in part by this observation, momentum has been gathering around efforts to develop scalable full-dataset analytics. One popular alternative is to
push the statistical methods directly into so-called Big Data processing platforms---notably, Apache Hadoop. For example, the open-source Mahout project
aims to implement machine learning tools within Hadoop, harnessing
interest in both academia and industry~\cite{mlformr,mahout}. This is
certainly an attractive path to solution, and is being advocated by
major players including IBM~\cite{systemml}.
At the same time that the Hadoop ecosystem has been evolving, the SQL-based analytics ecosystem has grown rapidly as well, and large volumes of valuable data are likely to reside in
relational databases for many years to come. There is a rich
ecosystem of tools, know-how, and organizational requirements that
encourage this. For these cases, it would be helpful to push statistical methods into the DBMS. And as we will see, massively parallel databases form a surprisingly useful platform for sophisticated analytics. MADlib currently targets this environment of in-database analytics.
%% There is nothing to be gained by pissing on Hadoop. -- JMH
% there
% are drawbacks with the Hadoop approach: its performance is untenably
% slow compared to database processing as it must provide higher levels
% of fault tolerance than other systems. Additionally, the sheer number
% of machines that are required to achieve good performance makes it
% unclear that Hadoop systems are cost-effective in all but the most scale
% heavy environments.
%
% For such users, Greenplum provides a cost-effective solution for
% scalable analytics. It does not require a complicated and error-prone
% import/export cycle to Hadoop nor forces the user to work over a
% snapshot of the data: one works on the raw data and does their
% analysis on production (or very near-to-production) data. This allows
% user to maximize the value proposition of storing all that data in a
% cost-effective manner.
%% Q1: why not SAS? Q2: what about the fact that SQL nerds dont do
%% data analysis? Q3: does Hadoop and Mahout make this irrelevant?
%% Chris asked Q1 and Q2 this way:
%% There is another major shift that MADlib potentially allows:
%% analysts can get closer to their data than ever before. For
%% example, SAS promotes a model of an analysts workflow called SEMMA
%% model (Sample, Explore, Modify, Model, Assess). This is (afaik) the
%% industry standard. To me, what's totally busted about this loop is
%% the S -- if you're looking for something rare, the sampling step
%% throws out the most interesting bits! Then your EMM steps where you
%% build understanding are of a small fraction of your data. This is
%% the part where the analyst is currently far from the data. As a
%% result, their entire conversation is not with the data but with a
%% small sample. If something goes wrong, you (maybe) find out about
%% it in the A step (which is on the whole data).
%% Moreover, it's totally unnecessary to do the S loop in the MADlib
%% world view, and the fact that the S step has been elevated to the
%% level of feature is a testament to how broken the current tool
%% chain is.
%% A couple follow-on points:
%% * WRT SAS and sampling, the way I heard it from the FAN guys its all about competitive advantage in the tails of the distribution. Something like anybody can tackle the 20% of cases in the head of the distribution. The competitive advantage is in tackling the long tails: e.g. target ads to toyota-truck-driving-latin-music-loving-sushi-eating women in cold climates. Simple sample/extract techniques blow on that stuff.
%% * WRT Hadoop/Mahout: I think we need to mention it here and acknowledge its been an attractive path to a solution, and Hadoop certainly is getting the attention of the Web and ML communities [Stanford paper]. Mahout is an attempt to package that energy, with some institutional support from MapR. But despite the success of MapReduce, lots of important data is still going into databases and will continue to do so for years to come for a host of reasons that are both technical and organizational. Even if Mahout succeeds wildly (and it isnt doing so to date, but I dont think we want to bother saying that), theres a critical vacuum to be filled in SQL-land. What we can (re-)learn from the Hadoop community is the power of open-source teamwork, and the desire for agile platforms for analytics. Theres no reason we cant direct that agile thinking toward the data in databases.
%% *
\subsection{Why Open Source?}
From the beginning, MADlib was designed as an open source project with
corporate backing, rather than a closed-source corporate effort with
academic consulting. This decision was motivated by a number of
factors, including the following:
\begin{itemize}
\item \textbf{The benefits of customization}: Statistical methods are rarely used as turnkey solutions. As a result, it is common for data scientists to want to modify and adapt canonical models and methods (e.g., regression, classification, clustering) to their own purposes. This is a very tangible benefit of open source libraries over traditional closed-source packages. Moreover, in an open-source community there is a process and a set of positive incentives for useful modifications to be shared back to the benefit of the entire community.
\item \textbf{Valuable data vs.\ valuable software}: In many emerging business sectors, the corporate value is captured in the data itself, not in the software used to analyze that data. Indeed, it is in the interest of these companies to have the open-source community adopt and improve their software. Open-source efforts can also be synergistic for vendors that sell commercial software to customers who run open-source code. Most IT shops today run a mix of open-source and proprietary software, and it is wise for software vendors to position themselves intelligently in that context. For many database system vendors, their core competency is not in statistical methods, but rather in the engines that support those methods, and the service industry that evolves around them. For these vendors, an open-source library like MADlib is an opportunity to expand their functionality and hence services. \fs{As a reader I ask myself here: Does Greenplum have competency in statistical methods? If so, why do they want to let competitors benefit from that? The argument would be plausible if MADlib was a joint venture, but leaves open questions as a unilateral move by Greenplum.}
\item \textbf{Closing the research-to-adoption loop}: Very few companies that depend on data analysis have the capacity to develop significant in-house research into computing or data science. \fs{Again, one would assume that the Greenplum side of MADlib could not care less if other companies are lacking certain capacities.} On the other hand, it is hard for academics doing computing research to understand and influence the way that analytic processes are done in the field. An open source project like MADlib has the potential to connect academic researchers not only to industrial software vendors, but also directly to the end-users of analytics software. This can improve technology transfer from academia into practice without requiring database software vendors to serve as middlemen. It can similarly enable end-users in specific application domains to influence the research agenda in academia.
\item \textbf{Leveling the playing field, encouraging innovation}: Over the past two decades, database software vendors have developed proprietary data mining toolkits consisting of textbook algorithms. It is hard to assess their relative merits. Meanwhile, other communities in machine learning and internet advertising have also been busily innovating, but their code is typically not well packaged for reuse, and the code that is available was not written to run in a database system. Meanwhile, none of these projects has demonstrated the vibrancy and breadth we see in the open-source community surrounding R and its CRAN package. A robust open-source project like MADlib \fs{We should find the right mix of self-confidence and at the same time acknowledging that MADlib is still at an early stage and beta in many respects.} can bring the entire database community up to a baseline level of competence on standard statistical algorithms, remove the corporate \textit{FUD} from proprietary toolkits that has held back innovation, and help focus a large community on innovation and technology transfer.
\end{itemize}
\subsection{A Model for Open Source Collaboration}
The design of MADlib comes at a time when the connections between
open-source software and academic research seem particularly frayed.
MADlib is designed in part as an experiment in binding these
communities more tightly, to face current realities in software
development.
In previous decades, open-source software famously came from universities and evolved into significant commercial products. Examples include the Ingres and Postgres database systems, the BSD UNIX and Mach operating systems, the X-windows \fs{X Window} user interfaces and the Kerberos authentication suite. These projects were characterized by aggressive application of cutting-edge research ideas, captured in workable but fairly raw public releases that matured slowly with the help of communities outside the university. While all of the above examples were incorporated into commercial products, many of those efforts emerged years after the initial open-source releases, and often with significant changes.
Today, we expect successful open source projects to be quite mature,
often comparable to commercial products. To achieve this level of
maturity, most successful open-source projects have one or more major
corporate backers who pay some number of committers and provide
professional support for Quality Assurance (QA). This kind of investment is typically
made in familiar software packages that tend not to feature new
research ideas. Many of the most popular examples---Hadoop, Linux,
OpenOffice---are direct clones of well-identified,
pre-existing commercial efforts. \fs{I think Linux is too big a project to say it does not contain research. This paragraph also sounds a bit too negative to me. Like ``Today's OSS consists mainly of boring rip-offs of commercial products.'' While not completely untrue, there is more to the story.}
MADlib is making an explicit effort to establish a new model for industry support of both academic research and open source software. Many academic research projects are supported by financial grants and gifts from companies. In MADlib, the corporate donation consists of a commitment to allocate significant professional software engineering time to an open-source collaboration with academia. \fs{This paragraph sounds as if MADlib is contrasting, say, Linux here in that (a) it contains cutting-edge research and (b) corporate donations consist mainly of labor instead of money. I do not see that MADlib is singular in that respect. Think of what, e.g., RedHat does, or in fact any company that makes money with GPL'd software.} This leverages a strength of industry that cannot be replicated on a university
campus. Companies can hire high-quality, experienced software
engineers with the attraction of well-compensated, long-term career
paths. Equally important, software shops can offer an entire software
engineering pipeline that cannot be replicated on campus:
this includes QA processes encompassing specialized QA engineers as
well as software testing procedures and hardware platforms for
automated testing at scale. The hope is that the corporate staffing of research projects like MADlib can both enable
academic open-source research, and speed technology transfer to industry. \fs{Linux is used and advanced by lots of students and academics, and there are companies like RedHat, IBM, etc.}
\subsection{Status and Directions for Growth}
The initial beta release of MADlib in {\bf MONTH} of 2012 \fs{We need to agree on what we call the current release. `beta'? Just v0.3?} was focused on establishing a baseline of
valuable functionality, while laying the groundwork for future evolution. The beta development began with the non-trivial work of building the general-purpose
framework described in Section~\ref{sec:gen}. Additionally, we wanted robust
implementations of textbook methods that were most frequently
requested from customers we met through Greenplum. Finally, we wanted
to validate MADlib as a research vehicle, by fostering a small number of university groups working in the area to experiment with the platform and get their code disseminated.
% Discuss the initial selection of methods.
With the recent MADlib release completed \fs{`Recent release' might be too unspecific. We plan to complete at least one more release cycle until the conference/publication of the proceedings.}, there is room for growth in multiple
dimensions. The library infrastructure itself is still in beta, and
has room to mature. \fs{I agree.} There is room for enhancements in its core
treatment of mathematical kernels (e.g.\ linear algebra over both
sparse and dense matrices) especially in out-of-core settings. And of
course there will always be an appetite for additional statistical models and
algorithmic methods, both textbook techniques and cutting-edge research. Finally,
there is nascent interest on the MADlib newsgroup in ports to other DBMSs. Porting MADlib across DBMSs is a
mechanical but non-trivial software development effort that will
span the infrastructure (e.g.\ the need for a cross-platform
installer), the methods themselves (particularly the user-defined
functions) and the software engineering infrastructure (e.g.\ the need
for QA support on additional database engines). \fs{True for pure SQL modules. For pgSQL/C modules, porting at this point is essentially rewriting. And for C++ AL modules, porting is a non-trivial rewrite of the AL.}
\section{MADlib Architecture}
\label{sec:gen}
\ksn{We support PostgreSQL, but make almost no mention of non-parallel in-database algorithms/design issues.
Ideally, we would start with issues surrounding the use of non-parallelised SQL to implement statistical methods, then move on to additional complications introduced by parallelism.
It's probably too late to change things now, but maybe we can add a sentence near the beginning of Section 3 to say that the MADlib architecture is designed to exploit parallel databases, and that's what we will focus on (exclusively) in Section 3.}
\fs{The first paragraph of the abstract puts emphasis on parallelism. Likewise, the second paragraph of Section 1 under "Introducing MADlib". So I think we are covered in general. But I don't feel strongly here.}
The core of traditional SQL---\texttt{SELECT...} \texttt{FROM...} \texttt{WHERE}... \texttt{GROUP BY}---is
quite a powerful harness for orchestrating bulk data processing across
one or many processors and disks. It is also a portable, native
language supported by a range of widely-deployed open source and
commercial database engines. This makes SQL an attractive
framework for writing data-intensive programs. Ideally, we would like
MADlib methods to be written entirely in straightforward and portable
SQL. Unfortunately, the portable core of ``vanilla'' SQL is often not
quite enough to express the kinds of algorithms needed for advanced
analytics.
Many statistical methods boil down to linear algebra expressions over
matrices. For relational databases to operate over very large
matrices, this presents challenges at two scales. At a macroscopic
scale, the matrices must be intelligently partitioned into chunks that
can fit in memory on a single node. Once partitioned, the pieces can
be keyed in such a way that SQL constructs can be used to orchestrate
the movement of these chunks in and out of memory across one or more
machines. At a microscopic scale, the database engine must invoke
efficient linear algebra routines on the pieces of data it gets in
core. To this end it has to have the ability to very quickly invoke
well-tuned linear algebra methods.
We proceed to discuss issues involved at both of these levels in a bit more detail, and solutions we chose to implement in MADlib.
\subsection{Macro-Programming (Orchestration)}
A scalable method for linear algebra depends upon divide-and-conquer techniques: intelligent
partitioning of the matrix, and a pattern to process the pieces and
merge results back together. This partitioning and dataflow is
currently outside the scope of a traditional query optimizer or
database design tool. But there is a rich literature from scientific
computing on these issues~\cite{demmel}
% \fs{I suppose this refers to ``Applied numerical linear algebra''?}
that database programmers can use to craft efficient in-database implementations. Once data is properly partitioned, database engines shine at orchestrating the
resulting data movement of partitions and the piecewise results of
computation.
\subsubsection{User-Defined Aggregation}
\ksn{This section looks like it should follow the first paragraph of Section 3.1 to prove the point that SQL is pretty good as the basis of a macro-programming or parallel declarative programming language.
We should mention that UDA and parallel constructs like the merge and final functions lend themselves nicely to the implementation of the large body of work on online learning algorithms and model averaging techniques over such algorithms currently being actively pursued in the machine learning literature. The Zinkevish, Weimer, Smola, Li paper is a good reference.}
\fs{I followed KeeSiong's suggestions.}
The most important and the most basic building block in the macro-programming of MADlib is the use of user-defined aggregates (UDAs). In general, aggregates---and the related window functions---are the natural way in SQL to implement mathematical functions that take as input the values of an arbitrary number of rows (tuples). DBMSs typically implement aggregates as data-parallel streaming algorithms. Indeed, there is a large body of recent work on online learning algorithms and model averaging techniques that fits the computational model of aggregates very well (see, e.g., \cite{Zinkevich10}).
Unfortunately, the SQL standard does not prescribe an extension interface for user-defined aggregates \jmh{we'd better triple-check that} \fs{I'll have another look. Can somebody else check, too?}, and they are typically implemented using vendor-specific APIs. Yet, the aggregation paradigm (or in functional programming terms, ``fold'' or ``reduce'') is natural and ubiquitous, so that we expect user-defined aggregates to be very portable. In fact, in all major DBMSs (we checked Greenplum, MySQL, Oracle, PostgreSQL, SQL Server, Teradata as representative examples), a user-defined aggregate consists of three user-defined functions:
\begin{enumerate}
\item A \emph{transition function} that takes the current transition state $z$ and a new data point $x$. The transition function computes $f(x)$ and combines this with $z$ into a new transition state $z'$.
% DB people won't be helped by analogies to functional programming
%The transition function is equivalent to the ``combining'' function passed to linear \emph{left-fold} functions in functional-programming languages.
\item A \emph{merge function} that takes two transition states and transforms them into a new combined transition state. This function is only needed for parallel execution.
% Again in functional-programming terms, a merge operation is a tree-like fold.
\item A \emph{final function} that takes a transition state and transforms it into the output value.
\end{enumerate}
Clearly, a user-defined aggregate is inherently data-parallel if the transition function is associative and the merge function returns the same result as if the transition function were called repeatedly for every individual element in the second state. \jmh{Probably a ref to Gray's Data Cube taxonomy and/or TAG is relevant here.}
Unfortunately, user-defined aggregates are not enough. In designing the high-level orchestration of data movement for analytics, we ran across two main
limitations in standard SQL that we describe next. We addressed both these limitations using driver code written in simple
script-based user-defined functions (UDFs), which in turn kick off more
involved SQL queries. When implemented correctly, the performance of the scripting language code is not critical, since its logic is invoked only
occasionally to kick off much larger bulk tasks that are executed by
the core database engine.
\subsubsection{Driver Functions}
\ksn{I would put this section before Section `Templated Queries'. \fs{I did this.}
Do we need to clarify that iteration is only an issue when each iteration requires access to multiple data points?
An iterative algorithm that processes training data points one at a time is perfectly suited to SQL implementation as an aggregate function, so iterations by itself is not necessarily a problem.}
The first problem we faced is the prevalence of iterative algorithms for many
methods in statistics, particularly optimization methods like Gradient
Descent and Markov Chain Monte Carlo simulation in which the number of iterations
is determined by a data-dependent stopping condition at the end of
each round. There are multiple SQL-based workarounds for this
problem, which depend on the context. In order to drive a fixed number $n$ of independent
iterations, it is often simplest (and very efficient) to synthesize a virtual table with $n$ rows, and join it with a view representing a single iteration---this is an approach that we used to implement
Bootstrap sampling in the original MAD Skills paper~\cite{MADSkills}. For
settings where the current iteration depends on previous iterations,
SQL's windowed aggregate feature can be used to carry state across iterations.
% \fs{What is the idea here? We should either have a little bit more detail here, so that readers have an understanding, or omit this comment.}
Wang et al.\ took
this approach to implement in-database MCMC inference~\cite{Daisy11}.
Alternatively, some DBMSs might support multi-pass user-defined aggregates together with a stopping criterion. \jmh{Can somebody back up the previous sentence with an example? I'm not familiar with this feature.} \fs{I checked DB2, Oracle, Teradata, MySQL, hsqldb. None of these have multi-pass aggregates. Still, multi-pass aggregates would make a lot of sense from a user perspective.}
Most
generally, it is possible to use the recursion features of SQL to
perform iteration with arbitrary stopping conditions---this was used by
Wang et al.\ to implement Viterbi inference~\cite{Daisy10}.
Unfortunately,
the level of support for SQL's windowed aggregates and recursion
varies across database products, and does not form a reliable
basis for portability.
As a result, in MADlib we typically implement iterative methods by
writing a driver UDF in Python to control the iteration. A standard
pitfall in this style of programming is to pull a large amount of data
out of the database and into the driver code; this becomes a
scalability bottleneck since the driver code typically does not
parallelize and hence pulls all data to a single node. We avoid this
via a design pattern in which the driver UDF kicks off each iteration
and stages its output into a temporary table via \texttt{CREATE TEMP TABLE...
AS SELECT...} It then interrogates the resulting temp table using small
aggregate queries as needed. \jmh{Forward ref to an example in this paper?} \fs{We could reference Section~\ref{sec:log-regression-impl} or Figure~\ref{fig:log-reg-driver}} As a result, all large-data movement is
done within the database engine and its buffer pool.
Database engines typically provide efficient parallelism as well as
buffering and spill files on disk for large temp tables, so this
pattern is quite efficient in practice.
\subsubsection{Templated Queries}
\ksn{ The two problems with first-order logic in this context are 1) lack of a proper type system; and 2) no support for higher-order functions.
The current description seems to conflate the two problems, which maybe why it's slightly confusing.
I don't really understand what's the problem being solved and what's the solution being proposed here.
A relevant problem here is the unnatural way we support function arguments to algorithms, for example the distance metric argument in k-means and the kernel function in SVM, which can be traced directly to the lack of direct support for higher-order functions in SQL.
The reference to iterative algorithms, and the solution approach illustrated in Listing 3, seem to be related to this lack of higher-order functions support as well.}
\fs{Section `Templated Queries' discussed variable arity, which is a problem encountered by the profile module and also by the decision tree module (I think). Problem 1) was only mentioned in Section 4.2.1, and Problem 2) was discussed in Section 3.3 but none are discussed in `Templated Queries', were they? I now have mentioned problems 1 and 2 in this section.}
A second problem is a limitation of SQL's roots in
first-order logic, which requires that queries be cognizant of the
schema of their input tables, and produce output tables with a fixed
schema. In many cases we want to write ``templated'' queries that
work over arbitrary schemas, with the details of column names and
types to be filled in later.
For example, the multivariate linear
regression algorithm of Section~\ref{sec:regression} is designed to run over any subset
of the columns of an input table, producing an output table including
the same columns as well as a predicted output column. \fs{Actually, in most algorithms we only work with vectors/arrays, and variable arity does not pose a problem. Only when an algorithm prescribes how non-numerical input columns should be encoded do we have to work with variable arity (e.g., the C4.5 decision-tree algorithm does that). Still, \emph{all} iterative algorithms necessarily rely on templated SQL.} \jmh{Please help me clarify here. My mental example is the profile module, in which I explicitly synthesize SQL in Python. But I didn't want to take the time to describe it here, so maybe you can tell me what you mean by all the algorithms relying on templated SQL.} \fs{OK, I didn't have the profile module on my mind. So my only concern is that variable arity is not an issue for the example modules.} SQL helps with
problems of data types and casting, but cannot help with the variable
arity of inputs and outputs. To address this issue, we use Python
UDFs to interrogate the database catalog for details of input tables,
and then synthesize customized SQL queries based on templates to
produce outputs. {\bf Figure~\ref{fig:secondorder} shows Python code that illustrates this.} \fs{I assume profile is the only Python module dealing with variable arity. The DT code is not Python unfortunately. Figure~\ref{fig:log-reg-driver} gives an example of templated SQL, but not variable arity.} This pattern is currently done in an ad hoc way in
each method that needs it. In the future we plan to support this pattern
as a Python library that ships with MADlib. Similarly, the lack of native support for higher-order functions in SQL again necessitates templated SQL or a technical solution as discussed in Section~\ref{sec:C++AL}.
Unfortunately, templated SQL relies on identifiers or expressions passed as strings. As such, the DBMS backend will discover syntactic errors only when the generated SQL is executed. However, having the DBMS catch errors only in the generated SQL will usually lead to error messages that are enigmatic to the user. As a result, templated SQL requires MADlib to perform additional validation and error handling up front, in order not to compromise usability.
\subsection{Micro-Programming: Data Representations and Inner Loops}
\label{sec:micro}
In addition to doing the coarse-grained orchestration of chunks, the
database engine must very efficiently invoke the single-node code that
performs arithmetic on those chunks. For UDFs that operate at the row level (perhaps called multiple times per row), the standard practice is to implement them in C or C++. When computing dense matrix operations, these functions would
make native calls to an open-source library like LAPACK~\cite{laug} or Eigen~\cite{eigenweb}.
Sparse
matrices are not as well-handled by standard math libraries, and require
more customization for efficient representations both on disk and in
memory. We chose to write our own sparse matrix library in C for
MADlib, which implements a run-length encoding scheme. Both of
these solutions require careful low-level coding, and formed part of
the overhead of getting MADlib started.
% \fs{We inherited the sparse-vector implementation and did not actually make a decision on expected needs. So maybe we should deemphasize this point.}
% Let's include Luke as an author and give him credit for getting us off the ground.
The specifics of a given method's linear algebra can be coded in a
low-level way using loops of basic arithmetic in a language like C,
but it is nicer if they can be expressed in a higher-level syntax that
captures the semantics of the linear algebra at the level of matrices
and arrays. Moreover, for maintainability as well as portability it is best to separate database logic and APIs from mathematical code. We therefore provide a C++ abstraction layer in MADlib for writing performance-critical inner loops, which we describe next. \fs{The last two sentences here are now redundant with the first two sentences in the next paragraph. I would just remove everything between `Moreover' and `describe next'.}
\subsection{A C++ Abstraction Layer for UDFs} \label{sec:C++AL}
There are a number of complexities involved in writing C or C++-based user-defined functions over a legacy DBMS like PostgreSQL, all of which can get in the way of maintainable, portable application logic. This is especially frustrating for routines whose pseudocode amounts to a few symbols in linear algebra. \fs{I don't understand the last sentence.} MADlib provides a C++ abstraction layer both to ease the burden of writing high-performance UDFs, and to encapsulate DBMS-specific logic inside the abstraction layer, rather than spreading the cost of porting across all the UDFs in the library. In brief, the MADlib C++ abstraction provides three classes of functionality: type bridging, resource management shims, and math library integration.
Type bridging is provided via an encapsulated mapping of C++ types and methods to database types and functions. UDFs can be written with standard C++ atomic types, as well as the vector and matrix types that are native to a high performance linear-algebra library. (We have successfully layered multiple alternative libraries under this interface, and are currently using Eigen~\cite{eigenweb}). The translation to and from database types (including composite types like \texttt{double precision[]} for vectors) is handled by the abstraction layer. Similarly, higher-order templated functions in C++ can be mapped to the appropriate object IDs of UDFs in the database, with the abstraction layer taking care of looking up the function in the database catalog, verifying argument lists, ensuring type-safety, etc.
% One is the mapping of C++ language types and function pointers to database types and DBMS-registered functions. A second is the correct use of DBMS facilities ordinarily associated with operating-system or language-level standard libraries: memory management, exception handling, signal handling and so on. Finally, the key goal in MADlib is for mathematical expressions to take advantage of the expressivity and performance tuning of mature linear algebra libraries. The typical coding style for PostgreSQL extensions makes all these issues explicit, verbose, and easy to get wrong. We cover each of these three topics briefly.
%
% UDFs written in C or C++ are invoked as dynamically-linked function pointers, with arguments passed as an array of pointers and additional metadata. These UDFs typically begin with lengthy, system-specific boilerplate code for type-checking: it must ensure that the passed data is of the correct type, it must copy immutable data before doing modifications, verify array lengths, etc. The MADlib C++ abstraction layer encapsulates these issues within a recursive C++ class called \texttt{AnyType} that can contain either a primitive type (like, e.g., \texttt{int} or \texttt{double}) or multiple other values of type \texttt{AnyType} (for representing a composite type). This encapsulation works both for passing data from the DBMS to the C++ function, as well as returning values back from C++. To give an example: A simple, portable, and completely type-safe (though arguably not very useful) function that adds two numbers can thus be implemented with essentially as little code as in a high-level scripting language:
% \begin{cppcode*}{gobble=2}
% AnyType
% sum_two_doubles::run(AnyType &args) {
% return args[0].getAs<double>()
% + args[1].getAs<double>();
% }
% \end{cppcode*}
% \jmh{I don't really understand this.}
%
% A second responsibility of the abstraction layer is to supplement the bridging with additional semantics, in order to facilitate rapid implementation of mathematical functions: For instance, double-precision arrays in the DBMS are the canonical way to represent vectors in Euclidean space. Our C++ abstraction layer therefore not only provides an array-to-array bridge but also maps DBMS arrays to vectors of the linear-algebra library Eigen~\cite{eigenweb}. That way users can immediately make use of the very sophisticated vector and matrix operations provided by Eigen.
%
% \jmh{This is about higher-order functions, not 2nd-order logic.}
% The abstraction layer can also help compensate for SQL's lack of higher-order logic: For instance, an \texttt{AnyType} object can contain a ``pointer'' to a user-defined function. With the syntactic sugar possible in C++, this essentially makes in-database function first-class objects like they commonly are in modern programming languages. Internally, the abstraction layer maps UDFs to their object ID in the database, and it takes care of looking up the function in the database catalog, verifying argument lists, ensuring type-safety, etc.
The second aspect of the C++ abstraction layer is to provide a safe and robust standard runtime interface to DBMS-managed resources. This includes layering C++ object allocation/deallocation over DBMS-managed memory interfaces, providing shims between C++ exception handling and DBMS handlers, and correctly propagating system signals to and from the DBMS.
%
% . For instance, PostgreSQL maintains a hierarchy of memory contexts: When a query is started, a new memory context is created and all transient memory allocations are supposed to occur within this context. When the query ends, disposing of the query context provides a simple and effective way of garbage collection. Our C++ abstraction layer makes sure that such modi operandi are followed. On the other hand, the C++ abstraction layer also facilitates writing C++ code with a well-defined interface. This is particularly necessary if (as is typically the case) a DBMS only provides a C plugin interface: In that case it is important that exceptions, signals, etc. to not cross runtime boundaries.
Finally, by incorporating proven third-party libraries, the C++ abstraction layer makes it easy for MADlib developers to write correct and performant code. For example, the Eigen linear-algebra library contains well-tested and well-tuned code that makes use of the SIMD instruction sets (like SSE) found in today's CPUs. Likewise, the abstraction layer itself has been tuned for efficient value marshalling, and code based on it will automatically benefit from future improvements.
% Some examples include: All type bridges are aware of mutable and immutable objects and avoid making copies whenever possible. DBMS-catalogue lookups are minimized by caching.
Moreover, by virtue of being a template library, the runtime and abstraction overhead is reduced to a minimum.
As an illustration of the high-level code one can write over our abstraction layer, Listings~\ref{fn:lin-reg-trans} and \ref{fn:lin-reg-final} show reduced, but fully functional code snippets that implement multiple linear regression (as discussed further in Section~\ref{sec:regression}).
We have spent significant time tuning the C++ abstraction layer over PostgreSQL and Eigen. Figure~\ref{fig:regression} illustrates our progress in performance tuning, but also shows that some runtime overhead still has to be resolved. One of our goals for version 1.0 is to reduce the overhead of the C++ abstraction layer to virtually zero.
\ksn{Preceding paragraph. This is too much of a forward reference. Move the paragraph to Section 4.4?} \fs{Tend to agree, except you probably mean 4.5, not 4.4.}
\section{Examples}
\ksn{We used the word "segment" throughout without first explaining what it is. Outside Greenplum, probably nobody knows what a segment is. :)}
To illustrate the above points, we look at three different algorithmic scenarios. The first is Linear Regression using Ordinary Least Squares (OLS), which is an example of a widely useful, simple single-pass aggregation technique. The second is binary Logistic Regression, another widely used technique, but one that employs an iterative algorithm. Finally, we look at $k$-means \fs{changed this from `K-Means' to `$k$-means' because that's what I used everywhere else.} Clustering, an iterative algorithm with large intermediate states spread across machines.
\subsection{Single-Pass: Ordinary Least Squares} \label{sec:regression}
\ksn{A question people would ask on reading the algorithm is what happens when the number of independent variables is large (as is common in text analytics problem).
Would the implementation crash if the matrix to be inverted can't fit in memory? Does the system fail gracefully in that case?
Is there a sentence or two we can add to try to head off that criticism up front?}
In ordinary-least-squares (OLS) linear regression the goal is to fit a linear function to a set of points, with the objective of minimizing the sum of squared residuals. Formally, we are given points $(\vec x_1, y_1), \dots, (\vec x_n, y_n)$, where $\vec x_i \in \R^d$ and $y_i \in \R$, and our goal is to find the vector $\widehat{\vec b}$ that minimizes $\sum_{i=1}^n (y_i - \langle \widehat{\vec b}, \vec x_i \rangle)^2$. OLS is one of the most fundamental methods in statistics. Typically, each $y_i$ is assumed to be an (independent) noisy measurement of $\langle \vec b, \vec x_i \rangle$, where $\vec b$ is an unknown but fixed vector and the noise is uncorrelated with mean 0 and unknown but fixed variance. Under these assumptions, $\widehat{\vec b}$ is the best linear unbiased estimate of~$\vec b$ (Gauss-Markov). Under additional assumptions (normality, independence), $\widehat{\vec b}$ is also the maximum-likelihood estimate. Letting $X$ denote the matrix whose rows are $\vec x_i^T$, and defining $\vec y := (y_1, \dots, y_n)^T$, it is well-known that the sum of squared residuals is minimized by $\widehat{\vec b} = (X^TX)^{-1}X^T \vec y$ (for exposition purposes we assume the full-rank case here, though this is not a requirement for MADlib).
It has been observed before that computing $\widehat{\vec b}$ lends itself well to data-parallel implementations \cite{DBLP:conf/nips/ChuKLYBNO06}---in extensible database terms, it can be done with a simple user-defined aggregate. The principal observation is this: $X^TX = \sum_{i=1}^n \vec x_i \vec x_i^T$ and $X^T \vec y = \sum_{i=1}^n \vec x_i y_i$ are just sums of transformations of each data point. Summation is associative, so data parallelism virtually comes for free---we can compute the per-segment subsums of the previous expressions locally on each segment, and then sum up all subsums during a second-phase aggregation. As a final non-parallelized step, we compute the inverse of $X^TX$ and then multiply with $X^T \vec y$. These final operations are comparatively cheap, since the number of independent variables (and thus the dimensions of $X^T X$ and $X^T \vec y$) is typically ``small''.
\subsubsection{MADlib Implementation}
We assume that data points are stored as \texttt{(x DOUBLE PRECISION[], y DOUBLE PRECISION)} tuples. Linear regression is then implemented as a user-defined aggregate with a transition and final function roughly as in Listings~\ref{fn:lin-reg-trans} and \ref{fn:lin-reg-final}, respectively. Specifically, we have omitted finiteness checks and several output statistics in the code example. \jmh{can you be specific about what got left out of the listings?} \fs{Done.} The merge function, which is not shown, just adds all values in the transition states together.
\begin{Verbatim}[commandchars=\\\{\}, ,numbersep=5pt,tabsize=4,fontsize=\scriptsize ,framesep=2mm]
psql# \PY{k}{SELECT} \PY{p}{(}\PY{n}{linregr}\PY{p}{(}\PY{n}{y}\PY{p}{,} \PY{n}{x}\PY{p}{)}\PY{p}{)}\PY{p}{.}\PY{o}{*} \PY{k}{FROM} \PY{n}{data}\PY{p}{;}
-[ RECORD 1 ]+--------------------------------------------
coef | \{1.73071268159411,2.24279312002405\}
r2 | 0.947520042268114
std_err | \{0.325768074771675,0.0533185970021179\}
t_stats | \{5.31271421488165,42.0639935430964\}
p_values | \{6.76812299982288e-07,4.44089209850063e-16\}
condition_no | 169.509326412661
\end{Verbatim}
\jmh{That's some pretty non-relational looking output. Shall we explain?} \fs{Do you mean explaining that linregr returns a composite type?} \ksn{Please reduce the precision of the floating point numbers to something sensible.}
\jmh{In Listings 1 \& 2, we need either comments inline or a longer caption to explain what's there. Specifically, explain the casts and templates, and note the invocation of Eigen routines (matrix multiple, transposition, pseudo-inverse). I can guess at that, but you'll have to clarify.}
\begin{code}
\begin{cppcode}
// Transition (per-row) step of the linear-regression user-defined
// aggregate: folds one observation (x, y) into the running sums
// X^T X and X^T y that the final function combines into the
// coefficient estimate.
AnyType
linregr_transition::run(AnyType &args) {
// Transition state is a class that wraps an array.
// We expect a mutable array. If DBMS allows
// modifications, copying will be avoided.
LinRegrTransitionState<
MutableArrayHandle<double> > state = args[0];
// Dependent variable is a double-precision float
double y = args[1].getAs<double>();
// Vector of independent variables wraps an immutable
// array (again, no unnecessary copying). This maps
// to an Eigen type
HandleMap<const ColumnVector> x
= args[2].getAs<ArrayHandle<double> >();
if (state.numRows == 0)
// The first row determines the number of
// independent variables
state.initialize(*this, x.size());
state.numRows++;
// Running sums of y and y^2; presumably inputs to the
// output statistics (e.g., r2) omitted from this reduced
// listing -- see the accompanying text.
state.y_sum += y;
state.y_square_sum += y * y;
// Accumulate X^T y = sum_i x_i * y_i.
// noalias informs Eigen to multiply in-place
state.X_transp_Y.noalias() += x * y;
// Accumulate X^T X = sum_i x_i x_i^T.
// Since $X^TX$ is symmetric, we only need to
// compute a triangular part
triangularView<Lower>(state.X_transp_X)
+= x * trans(x);
return state;
}
\end{cppcode}
\caption{Linear-regression transition function}
\label{fn:lin-reg-trans}
\end{code}
\begin{code}
\begin{cppcode}
// Final step of the linear-regression aggregate: computes
// coef = (X^T X)^+ X^T y from the accumulated transition state.
// Using the pseudo-inverse also covers the rank-deficient case.
AnyType
linregr_final::run(AnyType &args) {
// Immutable array: Array will never be copied
LinRegrTransitionState<
ArrayHandle<double> > state = args[0];
// The following is a MADlib class that wraps Eigen's
// solver for self-adjoint matrices.
// ComputePseudoInverse requests the Moore-Penrose
// pseudo-inverse, so a singular X^T X is handled as well.
SymmetricPositiveDefiniteEigenDecomposition<Matrix>
decomposition(state.X_transp_X, EigenvaluesOnly,
ComputePseudoInverse);
Matrix inverse_of_X_transp_X
= decomposition.pseudoInverse();
// Let backend allocate array for coefficients so as to
// avoid copying on return (if supported by DBMS).
HandleMap<ColumnVector> coef(
allocateArray<double>(state.widthOfX));
// coef = (X^T X)^+ * (X^T y); noalias avoids a temporary
coef.noalias() = inverse_of_X_transp_X
* state.X_transp_Y;
// Return a composite value: the coefficient vector plus
// the condition number of X^T X (cf. the example output).
AnyType tuple;
tuple << coef << decomposition.conditionNo();
return tuple;
}
\end{cppcode}
\caption{Linear-regression final function}
\label{fn:lin-reg-final}
\end{code}
\subsection{Multi-Pass: (Binary) Logistic Regression}
In (binary) logistic regression, we are given points $(\vec x_1, y_1)$, $\dots$, $(\vec x_n, y_n)$, where $\vec x_i \in \R^d$ and $y_i \in \{ 0,1 \}$, and our goal is to find the vector $\widehat{\vec b}$ that maximizes $\prod_{i=1}^n \sigma((-1)^{y_i + 1} \cdot \langle \widehat{\vec b}, \vec x_i \rangle)$. Here, $\sigma(z) = \frac{1}{1+\exp(z)}$ denotes the logistic function. Statistically, this is the max\-i\-mum-likelihood estimate for an unknown vector $\vec b$ under the assumption that each $y_i$ is a random variate with $\Pr[y_i = 1 \mid \vec x_i] = \sigma( \langle \vec b, \vec x_i \rangle )$ and that all observations are independent.
It is well-known that, in general, no closed-formula expression for $\widehat{\vec b}$ exists. Instead, $\widehat{\vec b}$ can be computed as the solution of a convex program via standard iterative methods. Arguably, the most common method is to maximize the logarithm of the likelihood using Newton's method. In the case of logistic regression this reduces to \emph{iteratively reweighted least squares} with iteration rule $\widehat{\vec \beta}_{m+1} = (X^T D X)^{-1} X^T D \vec z_m$. \ksn{I don't think D in the iteration rule is explained.} \fs{Fixed.} Here $D$ and $\vec z_m$ are transformations of $X$ and $\widehat{\vec \beta}_m$.
\subsubsection{MADlib Implementation} \label{sec:log-regression-impl}
\jmh{I think the logistic regression section will need work from you, to help the user understand how the SQL statement invokes the Python driver, and how the Python driver in turn invokes each iteration in SQL. Clarifying that control flow will help a lot more than Listing 3, so if a picture is in order feel free to replace Listing 3. (BTW feel free to upload a photo of a pencil drawing for now, we can clean it up tomorrow). Alternatively, be sure to add comments or caption to Listing 3 so it's understandable.}
Each individual iteration can be implemented via a user-defined aggregate using linear regression as a blueprint. However, the handling of iterations requires a further outer loop. We therefore implement a driver UDF in Python, as shown in Figure~\ref{fig:log-reg-driver}. MADlib provides the Python function \texttt{runIterativeAlg} that iteratively calls an aggregate function, stores the computed state, and terminates once the stopping criterion has been reached.
Unfortunately, implementing logistic regression using a driver function leads to a different interface:
\begin{sqlcode}
SELECT * FROM logregr('y', 'x', 'data');
\end{sqlcode}
A problem with this implementation is obvious: the \texttt{logregr} UDF is not an aggregate function and cannot be used in grouping constructs. This makes doing multiple logistic regressions at once harder than multiple linear regressions.
\fs{I would now remove the rest of the subsubsection. For now, I am leaving the old discussion for reference.}
In addition, this syntax hides identifiers from the SQL parser. \jmh{I don't know what that means.} \fs{My main point is that identifiers passed as strings only become visible to the DBMS when the generated SQL is executed. However, we then should not rely on the SQL parser any more to give reasonable error messages. Waiting for the generated SQL to fail will usually lead to error messages that are enigmatic to the user.} Instead, MADlib is burdened with the responsibility of name binding, including all needed validation and error handling. \jmh{When we say MADlib, we main the logistic regression code. Yes? What's the lesson here? Iterative methods cannot be aggs? Or they can but we failed to think about them properly?} \fs{``Multi-pass'' aggregates would indeed be very useful from a MADlib perspective and enhance the user experience. -- That said, I think the whole paragraph should be rephrased. See also KeeSiong's comment.} Also, any errors will only be detected at runtime.
\ksn{The discussion ends with too much gloom. Are we not going to propose some solution moving forward? Or does this go down as one of the fundamental limitations of implementing things in SQL?}
\begin{figure}
\includegraphics[scale=0.5]{LogisticRegression}
\caption{Sequence Diagram for Logistic Regression}
\label{fig:log-reg-driver}
\end{figure}
%\begin{code}
% \begin{pythoncode}
%return __runIterativeAlg(
% stateType = "FLOAT8[]",
% initialState = "NULL",
% source = source,
% updateExpr = """
% {MADlibSchema}.logregr_{optimizer}_step(
% ({depColumn})::BOOLEAN,
% ({indepColumn})::FLOAT8[],
% {{state}}
% )
% """.format(
% MADlibSchema = MADlibSchema,
% depColumn = depColumn,
% indepColumn = indepColumn,
% optimizer = optimizer),
% terminateExpr = """
% {MADlibSchema}.
% internal_logregr_{optimizer}_step_distance(
% {{newState}}, {{oldState}}
% ) < {precision}
% """.format(
% MADlibSchema = MADlibSchema,
% optimizer = optimizer,
% precision = precision),
% maxNumIterations = maxNumIterations)
% \end{pythoncode}
% \caption{Logistic regression driver}
% \label{fn:log-reg-driver}
%\end{code}
\subsection{Large-State Iteration: k-Means}
In $k$-means clustering, we are given $n$ points $x_1, \dots, x_n \in \R^d$, and our goal is to position $k$ centroids $c_1, \dots, c_k \in \R^d$ so that the sum of squared distances between each point and its closest centroid is minimized. Formally, we wish to minimize
\begin{math}
\sum_{i=1}^n \min_{j=1}^k \|x_i - c_j \|^2.
\end{math}
This problem is known to be NP-hard unless $d$ and $k$ are fixed \cite{ADH09a,MNV10a}. \ksn{This is a bit mysterious, since d and k are always fixed before we run the algorithm.
Can we word this more carefully? Is the statement even needed at all?}
\fs{If the dimension of the input vectors is not a priori fixed but part of the input, or likewise, if k is part of the input, then k-means is NP hard. Otherwise, it is not. It's pretty typical that complexity differs depending on which parameters are part of the input. So I might not understand your concern. Would it be clearer to write "unless d and k are constants"?}
However, the local-search heuristic proposed by Lloyd~\cite{L82a} performs reasonably well both in theory and in practice~\cite{AV07a,AMR09a}. At a high level, it works as follows:
%
\begin{enumerate}
\item Seeding phase: Find initial positions for $k$ centroids $c_1, \dots, c_k$.
\item Assign each point $x_1, \dots, x_n$ to its closest centroid. \label{enum:kmeans_abstract_points}
\item Reposition each centroid to the barycenter (mean) of all points assigned to it.
\item If no (or only very few) points got reassigned, stop. Otherwise, goto \eqref{enum:kmeans_abstract_points}.
\end{enumerate}
\subsubsection{MADlib implementation}
Based on the assumption that we can always comfortably store $k$ centroids in main memory, we can implement $k$-means similarly to logistic regression: Using a driver function that iteratively calls a user-defined aggregate. In the following, we take a closer look at this implementation. It is important to make a clear distinction between the inter-iteration state (the output of the UDA's final function) and intra-iteration state (as maintained by the UDA's transition and merge functions). During aggregation, the transition state contains both inter\nobreakdash- and intra-iteration state, but only modifies the intra-iteration state. We only store $k$ centroids in both the inter\nobreakdash- and intra-iteration states, and consider the assignments of points to centroids as implicitly given.
In the aggregate transition function, we first compute the centroid that the current point was closest to at the beginning of the iteration, i.e., using the inter-iteration state. We then update the barycenter of this centroid in the intra-iteration state. Only as the final step of the aggregate, the intra-iteration state becomes the new inter-iteration state.
Unfortunately, in order to check the convergence criterion that no or only few points got reassigned, we have to do two closest-centroid computations per point and iteration: First, we need to compute the closest centroid in the previous iteration and then the closest one in the current iteration. If we stored the closest points explicitly, we could avoid half of the closest-centroid calculations.
We can store points in a table called \texttt{points}, with the array of centroids as an attribute \texttt{centroids} \fs{`centroids' I actually thought of as parameter for the query. The array of centroids is not a column in the `points' table.}, a \texttt{coords} attribute containing the points' coordinates, and the current \texttt{centroid\_id} for the point as a third attribute. Then we can define a UDF \texttt{closest\_point(a,b)} that determines the point in array \texttt{a} that is closest to \texttt{b}. Given such a table, we can make the point-to-centroid assignments explicit using the following SQL:
\begin{sqlcode}
UPDATE points
SET centroid_id = closest_point(centroids, coords)
\end{sqlcode}
Ideally, we would like to perform the point reassignment and the repositioning with a single pass over the data. Unfortunately, this cannot be expressed in standard SQL.\footnote{While PostgreSQL and Greenplum provide an optional \texttt{RETURNING} clause for \texttt{UPDATE} commands, this returns only one row for each row affected by the \texttt{UPDATE}, and aggregates cannot be used within the \texttt{RETURNING} clause. Moreover, an \texttt{UPDATE ... RETURNING} cannot be used as a subquery.}
Therefore, while we can reduce the number of closest-centroid calculations by one half, PostgreSQL and Greenplum process queries one-by-one (and do not perform cross-statement optimization), so they will need to make two passes over the data per one $k$-means iteration. In general, it depends on the DBMS, the data, the hardware, etc.\ whether explicitly storing points leads to a performance improvement.
The pattern of updating temporary state is made a bit more awkward in PostgreSQL due to its legacy of versioned storage. PostgreSQL performs an update by first inserting a new row and then marking the old row as invisible \cite[Section~23.1.2]{postgres:9.1.3}. As a result, for updates that touch many rows it is typically faster to copy the updated data into a new table (i.e., \texttt{CREATE TABLE AS SELECT} and \texttt{DROP TABLE}) rather than issue an \texttt{UPDATE}. These kinds of DBMS-specific performance tricks may merit encapsulation in an abstraction layer for SQL portability.
% Our goal is to eventually hide much of the SQL generation in versatile abstraction layers. \fs{That's my goal, but do we really work in that direction?}
\subsection{MADlib in Practice}
\fs{A quick draft about how a customer uses logistic regression. Feel free to remove this again if it feels out of place.} \ksn{ This section should probably be Section 5, since the material doesn't really belong to section named Examples...}
\jmh{I found the 4.4 section hard to read, and without a real customer story it didn't seem to help. We could talk it over, but my inclination is to drop it. We're also a bit over-space just now, though we could fix that. Depends on how much energy we both have the next couple days (I'm waning, have to move on to other stuff).}
\fs{I don't think there is enough to make a real customer story out of 4.4. It's mainly an example of how to use the catalog to convert normalized data into MADlib's array/vector representation. I don't feel strongly about it. Another option: Instead of having a section with the pretentious title "MADlib in Practice", we could move the catalog example into the logistic-regression section.}
A primary design objective in MADlib is to provide modular components that can easily be used as building blocks for new algorithms like, e.g., ensemble or multi-stage learning. While we intend to include simplified interfaces for important special use cases, the most flexible way of using MADlib is always via its core ``textbook'' interfaces. Practitioners are therefore employing the same techniques used in the design of MADlib itself: Using templated SQL and interrogating the database catalog. Suppose, e.g., we want to do a logistic regression on a table called \texttt{crime} that has a large number of columns, and we intend to experiment with which columns to include in our analysis. The MADlib logistic-regression function, as seen in Section~\ref{sec:log-regression-impl}, expects a vector/array column as argument. The database catalog is the natural means of automating the mapping between column names and array indices. This can be expressed in standard SQL using the \texttt{information\_schema}, but PostgreSQL's native catalog provides an even more succinct way to access table meta data: Extracting all column names---and initializing them as included in the regression---could be done as follows:
%
\begin{sqlcode}
CREATE TABLE crime_columns AS
SELECT attnum, attname::TEXT, TRUE AS selected
FROM pg_attribute
WHERE attnum > 0 AND attrelid = 'crime'::regclass;
\end{sqlcode}
%
Now selecting or deselecting columns can be done in standard ways, if desired also with a GUI tool. From there, it is a simple step to generate SQL using array and string functions prevalent in DBMSs. Thus, we could call MADlib's logistic regression as follows (PostgreSQL syntax again):
%
\begin{sqlcode}
SELECT * FROM logregr(
'crime',
'crimerat >= 110',
'ARRAY[1, ' || array_to_string( (
SELECT array_agg(ivar ORDER BY pos)
FROM crime_columns
), ', ') || ']');
\end{sqlcode}
%
We remark that the dependent variable here is the boolean condition \texttt{(crimerat >= 110)}, and we include a constant 1 among the independent variables to determine the intercept in the logistic regression.
\subsection{Initial Performance Results}
\ksn{ I think we're going to cop a lot criticism on this preliminary performance testing.
The whole point of the in-database approach is to scale to large datasets, and we don't even attempt to solve a large enough linear regression problem that doesn't fit completely in memory? Very hard to argue against that...}
\fs{I addressed that point and hoped the message would be: We have perfect scalability for aggregates, and disk I/O will not affect scalability. It would hide the C++ AL overhead in our measurements though. So we are actually more critical of ourselves.}
In its current beta version, MADlib has been tuned a fair bit over PostgreSQL and Greenplum, though much remains to be done. Here we report on some results for a basic scenario that exercises our core functionality, including the C++ abstraction layer, our ability to call out to linear algebra packages, and parallel speedup results.
The basic building block of MADlib is a user-defined aggregate \fs{omit the following four word?} with a driver function. In order to evaluate the scalability of this construct, we ran linear regression over Greenplum's parallel DBMS on various data sizes, using a 24-core test cluster we had available, which was outfitted with 144 GB of RAM over 51 TB of raw storage.\footnote{Our cluster is made up of four SuperMicro X8DTT-H server modules, each equipped with one six-core Intel Xeon X5670 processor, clocked at 2.93~GHz. While hyperthreading is enabled, we only run a single Greenplum ``segment'' (query process) per physical core. Each machine has 24~GB of RAM, an LSI MegaRAID~2108 ``Raid On a Chip'' controller with six attached 360~GB solid-state drives, and a Brocade~1020 converged network adapter. The operating system is Red Hat Enterprise Linux Server release 5.5 (Tikanga). On that we are running Greenplum Database 4.2.0, compiled with gcc~4.4.2.
%The following gcc options were used to compile the Greenplum database: \texttt{-O3 -funroll-loops -fargument-noalias-global -fno-omit-frame-pointer -finline-limit=1800 -fno-strict-aliasing -fwrapv -g}
} This is obviously a relatively modest-sized cluster by today's standards, but it is sufficient to illuminate (a) our efforts at minimizing performance overheads, and (b) our ability to achieve appropriate parallel speedup. \jmh{Should we add something like this: ``We note that the widely-discussed Mahout library has typically reported performance over much smaller data sets and configurations. We also note that none of the current libraries for scalable machine learning target multi-rack or multi-datacenter tasks.''} \fs{If we really want, I could redo the test on a larger dataset. But from previous experience, I would strongly assume that run-times go up a lot, the differences between the versions become small, and scalability remains the same. I would probably not say that Mahout tests are usually smaller. That sounds too defensive for me.}
For running linear regression as outlined in Section~\ref{sec:regression}, we expect runtime
\begin{math}
O(k^3 + (n \cdot k^2)/p)
\end{math}
where $k$ is the number of independent variables, $n$ is the number of observations, and $p$ is the number of segments. The $k^3$ time is needed for the matrix inversion, and the $k^2$ is needed for computing each outer product $\vec x_i \vec x_i^T$ and adding it to the running sum. It turns out that our runtime measurements fit these expectations quite well, and the constant factors are relatively small. See Figures~\ref{fig:regression} and \ref{fig:regression-diagram}. In particular we note:
\begin{itemize}
\item The overhead for a single query is very low and only a fraction of a second. This also implies that we lose little in implementing iterative algorithms using driver functions that run multiple SQL queries.
\item Given the previous points, the Greenplum database achieves perfect linear speedup in the example shown.
\end{itemize}
In the example, all data was essentially in the buffer cache, and disk I/O was not a limiting factor. This is quite typical in practice: an analyst often runs a machine-learning algorithm several times with different parameters. Given massive amounts of main memory on each node in the cluster, much of the data should moreover remain in the cache. Finally, the computational cost per row grows at least quadratically, and thus will easily surpass I/O cost for complex models. As we compared various cluster sizes and number of independent variables and the respective execution times for previous versions of MADlib, the lesson we learned is that even though we anticipate non-trivial overhead by the DBMS, careful performance tuning---e.g., by making use of instruction-accurate profiling using Valgrind~\cite{NS07a}---still makes significant differences:
\begin{itemize}
\item Version 0.1alpha is an implementation in C that computes the outer-vector products $\vec x_i \vec x_i^T$ as a simple nested loop.
\item \ksn{I suspect nobody cares about the issues we had with Armadillo in Verion 0.2.1beta... do we really want to devote so much space to that?} \fs{The take-away point was supposed to be: "Do not hide behind the argument disk I/O will overshadow everything." Performance tuning at a fairly low level is still important.}
Version 0.2.1beta introduced an implementation in C++ that used the Armadillo \cite{armadillo} linear-algebra library as a frontend for LAPACK/BLAS. It turns out that this version was much slower for two reasons: The BLAS library used was the default one shipped with CentOS 5, which is built from the untuned reference BLAS. Profiling and examining the critical code paths revealed that computing $\vec y^T \vec y$ for a row vector $\vec y$ is about three to four times slower than computing $\vec x \vec x^T$ for a column vector $\vec x$ of the same dimension (and the MADlib implementation unfortunately used to do the former). Interestingly, the same holds for Apple's Accelerate framework on Mac~OS~X, which Apple promises to be a tuned library. The second reason for the speed disadvantage is runtime overhead in the first incarnation of the C++ abstraction layer (mostly due to locking and calls into the PostgreSQL/Greenplum backend).
\item Version 0.3 has an updated linear-regression implementation that relies on the Eigen C++ linear-algebra library and takes advantage of the fact that the matrix $X^T X$ is symmetric positive definite. Runtime overhead has been reduced, but some calls into the database backend still need better caching.
\end{itemize}
%
Other noteworthy results during our performance studies included that there are no measurable performance differences between PostgreSQL 9.1.1 (both in single and multi-user mode) and GP 4.1 in running the aggregate function on a single core. Moreover, while testing linear/logistic-regression execution times, single-core performance of even laptop CPUs (like the Core i5 540M) did not differ much from today's server CPUs (like the Xeon family). Typically the differences were even less than what the difference in clock speeds might have suggested. \jmh{Care to speculate why?} \fs{Pure speculation at this stage: Memory speed does not scale similarly, non-linear parts of the code/branches, different pipeline lengths, different number of clock cycles per FP operation, ...?}
\begin{comment}
% The following script was used for testing linear regression.
% --8<--
#!/usr/bin/env bash
PORT=5555
DB=schopf
VARS_LIST="10 20 40 80 160 320"
ROWS_LIST=10000000
GENERATOR=./LinearRegressionRandom
PSQL_ARGS="--port ${PORT} --tuples-only --output /dev/null ${DB}"
TABLE_NAME_FMT='linregr_v${VARS}_r${ROWS}'
for ROWS in ${ROWS_LIST}; do
for VARS in ${VARS_LIST}; do
TABLE_NAME=$(eval "echo ${TABLE_NAME_FMT}")
psql ${PSQL_ARGS} &> /dev/null <<-EOF
DROP TABLE IF EXISTS ${TABLE_NAME};
CREATE TABLE ${TABLE_NAME} (
id SERIAL,
x DOUBLE PRECISION[],
y DOUBLE PRECISION
);
EOF
${GENERATOR} -i ${VARS} -r ${ROWS} -t | \
psql ${PSQL_ARGS} -c \
"COPY ${TABLE_NAME}(x, y) FROM STDIN;"
psql ${PSQL_ARGS} <<-EOF
SELECT count(*) FROM ${TABLE_NAME};
\timing
\echo MADlib v0.2.1beta. Vars: ${VARS} Rows: ${ROWS}
SELECT (madlib_0_2_1beta.linregr(y,x)).coef[1] FROM ${TABLE_NAME};
\echo
EOF
psql ${PSQL_ARGS} <<-EOF
SELECT count(*) FROM ${TABLE_NAME};
\timing
\echo MADlib v0.3. Vars: ${VARS} Rows: ${ROWS}
SELECT (madlib_0_3_g553770b.linregr(y,x)).coef[1] FROM ${TABLE_NAME};
\echo
EOF
psql ${PSQL_ARGS} <<-EOF
SELECT count(*) FROM ${TABLE_NAME};
\timing
\echo Greenplum. Vars: ${VARS} Rows: ${ROWS}
SELECT (mregr_coef(y,x))[1] FROM ${TABLE_NAME};
\echo
EOF
done
done
% -->8--
\end{comment}
\begin{figure*}
\centering
\pgfplotstabletypeset[
every nth row={6}{before row=\midrule},
columns/segments/.style={
column type=r,column name={},
},
columns/indep/.style={
column type=r,column name={},
},
columns/rows/.style={
column type=r,column name={(million)},
},
columns/v0.3/.style={
dcolumn={D{.}{.}{3.3}}{c},column name={(s)},precision=4
},
columns/v0.2/.style={
dcolumn={D{.}{.}{3.3}}{c},column name={(s)},precision=4
},
columns/v0.1/.style={
dcolumn={D{.}{.}{3.4}}{c},column name={(s)},precision=4
},
every head row/.style={before row={%
\toprule
\multicolumn{1}{c}{\textbf{\# Segments}} &
\multicolumn{1}{c}{\textbf{\# Rows}} &
\multicolumn{1}{c}{\textbf{\# Variables}} &
\multicolumn{1}{c}{\textbf{v0.3}} &
\multicolumn{1}{c}{\textbf{v0.2.1beta}} &
\multicolumn{1}{c}{\textbf{v0.1alpha}}
\\%
}, after row=\midrule},
every last row/.style={after row=\bottomrule}
]{Tables/linregr.dat}
\caption{Linear-regression execution times}
\label{fig:regression}
\end{figure*}
\begin{figure}
\begin{tikzpicture}
\begin{axis}[height=5cm,width=8cm,xlabel={\# independent variables},ylabel={execution time (s)}]
\addplot table[
x=indep,y=v0.3,restrict expr to domain={\thisrow{segments}}{6:6}
]{Tables/linregr.dat};
\addplot table[
x=indep,y=v0.3,restrict expr to domain={\thisrow{segments}}{12:12}
]{Tables/linregr.dat};
\addplot table[
x=indep,y=v0.3,restrict expr to domain={\thisrow{segments}}{18:18}