-
Notifications
You must be signed in to change notification settings - Fork 6
/
07-performance.html
1318 lines (1225 loc) · 105 KB
/
07-performance.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>表現的評估 — 新手村逃脫!初心者的 Python 機器學習攻略 1.0.0 documentation</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.11.2/css/all.min.css" integrity="sha384-KA6wR/X5RY4zFAHpv/CnoG2UW1uogYfdnP67Uv7eULvTveboZJg0qUpmJZb5VqzN" crossorigin="anonymous">
<link href="_static/css/index.css" rel="stylesheet">
<link rel="stylesheet" href="_static/sphinx-book-theme.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/sphinx-book-theme.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<!-- removed duplicate load of _static/sphinx-book-theme.js (already included above) -->
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/x-mathjax-config">MathJax.Hub.Config({"TeX": {"equationNumbers": {"autoNumber": "AMS", "useLabelIds": true}}, "jax": ["input/TeX", "output/HTML-CSS"], "displayAlign": "left", "tex2jax": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true, "ignoreClass": "document", "processClass": "math|output_area"}})</script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="深度學習入門" href="08-deep-learning.html" />
<link rel="prev" title="類別預測的任務" href="06-classification.html" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="docsearch:language" content="en">
</head>
<body data-spy="scroll" data-target="#bd-toc-nav" data-offset="80">
<div class="container-xl">
<div class="row">
<div class="col-12 col-md-3 bd-sidebar site-navigation show" id="site-navigation">
<div class="navbar-brand-box">
<a class="navbar-brand text-wrap" href="index.html">
<h1 class="site-logo" id="site-title">新手村逃脫!初心者的 Python 機器學習攻略 1.0.0 documentation</h1>
</a>
</div>
<form class="bd-search d-flex align-items-center" action="search.html" method="get">
<i class="icon fas fa-search"></i>
<input type="search" class="form-control" name="q" id="search-input" placeholder="Search the docs ..." aria-label="Search the docs ..." autocomplete="off" >
</form>
<nav class="bd-links" id="bd-docs-nav" aria-label="Main navigation">
<ul class="nav sidenav_l1">
<li class="">
<a href="00-preface.html">關於本書</a>
</li>
<li class="">
<a href="01-introduction.html">關於視覺化與機器學習</a>
</li>
<li class="">
<a href="02-numpy.html">數列運算</a>
</li>
<li class="">
<a href="03-matplotlib.html">資料探索</a>
</li>
<li class="">
<a href="04-sklearn.html">機器學習入門</a>
</li>
<li class="">
<a href="05-regression.html">數值預測的任務</a>
</li>
<li class="">
<a href="06-classification.html">類別預測的任務</a>
</li>
<li class="active">
<a href="">表現的評估</a>
</li>
<li class="">
<a href="08-deep-learning.html">深度學習入門</a>
</li>
<li class="">
<a href="09-appendix-a.html">附錄 A</a>
</li>
</ul>
</nav>
<!-- To handle the deprecated key -->
<div class="navbar_extra_footer">
Theme by the <a href="https://ebp.jupyterbook.org">Executable Book Project</a>
</div>
</div>
<main class="col py-md-3 pl-md-4 bd-content overflow-auto" role="main">
<div class="row topbar fixed-top container-xl">
<div class="col-12 col-md-3 bd-topbar-whitespace site-navigation show">
</div>
<div class="col pl-2 topbar-main">
<button id="navbar-toggler" class="navbar-toggler ml-0" type="button" data-toggle="collapse"
data-placement="bottom" data-target=".site-navigation" aria-controls="site-navigation"
aria-expanded="true" aria-label="Toggle navigation"
title="Toggle navigation">
<i class="fas fa-bars"></i>
<i class="fas fa-arrow-left"></i>
<i class="fas fa-arrow-up"></i>
</button>
<div class="dropdown-buttons-trigger">
<button id="dropdown-buttons-trigger" class="btn btn-secondary topbarbtn" aria-label="Download this page"><i
class="fas fa-download"></i></button>
<div class="dropdown-buttons">
<!-- ipynb file if we had a myst markdown file -->
<!-- Download raw file -->
<a class="dropdown-buttons" href="_sources/07-performance.ipynb"><button type="button"
class="btn btn-secondary topbarbtn" title="Download source file" data-toggle="tooltip"
data-placement="left">.ipynb</button></a>
<!-- Download PDF via print -->
<button type="button" id="download-print" class="btn btn-secondary topbarbtn" title="Print to PDF"
onClick="window.print()" data-toggle="tooltip" data-placement="left">.pdf</button>
</div>
</div>
<!-- Source interaction buttons -->
<div class="dropdown-buttons-trigger">
<!-- NOTE(review): id was a duplicate of the download button's id above; renamed to keep ids
document-unique — confirm no JS/CSS targets this id (theme hooks appear to use the
.dropdown-buttons-trigger class on the wrapping div) -->
<button id="dropdown-buttons-trigger-source" class="btn btn-secondary topbarbtn"
aria-label="Connect with source repository"><i class="fab fa-github"></i></button>
<div class="dropdown-buttons sourcebuttons">
<a class="repository-button"
href="https://github.com/spatialaudio/nbsphinx"><button type="button" class="btn btn-secondary topbarbtn"
data-toggle="tooltip" data-placement="left" title="Source repository"><i
class="fab fa-github"></i>repository</button></a>
<a class="issues-button"
href="https://github.com/spatialaudio/nbsphinx/issues/new?title=Issue%20on%20page%20%2F07-performance.html&body=Your%20issue%20content%20here."><button
type="button" class="btn btn-secondary topbarbtn" data-toggle="tooltip" data-placement="left"
title="Open an issue"><i class="fas fa-lightbulb"></i>open issue</button></a>
<a class="edit-button" href="https://github.com/spatialaudio/nbsphinx/edit/master/doc/07-performance.ipynb"><button
type="button" class="btn btn-secondary topbarbtn" data-toggle="tooltip" data-placement="left"
title="Edit this page"><i class="fas fa-pencil-alt"></i>suggest edit</button></a>
</div>
</div>
<!-- Full screen (wrap in <a> to have style consistency -->
<a class="full-screen-button"><button type="button" class="btn btn-secondary topbarbtn" data-toggle="tooltip"
data-placement="bottom" onclick="toggleFullScreen()" title="Fullscreen mode"><i
class="fas fa-expand"></i></button></a>
<!-- Launch buttons -->
</div>
<div class="d-none d-md-block col-md-2 bd-toc show">
<div class="tocsection onthispage pt-5 pb-3">
<i class="fas fa-list"></i> On this page
</div>
<nav id="bd-toc-nav">
<ul class="nav section-nav flex-column">
<li class="nav-item toc-entry toc-h2">
<a href="#如何評估機器學習演算方法" class="nav-link">如何評估機器學習演算方法</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#評估數值預測任務的表現" class="nav-link">評估數值預測任務的表現</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#評估類別預測任務的表現" class="nav-link">評估類別預測任務的表現</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#自訂計算評估指標的類別-ClfMetrics" class="nav-link">自訂計算評估指標的類別 ClfMetrics</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#誤差的來源" class="nav-link">誤差的來源</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#減少訓練誤差" class="nav-link">減少訓練誤差</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#減少訓練誤差與測試誤差的間距" class="nav-link">減少訓練誤差與測試誤差的間距</a>
</li>
<li class="nav-item toc-entry toc-h2">
<a href="#延伸閱讀" class="nav-link">延伸閱讀</a>
</li>
</ul>
</nav>
<div class="tocsection editthispage">
<a href="https://github.com/spatialaudio/nbsphinx/edit/master/doc/07-performance.ipynb">
<i class="fas fa-pencil-alt"></i> Edit this page
</a>
</div>
</div>
</div>
<div id="main-content" class="row">
<div class="col-12 col-md-9 pl-md-3 pr-md-0">
<div>
<style>
/* CSS for nbsphinx extension */
/* remove conflicting styling from Sphinx themes */
div.nbinput.container,
div.nbinput.container div.prompt,
div.nbinput.container div.input_area,
div.nbinput.container div[class*=highlight],
div.nbinput.container div[class*=highlight] pre,
div.nboutput.container,
div.nboutput.container div.prompt,
div.nboutput.container div.output_area,
div.nboutput.container div[class*=highlight],
div.nboutput.container div[class*=highlight] pre {
background: none;
border: none;
padding: 0 0;
margin: 0;
box-shadow: none;
}
/* avoid gaps between output lines */
div.nboutput.container div[class*=highlight] pre {
line-height: normal;
}
/* input/output containers */
div.nbinput.container,
div.nboutput.container {
display: -webkit-flex;
display: flex;
align-items: flex-start;
margin: 0;
width: 100%;
}
@media (max-width: 540px) {
div.nbinput.container,
div.nboutput.container {
flex-direction: column;
}
}
/* input container */
div.nbinput.container {
padding-top: 5px;
}
/* last container */
div.nblast.container {
padding-bottom: 5px;
}
/* input prompt */
div.nbinput.container div.prompt pre {
color: #307FC1;
}
/* output prompt */
div.nboutput.container div.prompt pre {
color: #BF5B3D;
}
/* all prompts */
div.nbinput.container div.prompt,
div.nboutput.container div.prompt {
width: 4.5ex;
padding-top: 5px;
position: relative;
user-select: none;
}
div.nbinput.container div.prompt > div,
div.nboutput.container div.prompt > div {
position: absolute;
right: 0;
margin-right: 0.3ex;
}
@media (max-width: 540px) {
div.nbinput.container div.prompt,
div.nboutput.container div.prompt {
width: unset;
text-align: left;
padding: 0.4em;
}
div.nboutput.container div.prompt.empty {
padding: 0;
}
div.nbinput.container div.prompt > div,
div.nboutput.container div.prompt > div {
position: unset;
}
}
/* disable scrollbars on prompts */
div.nbinput.container div.prompt pre,
div.nboutput.container div.prompt pre {
overflow: hidden;
}
/* input/output area */
div.nbinput.container div.input_area,
div.nboutput.container div.output_area {
-webkit-flex: 1;
flex: 1;
overflow: auto;
}
@media (max-width: 540px) {
div.nbinput.container div.input_area,
div.nboutput.container div.output_area {
width: 100%;
}
}
/* input area */
div.nbinput.container div.input_area {
border: 1px solid #e0e0e0;
border-radius: 2px;
background: #f5f5f5;
}
/* override MathJax center alignment in output cells */
div.nboutput.container div[class*=MathJax] {
text-align: left !important;
}
/* override sphinx.ext.imgmath center alignment in output cells */
div.nboutput.container div.math p {
text-align: left;
}
/* standard error */
div.nboutput.container div.output_area.stderr {
background: #fdd;
}
/* ANSI colors */
.ansi-black-fg { color: #3E424D; }
.ansi-black-bg { background-color: #3E424D; }
.ansi-black-intense-fg { color: #282C36; }
.ansi-black-intense-bg { background-color: #282C36; }
.ansi-red-fg { color: #E75C58; }
.ansi-red-bg { background-color: #E75C58; }
.ansi-red-intense-fg { color: #B22B31; }
.ansi-red-intense-bg { background-color: #B22B31; }
.ansi-green-fg { color: #00A250; }
.ansi-green-bg { background-color: #00A250; }
.ansi-green-intense-fg { color: #007427; }
.ansi-green-intense-bg { background-color: #007427; }
.ansi-yellow-fg { color: #DDB62B; }
.ansi-yellow-bg { background-color: #DDB62B; }
.ansi-yellow-intense-fg { color: #B27D12; }
.ansi-yellow-intense-bg { background-color: #B27D12; }
.ansi-blue-fg { color: #208FFB; }
.ansi-blue-bg { background-color: #208FFB; }
.ansi-blue-intense-fg { color: #0065CA; }
.ansi-blue-intense-bg { background-color: #0065CA; }
.ansi-magenta-fg { color: #D160C4; }
.ansi-magenta-bg { background-color: #D160C4; }
.ansi-magenta-intense-fg { color: #A03196; }
.ansi-magenta-intense-bg { background-color: #A03196; }
.ansi-cyan-fg { color: #60C6C8; }
.ansi-cyan-bg { background-color: #60C6C8; }
.ansi-cyan-intense-fg { color: #258F8F; }
.ansi-cyan-intense-bg { background-color: #258F8F; }
.ansi-white-fg { color: #C5C1B4; }
.ansi-white-bg { background-color: #C5C1B4; }
.ansi-white-intense-fg { color: #A1A6B2; }
.ansi-white-intense-bg { background-color: #A1A6B2; }
.ansi-default-inverse-fg { color: #FFFFFF; }
.ansi-default-inverse-bg { background-color: #000000; }
.ansi-bold { font-weight: bold; }
.ansi-underline { text-decoration: underline; }
div.nbinput.container div.input_area div[class*=highlight] > pre,
div.nboutput.container div.output_area div[class*=highlight] > pre,
div.nboutput.container div.output_area div[class*=highlight].math,
div.nboutput.container div.output_area.rendered_html,
div.nboutput.container div.output_area > div.output_javascript,
div.nboutput.container div.output_area:not(.rendered_html) > img{
padding: 5px;
}
/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */
div.nbinput.container div.input_area > div[class^='highlight'],
div.nboutput.container div.output_area > div[class^='highlight']{
overflow-y: hidden;
}
/* hide copybtn icon on prompts (needed for 'sphinx_copybutton') */
.prompt a.copybtn {
display: none;
}
/* Some additional styling taken form the Jupyter notebook CSS */
div.rendered_html table {
border: none;
border-collapse: collapse;
border-spacing: 0;
color: black;
font-size: 12px;
table-layout: fixed;
}
div.rendered_html thead {
border-bottom: 1px solid black;
vertical-align: bottom;
}
div.rendered_html tr,
div.rendered_html th,
div.rendered_html td {
text-align: right;
vertical-align: middle;
padding: 0.5em 0.5em;
line-height: normal;
white-space: normal;
max-width: none;
border: none;
}
div.rendered_html th {
font-weight: bold;
}
div.rendered_html tbody tr:nth-child(odd) {
background: #f5f5f5;
}
div.rendered_html tbody tr:hover {
background: rgba(66, 165, 245, 0.2);
}
</style>
<div class="section" id="表現的評估">
<h1>表現的評估<a class="headerlink" href="#表現的評估" title="Permalink to this headline">¶</a></h1>
<p>我們先載入這個章節範例程式碼中會使用到的第三方套件、模組或者其中的部分類別、函式。</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[1]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="kn">from</span> <span class="nn">pyvizml</span> <span class="kn">import</span> <span class="n">CreateNBAData</span>
<span class="kn">from</span> <span class="nn">datetime</span> <span class="kn">import</span> <span class="n">datetime</span>
<span class="kn">import</span> <span class="nn">requests</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">pandas</span> <span class="k">as</span> <span class="nn">pd</span>
<span class="kn">import</span> <span class="nn">matplotlib.pyplot</span> <span class="k">as</span> <span class="nn">plt</span>
<span class="kn">from</span> <span class="nn">sklearn.preprocessing</span> <span class="kn">import</span> <span class="n">PolynomialFeatures</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">LinearRegression</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">Ridge</span>
<span class="kn">from</span> <span class="nn">sklearn.linear_model</span> <span class="kn">import</span> <span class="n">LogisticRegression</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">train_test_split</span>
<span class="kn">from</span> <span class="nn">sklearn.model_selection</span> <span class="kn">import</span> <span class="n">KFold</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">mean_squared_error</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">mean_absolute_error</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">confusion_matrix</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">accuracy_score</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">precision_score</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">recall_score</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">f1_score</span>
</pre></div>
</div>
</div>
<div class="section" id="如何評估機器學習演算方法">
<h2>如何評估機器學習演算方法<a class="headerlink" href="#如何評估機器學習演算方法" title="Permalink to this headline">¶</a></h2>
<p>評估機器學習演算方法是否能夠針對特定任務(包含數值預測、類別預測)運作,必須設計能夠量化演算方法表現的指標。評估迴歸模型與分類器表現的指標與尋找係數向量 <span class="math notranslate nohighlight">\(w\)</span> 藉此創建出 <span class="math notranslate nohighlight">\(h(X; w)\)</span> 的原理相同,差別在於究竟要比對哪一組目標向量 <span class="math notranslate nohighlight">\(y\)</span>?</p>
<p>我們找尋係數向量的依據,乃是基於最小化 <span class="math notranslate nohighlight">\(y^{(train)}\)</span> 與 <span class="math notranslate nohighlight">\(\hat{y}^{(train)}\)</span> 之間的誤差,其中數值預測任務是以均方誤差(Mean squared error, MSE)來表示,<span class="math notranslate nohighlight">\(m\)</span> 代表觀測值筆數。</p>
<p><span class="math">\begin{equation}
Minimize \; \frac{1}{m}\sum_{i}{(y^{(train)}_i - \hat{y_i}^{(train)})^2}
\end{equation}</span></p>
<p>類別預測任務則是以誤分類數(Error)來表示。</p>
<p><span class="math">\begin{equation}
Minimize \; \sum_{i} \mid y^{(train)}_i \neq \hat{y_i}^{(train)} \mid
\end{equation}</span></p>
<p>這是因為機器學習<strong>假設</strong>存在了一個函式 <span class="math notranslate nohighlight">\(f\)</span> 能夠完美描述特徵矩陣與目標向量的關係,但我們不能夠將<strong>假設</strong>存在的 <span class="math notranslate nohighlight">\(f\)</span> 拿來與創建出的 <span class="math notranslate nohighlight">\(h\)</span> 擺在桌面上比較,因此藉由比較 <span class="math notranslate nohighlight">\(y^{(train)}\)</span> 與 <span class="math notranslate nohighlight">\(\hat{y}^{(train)}\)</span> 來達成。評估迴歸模型與分類器的表現同樣是比較預測目標向量與實際目標向量之間的誤差,但是改為驗證資料或測試資料的目標向量。數值預測任務的表現評估以均方誤差衡量,<span class="math notranslate nohighlight">\(m\)</span> 代表觀測值筆數。</p>
<p><span class="math">\begin{equation}
MSE_{valid} = \frac{1}{m}\sum_{i}{(y^{(valid)}_i - \hat{y_i}^{(valid)})^2}
\end{equation}</span></p>
<p>類別預測任務的表現評估以誤分類數衡量。</p>
<p><span class="math">\begin{equation}
Error_{valid} = \sum_{i} \mid y^{(valid)}_i \neq \hat{y_i}^{(valid)} \mid
\end{equation}</span></p>
<p>機器學習專案中的訓練、驗證來自具備已實現數值或標籤資料集,測試則來自未實現數值或標籤資料集;迴歸模型與分類器在從未見過的測試資料上之表現將決定它被部署到正式環境開始運作時的成敗,在現實世界中要評估機器學習演算方法在測試資料上的表現,在時間與金錢成本上都比在驗證資料上實施來得高出許多,像是設計類似實驗組與對照組的測試環境、等待一段時間才會實現數值或標籤。</p>
<p>挑選機器學習演算方法的評估指標除了與任務種類相關,也與模型的應用場景有關,例如即便同屬於疾病的檢測分類模型,針對傳染疾病或罕見疾病所選擇的指標就有可能不同,這是由於和「誤分類」所衍生出的成本連動所致。</p>
</div>
<div class="section" id="評估數值預測任務的表現">
<h2>評估數值預測任務的表現<a class="headerlink" href="#評估數值預測任務的表現" title="Permalink to this headline">¶</a></h2>
<p>數值預測任務的表現評估以均方誤差來衡量 <span class="math notranslate nohighlight">\(y^{(valid)}\)</span> 與 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}\)</span> 之間的差異,均方誤差愈大推論 <span class="math notranslate nohighlight">\(h\)</span> 跟 <span class="math notranslate nohighlight">\(f\)</span> 的相似度愈低,反之均方誤差愈小推論 <span class="math notranslate nohighlight">\(h\)</span> 與 <span class="math notranslate nohighlight">\(f\)</span> 的相似度愈高。使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">mean_squared_error</span></code> 函式可以協助我們計算兩個目標向量之間的均方誤差。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[2]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="c1"># create_player_stats_df() 方法要對 data.nba.net 發出數百次的 HTTP 請求,等待時間會較長,要請讀者耐心等候</span>
<span class="n">cnd</span> <span class="o">=</span> <span class="n">CreateNBAData</span><span class="p">(</span><span class="n">season_year</span><span class="o">=</span><span class="mi">2019</span><span class="p">)</span>
<span class="n">player_stats</span> <span class="o">=</span> <span class="n">cnd</span><span class="o">.</span><span class="n">create_player_stats_df</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
Creating players df...
Creating players df...
Creating player stats df...
</pre></div></div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[3]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">X</span> <span class="o">=</span> <span class="n">player_stats</span><span class="p">[</span><span class="s1">'heightMeters'</span><span class="p">]</span><span class="o">.</span><span class="n">values</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">float</span><span class="p">)</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">player_stats</span><span class="p">[</span><span class="s1">'weightKilograms'</span><span class="p">]</span><span class="o">.</span><span class="n">values</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">float</span><span class="p">)</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">test_size</span><span class="o">=</span><span class="mf">0.33</span><span class="p">,</span> <span class="n">random_state</span><span class="o">=</span><span class="mi">42</span><span class="p">)</span>
<span class="n">h</span> <span class="o">=</span> <span class="n">LinearRegression</span><span class="p">()</span>
<span class="n">h</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">)</span>
<span class="n">y_pred</span> <span class="o">=</span> <span class="n">h</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">X_valid</span><span class="p">)</span>
<span class="n">mse_valid</span> <span class="o">=</span> <span class="n">mean_squared_error</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">mse_valid</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[3]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
52.74701649791643
</pre></div></div>
</div>
<p>亦可以自訂均方誤差的函式。</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[4]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="k">def</span> <span class="nf">meanSquaredError</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">):</span>
<span class="n">error</span> <span class="o">=</span> <span class="p">(</span><span class="n">y_true</span> <span class="o">-</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">squared_error</span> <span class="o">=</span> <span class="n">error</span><span class="o">**</span><span class="mi">2</span>
<span class="n">mean_squared_error</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">squared_error</span><span class="p">)</span>
<span class="k">return</span> <span class="n">mean_squared_error</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[5]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">mse_valid</span> <span class="o">=</span> <span class="n">meanSquaredError</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">mse_valid</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[5]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
52.74701649791643
</pre></div></div>
</div>
<p>另外一個也常被用來評估數值預測任務表現的指標是平均絕對誤差(Mean absolute error),平均絕對誤差和均方誤差相同之處在於他們都能精確捕捉預測失準的量值,無論是低估或者高估,經過平方或絕對值的運算都會成為正數被詳實地累積起來;相異之處在於均方誤差對於預測失準較多的離群值(Outliers)具有放大的效果(平方),而平均絕對誤差則不具有這樣類似加權的效果,因此當離群值在任務預測失準所衍生的成本也大幅上升的應用場景中,均方誤差就比平均絕對誤差更適合使用,表示迴歸模型的選擇和調校上會傾向避免預測失準較多的情況。</p>
<p>使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">mean_absolute_error</span></code> 函式可以協助我們計算兩個目標向量之間的平均絕對誤差。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[6]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">mae_valid</span> <span class="o">=</span> <span class="n">mean_absolute_error</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">mae_valid</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[6]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
5.251994295197642
</pre></div></div>
</div>
<p>亦可以自訂平均絕對誤差的函式。</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[7]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="k">def</span> <span class="nf">meanAbsoluteError</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">):</span>
<span class="n">error</span> <span class="o">=</span> <span class="p">(</span><span class="n">y_true</span> <span class="o">-</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">absolute_error</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">abs</span><span class="p">(</span><span class="n">error</span><span class="p">)</span>
<span class="n">mean_absolute_error</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">absolute_error</span><span class="p">)</span>
<span class="k">return</span> <span class="n">mean_absolute_error</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[8]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">mae_valid</span> <span class="o">=</span> <span class="n">meanAbsoluteError</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">mae_valid</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[8]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
5.251994295197642
</pre></div></div>
</div>
</div>
<div class="section" id="評估類別預測任務的表現">
<h2>評估類別預測任務的表現<a class="headerlink" href="#評估類別預測任務的表現" title="Permalink to this headline">¶</a></h2>
<p>類別預測任務的表現評估以誤分類數來衡量 <span class="math notranslate nohighlight">\(y^{(valid)}\)</span> 與 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}\)</span> 之間的差異,誤分類數愈多推論 <span class="math notranslate nohighlight">\(h\)</span> 跟 <span class="math notranslate nohighlight">\(f\)</span> 的相似度愈低,反之誤分類數愈少推論 <span class="math notranslate nohighlight">\(h\)</span> 與 <span class="math notranslate nohighlight">\(f\)</span> 的相似度愈高。分類器常使用的評估指標比迴歸模型為多,像是準確率(Accuracy)、精確率(Precision)、召回率(Recall)與 F1-score 等。這些評估指標乍看之下會讓我們眼花撩亂,但實際上只要能夠拆解正確分類 <span class="math notranslate nohighlight">\(y^{(valid)} = \hat{y}^{(valid)}\)</span> 與錯誤分類 <span class="math notranslate nohighlight">\(y^{(valid)} \neq \hat{y}^{(valid)}\)</span>
的組成,就可以理解評估分類器指標的設計哲學。</p>
<p>正確分類與錯誤分類各自都還能拆解成兩種情境:</p>
<ul class="simple">
<li><p>正確分類</p>
<ul>
<li><p>真陰性(True negative, TN):<span class="math notranslate nohighlight">\(y^{(valid)}=0\)</span> 並且 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=0\)</span></p></li>
<li><p>真陽性(True positive, TP):<span class="math notranslate nohighlight">\(y^{(valid)}=1\)</span> 並且 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=1\)</span></p></li>
</ul>
</li>
<li><p>錯誤分類</p>
<ul>
<li><p>偽陰性(False negative, FN):<span class="math notranslate nohighlight">\(y^{(valid)}=1\)</span> 並且 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=0\)</span></p></li>
<li><p>偽陽性(False positive, FP):<span class="math notranslate nohighlight">\(y^{(valid)}=0\)</span> 並且 <span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=1\)</span></p></li>
</ul>
</li>
</ul>
<p>這四種情境能夠以一個被稱作混淆矩陣(Confusion matrix)的 <span class="math notranslate nohighlight">\(2 \times 2\)</span> 矩陣表達。</p>
<table class="table">
<colgroup>
<col style="width: 27%" />
<col style="width: 36%" />
<col style="width: 36%" />
</colgroup>
<thead>
<tr class="row-odd"><th class="head"><p></p></th>
<th class="head"><p><span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=0\)</span></p></th>
<th class="head"><p><span class="math notranslate nohighlight">\(\hat{y}^{(valid)}=1\)</span></p></th>
</tr>
</thead>
<tbody>
<tr class="row-even"><td><p><span class="math notranslate nohighlight">\(y^{(valid)}=0\)</span></p></td>
<td><p>真陰性(True negative, TN)</p></td>
<td><p>偽陽性(False positive, FP)</p></td>
</tr>
<tr class="row-odd"><td><p><span class="math notranslate nohighlight">\(y^{(valid)}=1\)</span></p></td>
<td><p>偽陰性(False negative, FN)</p></td>
<td><p>真陽性(True positive, TP)</p></td>
</tr>
</tbody>
</table>
<p>前述眼花撩亂的評估指標,其實都能從組成混淆矩陣的四個象限衍生而得,使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">confusion_matrix</span></code> 函式可以協助我們創建兩個目標向量之間正確分類、錯誤分類所組成的混淆矩陣。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[9]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">X</span> <span class="o">=</span> <span class="n">player_stats</span><span class="p">[[</span><span class="s1">'apg'</span><span class="p">,</span> <span class="s1">'rpg'</span><span class="p">]]</span><span class="o">.</span><span class="n">values</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="nb">float</span><span class="p">)</span>
<span class="n">pos_dict</span> <span class="o">=</span> <span class="p">{</span>
<span class="mi">0</span><span class="p">:</span> <span class="s1">'G'</span><span class="p">,</span>
<span class="mi">1</span><span class="p">:</span> <span class="s1">'F'</span>
<span class="p">}</span>
<span class="n">pos</span> <span class="o">=</span> <span class="n">player_stats</span><span class="p">[</span><span class="s1">'pos'</span><span class="p">]</span><span class="o">.</span><span class="n">values</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mi">0</span> <span class="k">if</span> <span class="n">p</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">==</span> <span class="s1">'G'</span> <span class="k">else</span> <span class="mi">1</span> <span class="k">for</span> <span class="n">p</span> <span class="ow">in</span> <span class="n">pos</span><span class="p">])</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">test_size</span><span class="o">=</span><span class="mf">0.33</span><span class="p">,</span> <span class="n">random_state</span><span class="o">=</span><span class="mi">42</span><span class="p">)</span>
<span class="n">h</span> <span class="o">=</span> <span class="n">LogisticRegression</span><span class="p">()</span>
<span class="n">h</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">)</span>
<span class="n">y_pred</span> <span class="o">=</span> <span class="n">h</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">X_valid</span><span class="p">)</span>
<span class="n">cm</span> <span class="o">=</span> <span class="n">confusion_matrix</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">cm</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[9]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
array([[60, 16],
[20, 70]])
</pre></div></div>
</div>
<p>亦可以自訂創建混淆矩陣的函式。</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[10]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="k">def</span> <span class="nf">confusionMatrix</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">):</span>
<span class="n">n_unique</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="n">y_true</span><span class="p">)</span><span class="o">.</span><span class="n">size</span>
<span class="n">cm</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">((</span><span class="n">n_unique</span><span class="p">,</span> <span class="n">n_unique</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="nb">int</span><span class="p">)</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_unique</span><span class="p">):</span>
<span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_unique</span><span class="p">):</span>
<span class="n">n_obs</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">logical_and</span><span class="p">(</span><span class="n">y_true</span> <span class="o">==</span> <span class="n">i</span><span class="p">,</span> <span class="n">y_pred</span> <span class="o">==</span> <span class="n">j</span><span class="p">))</span>
<span class="n">cm</span><span class="p">[</span><span class="n">i</span><span class="p">,</span> <span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="n">n_obs</span>
<span class="k">return</span> <span class="n">cm</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[11]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">cm</span> <span class="o">=</span> <span class="n">confusionMatrix</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">cm</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[11]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
array([[60, 16],
[20, 70]])
</pre></div></div>
</div>
<p>準確率(Accuracy)是類別預測任務最常用的評估指標,分子是正確分類的觀測值個數,即真陰性加真陽性;分母是四個象限的觀測值個數總和,即目標向量的長度,準確率愈高代表分類器的表現愈好、反之則代表分類器的表現愈差。</p>
<p><span class="math">\begin{equation}
Accuracy = \frac{TN + TP}{TN + TP + FN + FP}
\end{equation}</span></p>
<p>使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">accuracy_score</span></code> 函式可以協助我們計算準確率。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[12]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">accuracy</span> <span class="o">=</span> <span class="n">accuracy_score</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">accuracy</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[12]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.7831325301204819
</pre></div></div>
</div>
<p>準確率的概念直觀,但是在一些狀況中並不這麼適合評估分類器的表現,像是陽性事件發生率極低的應用場景,例如罕見疾病或市場黑天鵝事件的預測任務。如果設計出一個樸素的分類器(Dummy classifier),它以目標向量中出現頻率最高的類別作為預測依據,如果以 1000 個觀測值中僅有 1 個陽性的情況舉例,準確率可以達到 0.999,是一個乍看之下非常漂亮的評估指標。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[13]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">y_true</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">1000</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="nb">int</span><span class="p">)</span>
<span class="n">y_true</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="n">y_pred</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">(</span><span class="mi">1000</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="nb">int</span><span class="p">)</span>
<span class="n">accuracy</span> <span class="o">=</span> <span class="n">accuracy_score</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">accuracy</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[13]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.999
</pre></div></div>
</div>
<p>然而這個分類器對預測陽性事件發生率極低的任務卻是完全無用處,亦即使用準確率來評估並不適合。這時使用精確率(Precision)與召回率(Recall)來進行評估會更加適合。精確率的分子是真陽性、分母是真陽性加偽陽性,它的意涵是分類器在所有預測為陽性的觀測值中,正確預測的觀測值數為多少;召回率的分子是真陽性、分母是真陽性加偽陰性,它的意涵是分類器在所有陽性的觀測值中,正確預測的觀測值數為多少。</p>
<p><span class="math">\begin{align}
Precision = \frac{TP}{TP + FP} \\
Recall = \frac{TP}{TP + FN}
\end{align}</span></p>
<p>相較準確率,精確率與召回率更專注評估分類器對陽性事件的預測能力,兩個指標愈高,代表模型的表現愈好。精確率如果表現要好除了真陽性高,偽陽性亦要想辦法降低,而召回率同樣若表現要好除了真陽性高,偽陰性亦要想辦法降低,因此在選擇採用精確率與召回率時,常會延伸探討偽陽性或偽陰性所衍生的誤判成本。採用精確率代表的要盡可能降低偽陽性,這表示的是偽陽性的成本高,意味著是誤判為陽性事件的成本高(例如誤診而進行高風險的手術);採用召回率代表的是要儘可能降低偽陰性,這表示的是偽陰性的成本高,意味著是誤判為陰性事件的成本高(例如誤診而導致超級傳播者沒有隔離而進入社區)。</p>
<p>使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">precision_score</span></code> 與 <code class="docutils literal notranslate"><span class="pre">recall_score</span></code> 函式可以協助我們計算精確率與召回率,這時可以看到樸素分類器在精確率和召回率都得到了最低的評估值。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[14]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">precision</span> <span class="o">=</span> <span class="n">precision_score</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">,</span> <span class="n">zero_division</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
<span class="n">recall</span> <span class="o">=</span> <span class="n">recall_score</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">precision</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="n">recall</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.0
0.0
</pre></div></div>
</div>
<p>評估分類模型的表現時可以同時將精確率與召回率納入考量,運用一個係數 <span class="math notranslate nohighlight">\(\beta\)</span> 加權兩個指標合成為一個稱為 F-score 的指標。</p>
<p><span class="math">\begin{equation}
F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{(\beta^2 \cdot precision) + recall}
\end{equation}</span></p>
<p><span class="math notranslate nohighlight">\(\beta\)</span> 係數的值可以表示對精確率或召回率的相對重視程度,如果偽陽性的成本遠高於偽陰性的成本,代表百分百重視精確率,這時代入 <span class="math notranslate nohighlight">\(\beta = 0\)</span> F-score 就會是精確率;如果偽陰性的成本遠高於偽陽性的成本,代表百分百重視召回率,這時代入 <span class="math notranslate nohighlight">\(\beta = \infty\)</span> F-score 就會是召回率;如果偽陽性的成本和偽陰性的成本相當,代表兩個指標同等重要,這時代入 <span class="math notranslate nohighlight">\(\beta = 1\)</span> F-score 就被稱為 F1-score,指標愈高,代表模型的表現愈好。</p>
<p><span class="math">\begin{equation}
F_{1} = 2 \cdot \frac{precision \cdot recall}{precision + recall}
\end{equation}</span></p>
<p>使用 Scikit-Learn 定義好的 <code class="docutils literal notranslate"><span class="pre">f1_score</span></code> 函式可以協助我們計算 F1-score,同樣可以看到樸素分類器依然在 F1-score 獲得了最低的評估值。</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[15]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">f1</span> <span class="o">=</span> <span class="n">f1_score</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">f1</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[15]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.0
</pre></div></div>
</div>
</div>
<div class="section" id="自訂計算評估指標的類別-ClfMetrics">
<h2>自訂計算評估指標的類別 <code class="docutils literal notranslate"><span class="pre">ClfMetrics</span></code><a class="headerlink" href="#自訂計算評估指標的類別-ClfMetrics" title="Permalink to this headline">¶</a></h2>
<p>我們亦可以根據混淆矩陣自訂分類器評估指標的類別。</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[16]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="k">class</span> <span class="nc">ClfMetrics</span><span class="p">:</span>
<span class="sd">"""</span>
<span class="sd"> This class calculates some of the metrics of classifier including accuracy, precision, recall, f1 according to confusion matrix.</span>
<span class="sd"> Args:</span>
<span class="sd"> y_true (ndarray): 1d-array for true target vector.</span>
<span class="sd"> y_pred (ndarray): 1d-array for predicted target vector.</span>
<span class="sd"> """</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">y_true</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_y_true</span> <span class="o">=</span> <span class="n">y_true</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_y_pred</span> <span class="o">=</span> <span class="n">y_pred</span>
<span class="k">def</span> <span class="nf">confusion_matrix</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function returns the confusion matrix given true/predicted target vectors.</span>
<span class="sd"> """</span>
<span class="n">n_unique</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">unique</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_y_true</span><span class="p">)</span><span class="o">.</span><span class="n">size</span>
<span class="n">cm</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">zeros</span><span class="p">((</span><span class="n">n_unique</span><span class="p">,</span> <span class="n">n_unique</span><span class="p">),</span> <span class="n">dtype</span><span class="o">=</span><span class="nb">int</span><span class="p">)</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_unique</span><span class="p">):</span>
<span class="k">for</span> <span class="n">j</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">n_unique</span><span class="p">):</span>
<span class="n">n_obs</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">logical_and</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_y_true</span> <span class="o">==</span> <span class="n">i</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_y_pred</span> <span class="o">==</span> <span class="n">j</span><span class="p">))</span>
<span class="n">cm</span><span class="p">[</span><span class="n">i</span><span class="p">,</span> <span class="n">j</span><span class="p">]</span> <span class="o">=</span> <span class="n">n_obs</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_tn</span> <span class="o">=</span> <span class="n">cm</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">0</span><span class="p">]</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_tp</span> <span class="o">=</span> <span class="n">cm</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_fn</span> <span class="o">=</span> <span class="n">cm</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">]</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_fp</span> <span class="o">=</span> <span class="n">cm</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span>
<span class="k">return</span> <span class="n">cm</span>
<span class="k">def</span> <span class="nf">accuracy_score</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function returns the accuracy score given true/predicted target vectors.</span>
<span class="sd"> """</span>
<span class="n">cm</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">confusion_matrix</span><span class="p">()</span>
<span class="n">accuracy</span> <span class="o">=</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_tn</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">_tp</span><span class="p">)</span> <span class="o">/</span> <span class="n">np</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">cm</span><span class="p">)</span>
<span class="k">return</span> <span class="n">accuracy</span>
<span class="k">def</span> <span class="nf">precision_score</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function returns the precision score given true/predicted target vectors.</span>
<span class="sd"> """</span>
<span class="n">precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_tp</span> <span class="o">/</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_tp</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">_fp</span><span class="p">)</span>
<span class="k">return</span> <span class="n">precision</span>
<span class="k">def</span> <span class="nf">recall_score</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function returns the recall score given true/predicted target vectors.</span>
<span class="sd"> """</span>
<span class="n">recall</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_tp</span> <span class="o">/</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_tp</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">_fn</span><span class="p">)</span>
<span class="k">return</span> <span class="n">recall</span>
<span class="k">def</span> <span class="nf">f1_score</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">beta</span><span class="o">=</span><span class="mi">1</span><span class="p">):</span>
<span class="sd">"""</span>
<span class="sd"> This function returns the f1 score given true/predicted target vectors.</span>
<span class="sd"> Args:</span>
<span class="sd"> beta (int, float): Can be used to generalize from f1 score to f score.</span>
<span class="sd"> """</span>
<span class="n">precision</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">precision_score</span><span class="p">()</span>
<span class="n">recall</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">recall_score</span><span class="p">()</span>
<span class="n">f1</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">+</span> <span class="n">beta</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span><span class="o">*</span><span class="n">precision</span><span class="o">*</span><span class="n">recall</span> <span class="o">/</span> <span class="p">((</span><span class="n">beta</span><span class="o">**</span><span class="mi">2</span> <span class="o">*</span> <span class="n">precision</span><span class="p">)</span> <span class="o">+</span> <span class="n">recall</span><span class="p">)</span>
<span class="k">return</span> <span class="n">f1</span>
</pre></div>
</div>
</div>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[17]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="n">pos</span> <span class="o">=</span> <span class="n">player_stats</span><span class="p">[</span><span class="s1">'pos'</span><span class="p">]</span><span class="o">.</span><span class="n">values</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mi">0</span> <span class="k">if</span> <span class="n">p</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">==</span> <span class="s1">'G'</span> <span class="k">else</span> <span class="mi">1</span> <span class="k">for</span> <span class="n">p</span> <span class="ow">in</span> <span class="n">pos</span><span class="p">])</span>
<span class="n">X_train</span><span class="p">,</span> <span class="n">X_valid</span><span class="p">,</span> <span class="n">y_train</span><span class="p">,</span> <span class="n">y_valid</span> <span class="o">=</span> <span class="n">train_test_split</span><span class="p">(</span><span class="n">X</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">test_size</span><span class="o">=</span><span class="mf">0.33</span><span class="p">,</span> <span class="n">random_state</span><span class="o">=</span><span class="mi">42</span><span class="p">)</span>
<span class="n">h</span> <span class="o">=</span> <span class="n">LogisticRegression</span><span class="p">()</span>
<span class="n">h</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">)</span>
<span class="n">y_pred</span> <span class="o">=</span> <span class="n">h</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">X_valid</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[18]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="c1"># 混淆矩陣</span>
<span class="n">clf_metrics</span> <span class="o">=</span> <span class="n">ClfMetrics</span><span class="p">(</span><span class="n">y_valid</span><span class="p">,</span> <span class="n">y_pred</span><span class="p">)</span>
<span class="n">clf_metrics</span><span class="o">.</span><span class="n">confusion_matrix</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[18]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
array([[60, 16],
[20, 70]])
</pre></div></div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[19]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="c1"># 準確率</span>
<span class="n">clf_metrics</span><span class="o">.</span><span class="n">accuracy_score</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[19]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.7831325301204819
</pre></div></div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[20]:
</pre></div>
</div>
<div class="input_area highlight-ipython3 notranslate"><div class="highlight"><pre>
<span></span><span class="c1"># 精確率</span>
<span class="n">clf_metrics</span><span class="o">.</span><span class="n">precision_score</span><span class="p">()</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[20]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.813953488372093
</pre></div></div>