forked from shokru/mlfactor.github.io
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathensemble.html
More file actions
1000 lines (920 loc) · 118 KB
/
ensemble.html
File metadata and controls
1000 lines (920 loc) · 118 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<!-- lang was empty (""); set to "en" to match the English content so screen
     readers select the correct speech synthesizer (WCAG 3.1.1). xml:lang kept
     in sync for XHTML-compatible consumers. -->
<html lang="en" xml:lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Chapter 11 Ensemble models | Machine Learning for Factor Investing</title>
<meta name="description" content="Chapter 11 Ensemble models | Machine Learning for Factor Investing">
<meta name="generator" content="bookdown 0.21 and GitBook 2.6.7">
<meta property="og:title" content="Chapter 11 Ensemble models | Machine Learning for Factor Investing">
<meta property="og:type" content="book">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="Chapter 11 Ensemble models | Machine Learning for Factor Investing">
<meta name="author" content="Guillaume Coqueret and Tony Guida">
<meta name="date" content="2021-01-08">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="apple-mobile-web-app-capable" content="yes">
<meta name="apple-mobile-web-app-status-bar-style" content="black">
<!-- prev/next hints for sequential book navigation (GitBook reader uses these) -->
<link rel="prev" href="valtune.html">
<link rel="next" href="backtest.html">
<script src="libs/jquery-2.2.3/jquery.min.js"></script>
<!-- GitBook theme and plugin stylesheets -->
<link href="libs/gitbook-2.6.7/css/style.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-table.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-bookdown.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-highlight.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-search.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-fontsettings.css" rel="stylesheet">
<link href="libs/gitbook-2.6.7/css/plugin-clipboard.css" rel="stylesheet">
<script src="libs/accessible-code-block-0.0.1/empty-anchor.js"></script>
<link href="libs/anchor-sections-1.0/anchor-sections.css" rel="stylesheet">
<script src="libs/anchor-sections-1.0/anchor-sections.js"></script>
<script src="libs/kePrint-0.0.1/kePrint.js"></script>
<link href="libs/lightable-0.0.1/lightable.css" rel="stylesheet">
<!-- Pandoc syntax-highlighting rules for R code listings; selectors and
     colors left exactly as generated so rendered listings are unchanged. -->
<style>
code.sourceCode > span { display: inline-block; line-height: 1.25; }
code.sourceCode > span { color: inherit; text-decoration: inherit; }
code.sourceCode > span:empty { height: 1.2em; }
.sourceCode { overflow: visible; }
code.sourceCode { white-space: pre; position: relative; }
pre.sourceCode { margin: 0; }
@media screen {
div.sourceCode { overflow: auto; }
}
@media print {
code.sourceCode { white-space: pre-wrap; }
code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
}
pre.numberSource code
  { counter-reset: source-line 0; }
pre.numberSource code > span
  { position: relative; left: -4em; counter-increment: source-line; }
pre.numberSource code > span > a:first-child::before
  { content: counter(source-line);
    position: relative; left: -1em; text-align: right; vertical-align: baseline;
    border: none; display: inline-block;
    -webkit-touch-callout: none; -webkit-user-select: none;
    -khtml-user-select: none; -moz-user-select: none;
    -ms-user-select: none; user-select: none;
    padding: 0 4px; width: 4em;
    color: #aaaaaa;
  }
pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; }
div.sourceCode
  { }
@media screen {
code.sourceCode > span > a:first-child::before { text-decoration: underline; }
}
code span.al { color: #ff0000; font-weight: bold; } /* Alert */
code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
code span.at { color: #7d9029; } /* Attribute */
code span.bn { color: #40a070; } /* BaseN */
code span.bu { } /* BuiltIn */
code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
code span.ch { color: #4070a0; } /* Char */
code span.cn { color: #880000; } /* Constant */
code span.co { color: #60a0b0; font-style: italic; } /* Comment */
code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
code span.do { color: #ba2121; font-style: italic; } /* Documentation */
code span.dt { color: #902000; } /* DataType */
code span.dv { color: #40a070; } /* DecVal */
code span.er { color: #ff0000; font-weight: bold; } /* Error */
code span.ex { } /* Extension */
code span.fl { color: #40a070; } /* Float */
code span.fu { color: #06287e; } /* Function */
code span.im { } /* Import */
code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
code span.kw { color: #007020; font-weight: bold; } /* Keyword */
code span.op { color: #666666; } /* Operator */
code span.ot { color: #007020; } /* Other */
code span.pp { color: #bc7a00; } /* Preprocessor */
code span.sc { color: #4070a0; } /* SpecialChar */
code span.ss { color: #bb6688; } /* SpecialString */
code span.st { color: #4070a0; } /* String */
code span.va { color: #19177c; } /* Variable */
code span.vs { color: #4070a0; } /* VerbatimString */
code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
</style>
</head>
<body>
<div class="book without-animation with-summary font-size-2 font-family-1" data-basepath=".">
<div class="book-summary">
<nav role="navigation">
<ul class="summary">
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html"><i class="fa fa-check"></i>Preface</a><ul>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#what-this-book-is-not-about"><i class="fa fa-check"></i>What this book is not about</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#the-targeted-audience"><i class="fa fa-check"></i>The targeted audience</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#how-this-book-is-structured"><i class="fa fa-check"></i>How this book is structured</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#companion-website"><i class="fa fa-check"></i>Companion website</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#why-r"><i class="fa fa-check"></i>Why R?</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#coding-instructions"><i class="fa fa-check"></i>Coding instructions</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#acknowledgments"><i class="fa fa-check"></i>Acknowledgments</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#future-developments"><i class="fa fa-check"></i>Future developments</a></li>
</ul></li>
<li class="part"><span><b>I Introduction</b></span></li>
<li class="chapter" data-level="1" data-path="notdata.html"><a href="notdata.html"><i class="fa fa-check"></i><b>1</b> Notations and data</a><ul>
<li class="chapter" data-level="1.1" data-path="notdata.html"><a href="notdata.html#notations"><i class="fa fa-check"></i><b>1.1</b> Notations</a></li>
<li class="chapter" data-level="1.2" data-path="notdata.html"><a href="notdata.html#dataset"><i class="fa fa-check"></i><b>1.2</b> Dataset</a></li>
</ul></li>
<li class="chapter" data-level="2" data-path="intro.html"><a href="intro.html"><i class="fa fa-check"></i><b>2</b> Introduction</a><ul>
<li class="chapter" data-level="2.1" data-path="intro.html"><a href="intro.html#context"><i class="fa fa-check"></i><b>2.1</b> Context</a></li>
<li class="chapter" data-level="2.2" data-path="intro.html"><a href="intro.html#portfolio-construction-the-workflow"><i class="fa fa-check"></i><b>2.2</b> Portfolio construction: the workflow</a></li>
<li class="chapter" data-level="2.3" data-path="intro.html"><a href="intro.html#machine-learning-is-no-magic-wand"><i class="fa fa-check"></i><b>2.3</b> Machine learning is no magic wand</a></li>
</ul></li>
<li class="chapter" data-level="3" data-path="factor.html"><a href="factor.html"><i class="fa fa-check"></i><b>3</b> Factor investing and asset pricing anomalies</a><ul>
<li class="chapter" data-level="3.1" data-path="factor.html"><a href="factor.html#introduction"><i class="fa fa-check"></i><b>3.1</b> Introduction</a></li>
<li class="chapter" data-level="3.2" data-path="factor.html"><a href="factor.html#detecting-anomalies"><i class="fa fa-check"></i><b>3.2</b> Detecting anomalies</a><ul>
<li class="chapter" data-level="3.2.1" data-path="factor.html"><a href="factor.html#challenges"><i class="fa fa-check"></i><b>3.2.1</b> Challenges</a></li>
<li class="chapter" data-level="3.2.2" data-path="factor.html"><a href="factor.html#simple-portfolio-sorts"><i class="fa fa-check"></i><b>3.2.2</b> Simple portfolio sorts </a></li>
<li class="chapter" data-level="3.2.3" data-path="factor.html"><a href="factor.html#factors"><i class="fa fa-check"></i><b>3.2.3</b> Factors</a></li>
<li class="chapter" data-level="3.2.4" data-path="factor.html"><a href="factor.html#predictive-regressions-sorts-and-p-value-issues"><i class="fa fa-check"></i><b>3.2.4</b> Predictive regressions, sorts, and p-value issues</a></li>
<li class="chapter" data-level="3.2.5" data-path="factor.html"><a href="factor.html#fama-macbeth-regressions"><i class="fa fa-check"></i><b>3.2.5</b> Fama-Macbeth regressions</a></li>
<li class="chapter" data-level="3.2.6" data-path="factor.html"><a href="factor.html#factor-competition"><i class="fa fa-check"></i><b>3.2.6</b> Factor competition</a></li>
<li class="chapter" data-level="3.2.7" data-path="factor.html"><a href="factor.html#advanced-techniques"><i class="fa fa-check"></i><b>3.2.7</b> Advanced techniques</a></li>
</ul></li>
<li class="chapter" data-level="3.3" data-path="factor.html"><a href="factor.html#factors-or-characteristics"><i class="fa fa-check"></i><b>3.3</b> Factors or characteristics?</a></li>
<li class="chapter" data-level="3.4" data-path="factor.html"><a href="factor.html#hot-topics-momentum-timing-and-esg"><i class="fa fa-check"></i><b>3.4</b> Hot topics: momentum, timing and ESG</a><ul>
<li class="chapter" data-level="3.4.1" data-path="factor.html"><a href="factor.html#factor-momentum"><i class="fa fa-check"></i><b>3.4.1</b> Factor momentum</a></li>
<li class="chapter" data-level="3.4.2" data-path="factor.html"><a href="factor.html#factor-timing"><i class="fa fa-check"></i><b>3.4.2</b> Factor timing</a></li>
<li class="chapter" data-level="3.4.3" data-path="factor.html"><a href="factor.html#the-green-factors"><i class="fa fa-check"></i><b>3.4.3</b> The green factors</a></li>
</ul></li>
<li class="chapter" data-level="3.5" data-path="factor.html"><a href="factor.html#the-links-with-machine-learning"><i class="fa fa-check"></i><b>3.5</b> The links with machine learning</a><ul>
<li class="chapter" data-level="3.5.1" data-path="factor.html"><a href="factor.html#a-short-list-of-recent-references"><i class="fa fa-check"></i><b>3.5.1</b> A short list of recent references</a></li>
<li class="chapter" data-level="3.5.2" data-path="factor.html"><a href="factor.html#explicit-connections-with-asset-pricing-models"><i class="fa fa-check"></i><b>3.5.2</b> Explicit connections with asset pricing models</a></li>
</ul></li>
<li class="chapter" data-level="3.6" data-path="factor.html"><a href="factor.html#coding-exercises"><i class="fa fa-check"></i><b>3.6</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="4" data-path="Data.html"><a href="Data.html"><i class="fa fa-check"></i><b>4</b> Data preprocessing</a><ul>
<li class="chapter" data-level="4.1" data-path="Data.html"><a href="Data.html#know-your-data"><i class="fa fa-check"></i><b>4.1</b> Know your data</a></li>
<li class="chapter" data-level="4.2" data-path="Data.html"><a href="Data.html#missing-data"><i class="fa fa-check"></i><b>4.2</b> Missing data</a></li>
<li class="chapter" data-level="4.3" data-path="Data.html"><a href="Data.html#outlier-detection"><i class="fa fa-check"></i><b>4.3</b> Outlier detection</a></li>
<li class="chapter" data-level="4.4" data-path="Data.html"><a href="Data.html#feateng"><i class="fa fa-check"></i><b>4.4</b> Feature engineering</a><ul>
<li class="chapter" data-level="4.4.1" data-path="Data.html"><a href="Data.html#feature-selection"><i class="fa fa-check"></i><b>4.4.1</b> Feature selection</a></li>
<li class="chapter" data-level="4.4.2" data-path="Data.html"><a href="Data.html#scaling"><i class="fa fa-check"></i><b>4.4.2</b> Scaling the predictors</a></li>
</ul></li>
<li class="chapter" data-level="4.5" data-path="Data.html"><a href="Data.html#labelling"><i class="fa fa-check"></i><b>4.5</b> Labelling</a><ul>
<li class="chapter" data-level="4.5.1" data-path="Data.html"><a href="Data.html#simple-labels"><i class="fa fa-check"></i><b>4.5.1</b> Simple labels</a></li>
<li class="chapter" data-level="4.5.2" data-path="Data.html"><a href="Data.html#categorical-labels"><i class="fa fa-check"></i><b>4.5.2</b> Categorical labels</a></li>
<li class="chapter" data-level="4.5.3" data-path="Data.html"><a href="Data.html#the-triple-barrier-method"><i class="fa fa-check"></i><b>4.5.3</b> The triple barrier method</a></li>
<li class="chapter" data-level="4.5.4" data-path="Data.html"><a href="Data.html#filtering-the-sample"><i class="fa fa-check"></i><b>4.5.4</b> Filtering the sample</a></li>
<li class="chapter" data-level="4.5.5" data-path="Data.html"><a href="Data.html#horizons"><i class="fa fa-check"></i><b>4.5.5</b> Return horizons</a></li>
</ul></li>
<li class="chapter" data-level="4.6" data-path="Data.html"><a href="Data.html#pers"><i class="fa fa-check"></i><b>4.6</b> Handling persistence</a></li>
<li class="chapter" data-level="4.7" data-path="Data.html"><a href="Data.html#extensions"><i class="fa fa-check"></i><b>4.7</b> Extensions</a><ul>
<li class="chapter" data-level="4.7.1" data-path="Data.html"><a href="Data.html#transforming-features"><i class="fa fa-check"></i><b>4.7.1</b> Transforming features</a></li>
<li class="chapter" data-level="4.7.2" data-path="Data.html"><a href="Data.html#macrovar"><i class="fa fa-check"></i><b>4.7.2</b> Macro-economic variables</a></li>
<li class="chapter" data-level="4.7.3" data-path="Data.html"><a href="Data.html#active-learning"><i class="fa fa-check"></i><b>4.7.3</b> Active learning</a></li>
</ul></li>
<li class="chapter" data-level="4.8" data-path="Data.html"><a href="Data.html#additional-code-and-results"><i class="fa fa-check"></i><b>4.8</b> Additional code and results</a><ul>
<li class="chapter" data-level="4.8.1" data-path="Data.html"><a href="Data.html#impact-of-rescaling-graphical-representation"><i class="fa fa-check"></i><b>4.8.1</b> Impact of rescaling: graphical representation</a></li>
<li class="chapter" data-level="4.8.2" data-path="Data.html"><a href="Data.html#impact-of-rescaling-toy-example"><i class="fa fa-check"></i><b>4.8.2</b> Impact of rescaling: toy example</a></li>
</ul></li>
<li class="chapter" data-level="4.9" data-path="Data.html"><a href="Data.html#coding-exercises-1"><i class="fa fa-check"></i><b>4.9</b> Coding exercises</a></li>
</ul></li>
<li class="part"><span><b>II Common supervised algorithms</b></span></li>
<li class="chapter" data-level="5" data-path="lasso.html"><a href="lasso.html"><i class="fa fa-check"></i><b>5</b> Penalized regressions and sparse hedging for minimum variance portfolios</a><ul>
<li class="chapter" data-level="5.1" data-path="lasso.html"><a href="lasso.html#penalized-regressions"><i class="fa fa-check"></i><b>5.1</b> Penalized regressions</a><ul>
<li class="chapter" data-level="5.1.1" data-path="lasso.html"><a href="lasso.html#penreg"><i class="fa fa-check"></i><b>5.1.1</b> Simple regressions</a></li>
<li class="chapter" data-level="5.1.2" data-path="lasso.html"><a href="lasso.html#forms-of-penalizations"><i class="fa fa-check"></i><b>5.1.2</b> Forms of penalizations</a></li>
<li class="chapter" data-level="5.1.3" data-path="lasso.html"><a href="lasso.html#illustrations"><i class="fa fa-check"></i><b>5.1.3</b> Illustrations</a></li>
</ul></li>
<li class="chapter" data-level="5.2" data-path="lasso.html"><a href="lasso.html#sparse-hedging-for-minimum-variance-portfolios"><i class="fa fa-check"></i><b>5.2</b> Sparse hedging for minimum variance portfolios</a><ul>
<li class="chapter" data-level="5.2.1" data-path="lasso.html"><a href="lasso.html#presentation-and-derivations"><i class="fa fa-check"></i><b>5.2.1</b> Presentation and derivations</a></li>
<li class="chapter" data-level="5.2.2" data-path="lasso.html"><a href="lasso.html#sparseex"><i class="fa fa-check"></i><b>5.2.2</b> Example</a></li>
</ul></li>
<li class="chapter" data-level="5.3" data-path="lasso.html"><a href="lasso.html#predictive-regressions"><i class="fa fa-check"></i><b>5.3</b> Predictive regressions</a><ul>
<li class="chapter" data-level="5.3.1" data-path="lasso.html"><a href="lasso.html#literature-review-and-principle"><i class="fa fa-check"></i><b>5.3.1</b> Literature review and principle</a></li>
<li class="chapter" data-level="5.3.2" data-path="lasso.html"><a href="lasso.html#code-and-results"><i class="fa fa-check"></i><b>5.3.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="5.4" data-path="lasso.html"><a href="lasso.html#coding-exercise"><i class="fa fa-check"></i><b>5.4</b> Coding exercise</a></li>
</ul></li>
<li class="chapter" data-level="6" data-path="trees.html"><a href="trees.html"><i class="fa fa-check"></i><b>6</b> Tree-based methods</a><ul>
<li class="chapter" data-level="6.1" data-path="trees.html"><a href="trees.html#simple-trees"><i class="fa fa-check"></i><b>6.1</b> Simple trees</a><ul>
<li class="chapter" data-level="6.1.1" data-path="trees.html"><a href="trees.html#principle"><i class="fa fa-check"></i><b>6.1.1</b> Principle</a></li>
<li class="chapter" data-level="6.1.2" data-path="trees.html"><a href="trees.html#treeclass"><i class="fa fa-check"></i><b>6.1.2</b> Further details on classification</a></li>
<li class="chapter" data-level="6.1.3" data-path="trees.html"><a href="trees.html#pruning-criteria"><i class="fa fa-check"></i><b>6.1.3</b> Pruning criteria</a></li>
<li class="chapter" data-level="6.1.4" data-path="trees.html"><a href="trees.html#code-and-interpretation"><i class="fa fa-check"></i><b>6.1.4</b> Code and interpretation</a></li>
</ul></li>
<li class="chapter" data-level="6.2" data-path="trees.html"><a href="trees.html#random-forests"><i class="fa fa-check"></i><b>6.2</b> Random forests</a><ul>
<li class="chapter" data-level="6.2.1" data-path="trees.html"><a href="trees.html#principle-1"><i class="fa fa-check"></i><b>6.2.1</b> Principle</a></li>
<li class="chapter" data-level="6.2.2" data-path="trees.html"><a href="trees.html#code-and-results-1"><i class="fa fa-check"></i><b>6.2.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="6.3" data-path="trees.html"><a href="trees.html#adaboost"><i class="fa fa-check"></i><b>6.3</b> Boosted trees: Adaboost</a><ul>
<li class="chapter" data-level="6.3.1" data-path="trees.html"><a href="trees.html#methodology"><i class="fa fa-check"></i><b>6.3.1</b> Methodology</a></li>
<li class="chapter" data-level="6.3.2" data-path="trees.html"><a href="trees.html#illustration"><i class="fa fa-check"></i><b>6.3.2</b> Illustration</a></li>
</ul></li>
<li class="chapter" data-level="6.4" data-path="trees.html"><a href="trees.html#boosted-trees-extreme-gradient-boosting"><i class="fa fa-check"></i><b>6.4</b> Boosted trees: extreme gradient boosting</a><ul>
<li class="chapter" data-level="6.4.1" data-path="trees.html"><a href="trees.html#managing-loss"><i class="fa fa-check"></i><b>6.4.1</b> Managing loss</a></li>
<li class="chapter" data-level="6.4.2" data-path="trees.html"><a href="trees.html#penalization"><i class="fa fa-check"></i><b>6.4.2</b> Penalization</a></li>
<li class="chapter" data-level="6.4.3" data-path="trees.html"><a href="trees.html#aggregation"><i class="fa fa-check"></i><b>6.4.3</b> Aggregation</a></li>
<li class="chapter" data-level="6.4.4" data-path="trees.html"><a href="trees.html#tree-structure"><i class="fa fa-check"></i><b>6.4.4</b> Tree structure</a></li>
<li class="chapter" data-level="6.4.5" data-path="trees.html"><a href="trees.html#boostext"><i class="fa fa-check"></i><b>6.4.5</b> Extensions</a></li>
<li class="chapter" data-level="6.4.6" data-path="trees.html"><a href="trees.html#boostcode"><i class="fa fa-check"></i><b>6.4.6</b> Code and results</a></li>
<li class="chapter" data-level="6.4.7" data-path="trees.html"><a href="trees.html#instweight"><i class="fa fa-check"></i><b>6.4.7</b> Instance weighting</a></li>
</ul></li>
<li class="chapter" data-level="6.5" data-path="trees.html"><a href="trees.html#discussion"><i class="fa fa-check"></i><b>6.5</b> Discussion</a></li>
<li class="chapter" data-level="6.6" data-path="trees.html"><a href="trees.html#coding-exercises-2"><i class="fa fa-check"></i><b>6.6</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="7" data-path="NN.html"><a href="NN.html"><i class="fa fa-check"></i><b>7</b> Neural networks</a><ul>
<li class="chapter" data-level="7.1" data-path="NN.html"><a href="NN.html#the-original-perceptron"><i class="fa fa-check"></i><b>7.1</b> The original perceptron</a></li>
<li class="chapter" data-level="7.2" data-path="NN.html"><a href="NN.html#multilayer-perceptron"><i class="fa fa-check"></i><b>7.2</b> Multilayer perceptron</a><ul>
<li class="chapter" data-level="7.2.1" data-path="NN.html"><a href="NN.html#introduction-and-notations"><i class="fa fa-check"></i><b>7.2.1</b> Introduction and notations</a></li>
<li class="chapter" data-level="7.2.2" data-path="NN.html"><a href="NN.html#universal-approximation"><i class="fa fa-check"></i><b>7.2.2</b> Universal approximation</a></li>
<li class="chapter" data-level="7.2.3" data-path="NN.html"><a href="NN.html#backprop"><i class="fa fa-check"></i><b>7.2.3</b> Learning via back-propagation</a></li>
<li class="chapter" data-level="7.2.4" data-path="NN.html"><a href="NN.html#further-details-on-classification"><i class="fa fa-check"></i><b>7.2.4</b> Further details on classification</a></li>
</ul></li>
<li class="chapter" data-level="7.3" data-path="NN.html"><a href="NN.html#howdeep"><i class="fa fa-check"></i><b>7.3</b> How deep we should go and other practical issues</a><ul>
<li class="chapter" data-level="7.3.1" data-path="NN.html"><a href="NN.html#architectural-choices"><i class="fa fa-check"></i><b>7.3.1</b> Architectural choices</a></li>
<li class="chapter" data-level="7.3.2" data-path="NN.html"><a href="NN.html#frequency-of-weight-updates-and-learning-duration"><i class="fa fa-check"></i><b>7.3.2</b> Frequency of weight updates and learning duration</a></li>
<li class="chapter" data-level="7.3.3" data-path="NN.html"><a href="NN.html#penalizations-and-dropout"><i class="fa fa-check"></i><b>7.3.3</b> Penalizations and dropout</a></li>
</ul></li>
<li class="chapter" data-level="7.4" data-path="NN.html"><a href="NN.html#code-samples-and-comments-for-vanilla-mlp"><i class="fa fa-check"></i><b>7.4</b> Code samples and comments for vanilla MLP</a><ul>
<li class="chapter" data-level="7.4.1" data-path="NN.html"><a href="NN.html#regression-example"><i class="fa fa-check"></i><b>7.4.1</b> Regression example</a></li>
<li class="chapter" data-level="7.4.2" data-path="NN.html"><a href="NN.html#classification-example"><i class="fa fa-check"></i><b>7.4.2</b> Classification example</a></li>
<li class="chapter" data-level="7.4.3" data-path="NN.html"><a href="NN.html#custloss"><i class="fa fa-check"></i><b>7.4.3</b> Custom losses</a></li>
</ul></li>
<li class="chapter" data-level="7.5" data-path="NN.html"><a href="NN.html#recurrent-networks"><i class="fa fa-check"></i><b>7.5</b> Recurrent networks</a><ul>
<li class="chapter" data-level="7.5.1" data-path="NN.html"><a href="NN.html#presentation"><i class="fa fa-check"></i><b>7.5.1</b> Presentation</a></li>
<li class="chapter" data-level="7.5.2" data-path="NN.html"><a href="NN.html#code-and-results-2"><i class="fa fa-check"></i><b>7.5.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="7.6" data-path="NN.html"><a href="NN.html#other-common-architectures"><i class="fa fa-check"></i><b>7.6</b> Other common architectures</a><ul>
<li class="chapter" data-level="7.6.1" data-path="NN.html"><a href="NN.html#generative-aversarial-networks"><i class="fa fa-check"></i><b>7.6.1</b> Generative adversarial networks</a></li>
<li class="chapter" data-level="7.6.2" data-path="NN.html"><a href="NN.html#autoencoders"><i class="fa fa-check"></i><b>7.6.2</b> Autoencoders</a></li>
<li class="chapter" data-level="7.6.3" data-path="NN.html"><a href="NN.html#a-word-on-convolutional-networks"><i class="fa fa-check"></i><b>7.6.3</b> A word on convolutional networks</a></li>
<li class="chapter" data-level="7.6.4" data-path="NN.html"><a href="NN.html#advanced-architectures"><i class="fa fa-check"></i><b>7.6.4</b> Advanced architectures</a></li>
</ul></li>
<li class="chapter" data-level="7.7" data-path="NN.html"><a href="NN.html#coding-exercise-1"><i class="fa fa-check"></i><b>7.7</b> Coding exercise</a></li>
</ul></li>
<li class="chapter" data-level="8" data-path="svm.html"><a href="svm.html"><i class="fa fa-check"></i><b>8</b> Support vector machines</a><ul>
<li class="chapter" data-level="8.1" data-path="svm.html"><a href="svm.html#svm-for-classification"><i class="fa fa-check"></i><b>8.1</b> SVM for classification</a></li>
<li class="chapter" data-level="8.2" data-path="svm.html"><a href="svm.html#svm-for-regression"><i class="fa fa-check"></i><b>8.2</b> SVM for regression</a></li>
<li class="chapter" data-level="8.3" data-path="svm.html"><a href="svm.html#practice"><i class="fa fa-check"></i><b>8.3</b> Practice</a></li>
<li class="chapter" data-level="8.4" data-path="svm.html"><a href="svm.html#coding-exercises-3"><i class="fa fa-check"></i><b>8.4</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="9" data-path="bayes.html"><a href="bayes.html"><i class="fa fa-check"></i><b>9</b> Bayesian methods</a><ul>
<li class="chapter" data-level="9.1" data-path="bayes.html"><a href="bayes.html#the-bayesian-framework"><i class="fa fa-check"></i><b>9.1</b> The Bayesian framework</a></li>
<li class="chapter" data-level="9.2" data-path="bayes.html"><a href="bayes.html#bayesian-sampling"><i class="fa fa-check"></i><b>9.2</b> Bayesian sampling</a><ul>
<li class="chapter" data-level="9.2.1" data-path="bayes.html"><a href="bayes.html#gibbs-sampling"><i class="fa fa-check"></i><b>9.2.1</b> Gibbs sampling</a></li>
<li class="chapter" data-level="9.2.2" data-path="bayes.html"><a href="bayes.html#metropolis-hastings-sampling"><i class="fa fa-check"></i><b>9.2.2</b> Metropolis-Hastings sampling</a></li>
</ul></li>
<li class="chapter" data-level="9.3" data-path="bayes.html"><a href="bayes.html#bayesian-linear-regression"><i class="fa fa-check"></i><b>9.3</b> Bayesian linear regression</a></li>
<li class="chapter" data-level="9.4" data-path="bayes.html"><a href="bayes.html#naive-bayes-classifier"><i class="fa fa-check"></i><b>9.4</b> Naive Bayes classifier</a></li>
<li class="chapter" data-level="9.5" data-path="bayes.html"><a href="bayes.html#BART"><i class="fa fa-check"></i><b>9.5</b> Bayesian additive trees</a><ul>
<li class="chapter" data-level="9.5.1" data-path="bayes.html"><a href="bayes.html#general-formulation"><i class="fa fa-check"></i><b>9.5.1</b> General formulation</a></li>
<li class="chapter" data-level="9.5.2" data-path="bayes.html"><a href="bayes.html#priors"><i class="fa fa-check"></i><b>9.5.2</b> Priors</a></li>
<li class="chapter" data-level="9.5.3" data-path="bayes.html"><a href="bayes.html#sampling-and-predictions"><i class="fa fa-check"></i><b>9.5.3</b> Sampling and predictions</a></li>
<li class="chapter" data-level="9.5.4" data-path="bayes.html"><a href="bayes.html#code"><i class="fa fa-check"></i><b>9.5.4</b> Code</a></li>
</ul></li>
</ul></li>
<li class="part"><span><b>III From predictions to portfolios</b></span></li>
<li class="chapter" data-level="10" data-path="valtune.html"><a href="valtune.html"><i class="fa fa-check"></i><b>10</b> Validating and tuning</a><ul>
<li class="chapter" data-level="10.1" data-path="valtune.html"><a href="valtune.html#mlmetrics"><i class="fa fa-check"></i><b>10.1</b> Learning metrics</a><ul>
<li class="chapter" data-level="10.1.1" data-path="valtune.html"><a href="valtune.html#regression-analysis"><i class="fa fa-check"></i><b>10.1.1</b> Regression analysis</a></li>
<li class="chapter" data-level="10.1.2" data-path="valtune.html"><a href="valtune.html#classification-analysis"><i class="fa fa-check"></i><b>10.1.2</b> Classification analysis</a></li>
</ul></li>
<li class="chapter" data-level="10.2" data-path="valtune.html"><a href="valtune.html#validation"><i class="fa fa-check"></i><b>10.2</b> Validation</a><ul>
<li class="chapter" data-level="10.2.1" data-path="valtune.html"><a href="valtune.html#the-variance-bias-tradeoff-theory"><i class="fa fa-check"></i><b>10.2.1</b> The variance-bias tradeoff: theory</a></li>
<li class="chapter" data-level="10.2.2" data-path="valtune.html"><a href="valtune.html#the-variance-bias-tradeoff-illustration"><i class="fa fa-check"></i><b>10.2.2</b> The variance-bias tradeoff: illustration</a></li>
<li class="chapter" data-level="10.2.3" data-path="valtune.html"><a href="valtune.html#the-risk-of-overfitting-principle"><i class="fa fa-check"></i><b>10.2.3</b> The risk of overfitting: principle</a></li>
<li class="chapter" data-level="10.2.4" data-path="valtune.html"><a href="valtune.html#the-risk-of-overfitting-some-solutions"><i class="fa fa-check"></i><b>10.2.4</b> The risk of overfitting: some solutions</a></li>
</ul></li>
<li class="chapter" data-level="10.3" data-path="valtune.html"><a href="valtune.html#the-search-for-good-hyperparameters"><i class="fa fa-check"></i><b>10.3</b> The search for good hyperparameters</a><ul>
<li class="chapter" data-level="10.3.1" data-path="valtune.html"><a href="valtune.html#methods"><i class="fa fa-check"></i><b>10.3.1</b> Methods</a></li>
<li class="chapter" data-level="10.3.2" data-path="valtune.html"><a href="valtune.html#example-grid-search"><i class="fa fa-check"></i><b>10.3.2</b> Example: grid search</a></li>
<li class="chapter" data-level="10.3.3" data-path="valtune.html"><a href="valtune.html#example-bayesian-optimization"><i class="fa fa-check"></i><b>10.3.3</b> Example: Bayesian optimization</a></li>
</ul></li>
<li class="chapter" data-level="10.4" data-path="valtune.html"><a href="valtune.html#short-discussion-on-validation-in-backtests"><i class="fa fa-check"></i><b>10.4</b> Short discussion on validation in backtests</a></li>
</ul></li>
<li class="chapter" data-level="11" data-path="ensemble.html"><a href="ensemble.html"><i class="fa fa-check"></i><b>11</b> Ensemble models</a><ul>
<li class="chapter" data-level="11.1" data-path="ensemble.html"><a href="ensemble.html#linear-ensembles"><i class="fa fa-check"></i><b>11.1</b> Linear ensembles</a><ul>
<li class="chapter" data-level="11.1.1" data-path="ensemble.html"><a href="ensemble.html#principles"><i class="fa fa-check"></i><b>11.1.1</b> Principles</a></li>
<li class="chapter" data-level="11.1.2" data-path="ensemble.html"><a href="ensemble.html#example"><i class="fa fa-check"></i><b>11.1.2</b> Example</a></li>
</ul></li>
<li class="chapter" data-level="11.2" data-path="ensemble.html"><a href="ensemble.html#stacked-ensembles"><i class="fa fa-check"></i><b>11.2</b> Stacked ensembles</a><ul>
<li class="chapter" data-level="11.2.1" data-path="ensemble.html"><a href="ensemble.html#two-stage-training"><i class="fa fa-check"></i><b>11.2.1</b> Two-stage training</a></li>
<li class="chapter" data-level="11.2.2" data-path="ensemble.html"><a href="ensemble.html#code-and-results-3"><i class="fa fa-check"></i><b>11.2.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="11.3" data-path="ensemble.html"><a href="ensemble.html#extensions-1"><i class="fa fa-check"></i><b>11.3</b> Extensions</a><ul>
<li class="chapter" data-level="11.3.1" data-path="ensemble.html"><a href="ensemble.html#exogenous-variables"><i class="fa fa-check"></i><b>11.3.1</b> Exogenous variables</a></li>
<li class="chapter" data-level="11.3.2" data-path="ensemble.html"><a href="ensemble.html#shrinking-inter-model-correlations"><i class="fa fa-check"></i><b>11.3.2</b> Shrinking inter-model correlations</a></li>
</ul></li>
<li class="chapter" data-level="11.4" data-path="ensemble.html"><a href="ensemble.html#exercise"><i class="fa fa-check"></i><b>11.4</b> Exercise</a></li>
</ul></li>
<li class="chapter" data-level="12" data-path="backtest.html"><a href="backtest.html"><i class="fa fa-check"></i><b>12</b> Portfolio backtesting</a><ul>
<li class="chapter" data-level="12.1" data-path="backtest.html"><a href="backtest.html#protocol"><i class="fa fa-check"></i><b>12.1</b> Setting the protocol</a></li>
<li class="chapter" data-level="12.2" data-path="backtest.html"><a href="backtest.html#turning-signals-into-portfolio-weights"><i class="fa fa-check"></i><b>12.2</b> Turning signals into portfolio weights</a></li>
<li class="chapter" data-level="12.3" data-path="backtest.html"><a href="backtest.html#perfmet"><i class="fa fa-check"></i><b>12.3</b> Performance metrics</a><ul>
<li class="chapter" data-level="12.3.1" data-path="backtest.html"><a href="backtest.html#discussion-1"><i class="fa fa-check"></i><b>12.3.1</b> Discussion</a></li>
<li class="chapter" data-level="12.3.2" data-path="backtest.html"><a href="backtest.html#pure-performance-and-risk-indicators"><i class="fa fa-check"></i><b>12.3.2</b> Pure performance and risk indicators</a></li>
<li class="chapter" data-level="12.3.3" data-path="backtest.html"><a href="backtest.html#factor-based-evaluation"><i class="fa fa-check"></i><b>12.3.3</b> Factor-based evaluation</a></li>
<li class="chapter" data-level="12.3.4" data-path="backtest.html"><a href="backtest.html#risk-adjusted-measures"><i class="fa fa-check"></i><b>12.3.4</b> Risk-adjusted measures</a></li>
<li class="chapter" data-level="12.3.5" data-path="backtest.html"><a href="backtest.html#transaction-costs-and-turnover"><i class="fa fa-check"></i><b>12.3.5</b> Transaction costs and turnover</a></li>
</ul></li>
<li class="chapter" data-level="12.4" data-path="backtest.html"><a href="backtest.html#common-errors-and-issues"><i class="fa fa-check"></i><b>12.4</b> Common errors and issues</a><ul>
<li class="chapter" data-level="12.4.1" data-path="backtest.html"><a href="backtest.html#forward-looking-data"><i class="fa fa-check"></i><b>12.4.1</b> Forward looking data</a></li>
<li class="chapter" data-level="12.4.2" data-path="backtest.html"><a href="backtest.html#backov"><i class="fa fa-check"></i><b>12.4.2</b> Backtest overfitting</a></li>
<li class="chapter" data-level="12.4.3" data-path="backtest.html"><a href="backtest.html#simple-safeguards"><i class="fa fa-check"></i><b>12.4.3</b> Simple safeguards</a></li>
</ul></li>
<li class="chapter" data-level="12.5" data-path="backtest.html"><a href="backtest.html#implication-of-non-stationarity-forecasting-is-hard"><i class="fa fa-check"></i><b>12.5</b> Implication of non-stationarity: forecasting is hard</a><ul>
<li class="chapter" data-level="12.5.1" data-path="backtest.html"><a href="backtest.html#general-comments"><i class="fa fa-check"></i><b>12.5.1</b> General comments</a></li>
<li class="chapter" data-level="12.5.2" data-path="backtest.html"><a href="backtest.html#the-no-free-lunch-theorem"><i class="fa fa-check"></i><b>12.5.2</b> The no free lunch theorem</a></li>
</ul></li>
<li class="chapter" data-level="12.6" data-path="backtest.html"><a href="backtest.html#first-example-a-complete-backtest"><i class="fa fa-check"></i><b>12.6</b> First example: a complete backtest</a></li>
<li class="chapter" data-level="12.7" data-path="backtest.html"><a href="backtest.html#second-example-backtest-overfitting"><i class="fa fa-check"></i><b>12.7</b> Second example: backtest overfitting</a></li>
<li class="chapter" data-level="12.8" data-path="backtest.html"><a href="backtest.html#coding-exercises-4"><i class="fa fa-check"></i><b>12.8</b> Coding exercises</a></li>
</ul></li>
<li class="part"><span><b>IV Further important topics</b></span></li>
<li class="chapter" data-level="13" data-path="interp.html"><a href="interp.html"><i class="fa fa-check"></i><b>13</b> Interpretability</a><ul>
<li class="chapter" data-level="13.1" data-path="interp.html"><a href="interp.html#global-interpretations"><i class="fa fa-check"></i><b>13.1</b> Global interpretations</a><ul>
<li class="chapter" data-level="13.1.1" data-path="interp.html"><a href="interp.html#surr"><i class="fa fa-check"></i><b>13.1.1</b> Simple models as surrogates</a></li>
<li class="chapter" data-level="13.1.2" data-path="interp.html"><a href="interp.html#variable-importance"><i class="fa fa-check"></i><b>13.1.2</b> Variable importance (tree-based)</a></li>
<li class="chapter" data-level="13.1.3" data-path="interp.html"><a href="interp.html#variable-importance-agnostic"><i class="fa fa-check"></i><b>13.1.3</b> Variable importance (agnostic)</a></li>
<li class="chapter" data-level="13.1.4" data-path="interp.html"><a href="interp.html#partial-dependence-plot"><i class="fa fa-check"></i><b>13.1.4</b> Partial dependence plot</a></li>
</ul></li>
<li class="chapter" data-level="13.2" data-path="interp.html"><a href="interp.html#local-interpretations"><i class="fa fa-check"></i><b>13.2</b> Local interpretations</a><ul>
<li class="chapter" data-level="13.2.1" data-path="interp.html"><a href="interp.html#lime"><i class="fa fa-check"></i><b>13.2.1</b> LIME</a></li>
<li class="chapter" data-level="13.2.2" data-path="interp.html"><a href="interp.html#shapley-values"><i class="fa fa-check"></i><b>13.2.2</b> Shapley values</a></li>
<li class="chapter" data-level="13.2.3" data-path="interp.html"><a href="interp.html#breakdown"><i class="fa fa-check"></i><b>13.2.3</b> Breakdown</a></li>
</ul></li>
</ul></li>
<li class="chapter" data-level="14" data-path="causality.html"><a href="causality.html"><i class="fa fa-check"></i><b>14</b> Two key concepts: causality and non-stationarity</a><ul>
<li class="chapter" data-level="14.1" data-path="causality.html"><a href="causality.html#causality-1"><i class="fa fa-check"></i><b>14.1</b> Causality</a><ul>
<li class="chapter" data-level="14.1.1" data-path="causality.html"><a href="causality.html#granger"><i class="fa fa-check"></i><b>14.1.1</b> Granger causality</a></li>
<li class="chapter" data-level="14.1.2" data-path="causality.html"><a href="causality.html#causal-additive-models"><i class="fa fa-check"></i><b>14.1.2</b> Causal additive models</a></li>
<li class="chapter" data-level="14.1.3" data-path="causality.html"><a href="causality.html#structural-time-series-models"><i class="fa fa-check"></i><b>14.1.3</b> Structural time series models</a></li>
</ul></li>
<li class="chapter" data-level="14.2" data-path="causality.html"><a href="causality.html#nonstat"><i class="fa fa-check"></i><b>14.2</b> Dealing with changing environments</a><ul>
<li class="chapter" data-level="14.2.1" data-path="causality.html"><a href="causality.html#non-stationarity-yet-another-illustration"><i class="fa fa-check"></i><b>14.2.1</b> Non-stationarity: yet another illustration</a></li>
<li class="chapter" data-level="14.2.2" data-path="causality.html"><a href="causality.html#online-learning"><i class="fa fa-check"></i><b>14.2.2</b> Online learning</a></li>
<li class="chapter" data-level="14.2.3" data-path="causality.html"><a href="causality.html#homogeneous-transfer-learning"><i class="fa fa-check"></i><b>14.2.3</b> Homogeneous transfer learning</a></li>
</ul></li>
</ul></li>
<li class="chapter" data-level="15" data-path="unsup.html"><a href="unsup.html"><i class="fa fa-check"></i><b>15</b> Unsupervised learning</a><ul>
<li class="chapter" data-level="15.1" data-path="unsup.html"><a href="unsup.html#corpred"><i class="fa fa-check"></i><b>15.1</b> The problem with correlated predictors</a></li>
<li class="chapter" data-level="15.2" data-path="unsup.html"><a href="unsup.html#principal-component-analysis-and-autoencoders"><i class="fa fa-check"></i><b>15.2</b> Principal component analysis and autoencoders</a><ul>
<li class="chapter" data-level="15.2.1" data-path="unsup.html"><a href="unsup.html#a-bit-of-algebra"><i class="fa fa-check"></i><b>15.2.1</b> A bit of algebra</a></li>
<li class="chapter" data-level="15.2.2" data-path="unsup.html"><a href="unsup.html#pca"><i class="fa fa-check"></i><b>15.2.2</b> PCA</a></li>
<li class="chapter" data-level="15.2.3" data-path="unsup.html"><a href="unsup.html#ae"><i class="fa fa-check"></i><b>15.2.3</b> Autoencoders</a></li>
<li class="chapter" data-level="15.2.4" data-path="unsup.html"><a href="unsup.html#application"><i class="fa fa-check"></i><b>15.2.4</b> Application</a></li>
</ul></li>
<li class="chapter" data-level="15.3" data-path="unsup.html"><a href="unsup.html#clustering-via-k-means"><i class="fa fa-check"></i><b>15.3</b> Clustering via k-means</a></li>
<li class="chapter" data-level="15.4" data-path="unsup.html"><a href="unsup.html#nearest-neighbors"><i class="fa fa-check"></i><b>15.4</b> Nearest neighbors</a></li>
<li class="chapter" data-level="15.5" data-path="unsup.html"><a href="unsup.html#coding-exercise-2"><i class="fa fa-check"></i><b>15.5</b> Coding exercise</a></li>
</ul></li>
<li class="chapter" data-level="16" data-path="RL.html"><a href="RL.html"><i class="fa fa-check"></i><b>16</b> Reinforcement learning</a><ul>
<li class="chapter" data-level="16.1" data-path="RL.html"><a href="RL.html#theoretical-layout"><i class="fa fa-check"></i><b>16.1</b> Theoretical layout</a><ul>
<li class="chapter" data-level="16.1.1" data-path="RL.html"><a href="RL.html#general-framework"><i class="fa fa-check"></i><b>16.1.1</b> General framework</a></li>
<li class="chapter" data-level="16.1.2" data-path="RL.html"><a href="RL.html#q-learning"><i class="fa fa-check"></i><b>16.1.2</b> Q-learning</a></li>
<li class="chapter" data-level="16.1.3" data-path="RL.html"><a href="RL.html#sarsa"><i class="fa fa-check"></i><b>16.1.3</b> SARSA</a></li>
</ul></li>
<li class="chapter" data-level="16.2" data-path="RL.html"><a href="RL.html#the-curse-of-dimensionality"><i class="fa fa-check"></i><b>16.2</b> The curse of dimensionality</a></li>
<li class="chapter" data-level="16.3" data-path="RL.html"><a href="RL.html#policy-gradient"><i class="fa fa-check"></i><b>16.3</b> Policy gradient</a><ul>
<li class="chapter" data-level="16.3.1" data-path="RL.html"><a href="RL.html#principle-2"><i class="fa fa-check"></i><b>16.3.1</b> Principle</a></li>
<li class="chapter" data-level="16.3.2" data-path="RL.html"><a href="RL.html#extensions-2"><i class="fa fa-check"></i><b>16.3.2</b> Extensions</a></li>
</ul></li>
<li class="chapter" data-level="16.4" data-path="RL.html"><a href="RL.html#simple-examples"><i class="fa fa-check"></i><b>16.4</b> Simple examples</a><ul>
<li class="chapter" data-level="16.4.1" data-path="RL.html"><a href="RL.html#q-learning-with-simulations"><i class="fa fa-check"></i><b>16.4.1</b> Q-learning with simulations</a></li>
<li class="chapter" data-level="16.4.2" data-path="RL.html"><a href="RL.html#RLemp2"><i class="fa fa-check"></i><b>16.4.2</b> Q-learning with market data</a></li>
</ul></li>
<li class="chapter" data-level="16.5" data-path="RL.html"><a href="RL.html#concluding-remarks"><i class="fa fa-check"></i><b>16.5</b> Concluding remarks</a></li>
<li class="chapter" data-level="16.6" data-path="RL.html"><a href="RL.html#exercises"><i class="fa fa-check"></i><b>16.6</b> Exercises</a></li>
</ul></li>
<li class="part"><span><b>V Appendix</b></span></li>
<li class="chapter" data-level="17" data-path="data-description.html"><a href="data-description.html"><i class="fa fa-check"></i><b>17</b> Data description</a></li>
<li class="chapter" data-level="18" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html"><i class="fa fa-check"></i><b>18</b> Solutions to exercises</a><ul>
<li class="chapter" data-level="18.1" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-3"><i class="fa fa-check"></i><b>18.1</b> Chapter 3</a></li>
<li class="chapter" data-level="18.2" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-4"><i class="fa fa-check"></i><b>18.2</b> Chapter 4</a></li>
<li class="chapter" data-level="18.3" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-5"><i class="fa fa-check"></i><b>18.3</b> Chapter 5</a></li>
<li class="chapter" data-level="18.4" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-6"><i class="fa fa-check"></i><b>18.4</b> Chapter 6</a></li>
<li class="chapter" data-level="18.5" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-7-the-autoencoder-model"><i class="fa fa-check"></i><b>18.5</b> Chapter 7: the autoencoder model</a></li>
<li class="chapter" data-level="18.6" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-8"><i class="fa fa-check"></i><b>18.6</b> Chapter 8</a></li>
<li class="chapter" data-level="18.7" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-11-ensemble-neural-network"><i class="fa fa-check"></i><b>18.7</b> Chapter 11: ensemble neural network</a></li>
<li class="chapter" data-level="18.8" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-12"><i class="fa fa-check"></i><b>18.8</b> Chapter 12</a><ul>
<li class="chapter" data-level="18.8.1" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#ew-portfolios-with-the-tidyverse"><i class="fa fa-check"></i><b>18.8.1</b> EW portfolios with the tidyverse</a></li>
<li class="chapter" data-level="18.8.2" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#advanced-weighting-function"><i class="fa fa-check"></i><b>18.8.2</b> Advanced weighting function</a></li>
<li class="chapter" data-level="18.8.3" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#functional-programming-in-the-backtest"><i class="fa fa-check"></i><b>18.8.3</b> Functional programming in the backtest</a></li>
</ul></li>
<li class="chapter" data-level="18.9" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-15"><i class="fa fa-check"></i><b>18.9</b> Chapter 15</a></li>
<li class="chapter" data-level="18.10" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-16"><i class="fa fa-check"></i><b>18.10</b> Chapter 16</a></li>
</ul></li>
</ul>
</nav>
</div>
<div class="book-body">
<div class="body-inner">
<div class="book-header" role="navigation">
<h1>
<i class="fa fa-circle-o-notch fa-spin"></i><a href="./">Machine Learning for Factor Investing</a>
</h1>
</div>
<div class="page-wrapper" tabindex="-1" role="main">
<div class="page-inner">
<section class="normal" id="section-">
<div id="ensemble" class="section level1">
<h1><span class="header-section-number">Chapter 11</span> Ensemble models</h1>
<p>
Let us be honest. When facing a prediction task, it is not obvious how to determine the best choice among ML tools: penalized regressions, tree methods, neural networks, SVMs, etc. A natural and tempting alternative is to <strong>combine</strong> several algorithms (or the predictions that result from them) to try to extract value out of each engine (or learner). This intention is not new and contributions towards this goal go back at least to <span class="citation">Bates and Granger (<a href="#ref-bates1969combination" role="doc-biblioref">1969</a>)</span> (for the purpose of passenger flow forecasting).</p>
<p>Below, we outline a few books on the topic of ensembles. The latter have many names and synonyms, such as <strong>forecast aggregation</strong>, <strong>model averaging</strong>, <strong>mixture of experts</strong> or <strong>prediction combination</strong>. The first four references below are monographs, while the last two are compilations of contributions: </p>
<ul>
<li><span class="citation">Zhou (<a href="#ref-zhou2012ensemble" role="doc-biblioref">2012</a>)</span>: a very didactic book that covers the main ideas of ensembles;<br />
</li>
<li><span class="citation">Schapire and Freund (<a href="#ref-schapire2012boosting" role="doc-biblioref">2012</a>)</span>: the main reference for boosting (and hence, ensembling) with many theoretical results and thus strong mathematical groundings;<br />
</li>
<li><span class="citation">Seni and Elder (<a href="#ref-seni2010ensemble" role="doc-biblioref">2010</a>)</span>: an introduction dedicated to tree methods mainly;<br />
</li>
<li><span class="citation">Claeskens and Hjort (<a href="#ref-claeskens2008model" role="doc-biblioref">2008</a>)</span>: an overview of model selection techniques with a few chapters focused on model averaging;<br />
</li>
<li><span class="citation">Zhang and Ma (<a href="#ref-zhang2012ensemble" role="doc-biblioref">2012</a>)</span>: a collection of thematic chapters on ensemble learning;<br />
</li>
<li><span class="citation">Okun, Valentini, and Re (<a href="#ref-okun2011ensembles" role="doc-biblioref">2011</a>)</span>: examples of applications of ensembles.</li>
</ul>
<p>In this chapter, we cover the basic ideas and concepts behind the notion of ensembles. We refer to the above books for deeper treatments on the topic. We underline that several ensemble methods have already been mentioned and covered earlier, notably in Chapter <a href="trees.html#trees">6</a>. Indeed, random forests and boosted trees are examples of ensembles. Hence, other early articles on the combination of learners are <span class="citation">Schapire (<a href="#ref-schapire1990strength" role="doc-biblioref">1990</a>)</span>, <span class="citation">Jacobs et al. (<a href="#ref-jacobs1991adaptive" role="doc-biblioref">1991</a>)</span> (for neural networks particularly), and <span class="citation">Freund and Schapire (<a href="#ref-freund1997decision" role="doc-biblioref">1997</a>)</span>. Ensembles can for instance be used to aggregate models that are built on different datasets (<span class="citation">Pesaran and Pick (<a href="#ref-pesaran2011forecast" role="doc-biblioref">2011</a>)</span>), and can be made time-dependent (<span class="citation">Sun et al. (<a href="#ref-sun2020time" role="doc-biblioref">2020</a>)</span>). For a theoretical view on ensembles with a Bayesian perspective, we refer to <span class="citation">Razin and Levy (<a href="#ref-razin2020drowning" role="doc-biblioref">2020</a>)</span>. Finally, perspectives linked to asset pricing and factor modelling are provided in <span class="citation">Gospodinov and Maasoumi (<a href="#ref-gospodinov2020generalized" role="doc-biblioref">2020</a>)</span> and <span class="citation">De Nard, Hediger, and Leippold (<a href="#ref-de2020subsampled" role="doc-biblioref">2020</a>)</span> (subsampling and forecast aggregation).</p>
<div id="linear-ensembles" class="section level2">
<h2><span class="header-section-number">11.1</span> Linear ensembles</h2>
<div id="principles" class="section level3">
<h3><span class="header-section-number">11.1.1</span> Principles</h3>
<p>
In this chapter we adopt the following notations. We work with <span class="math inline">\(M\)</span> models where <span class="math inline">\(\tilde{y}_{i,m}\)</span> is the prediction of model <span class="math inline">\(m\)</span> for instance <span class="math inline">\(i\)</span> and errors <span class="math inline">\(\epsilon_{i,m}=y_i-\tilde{y}_{i,m}\)</span> are stacked into a <span class="math inline">\((I\times M)\)</span> matrix <span class="math inline">\(\textbf{E}\)</span>. A linear combination of models has sample errors equal to <span class="math inline">\(\textbf{Ew}\)</span>, where <span class="math inline">\(\textbf{w}=(w_m)\)</span> are the weights assigned to each model and we assume <span class="math inline">\(\textbf{w}'\textbf{1}_M=1\)</span>. Minimizing the total (squared) error is thus a simple quadratic program with a unique constraint. The Lagrange function is <span class="math inline">\(L(\textbf{w})=\textbf{w}'\textbf{E}'\textbf{E}\textbf{w}-\lambda (\textbf{w}'\textbf{1}_M-1)\)</span> and hence
<span class="math display">\[\frac{\partial}{\partial \textbf{w}}L(\textbf{w})=2\,\textbf{E}'\textbf{E}\textbf{w}-\lambda \textbf{1}_M=0 \quad \Leftrightarrow \quad \textbf{w}=\frac{\lambda}{2}(\textbf{E}'\textbf{E})^{-1}\textbf{1}_M,\]</span></p>
<p>and the constraint imposes <span class="math inline">\(\textbf{w}^*=\frac{(\textbf{E}'\textbf{E})^{-1}\textbf{1}_M}{\textbf{1}_M'(\textbf{E}'\textbf{E})^{-1}\textbf{1}_M}\)</span>. This form is similar to that of minimum variance portfolios. If errors are unbiased (<span class="math inline">\(\textbf{1}_I'\textbf{E}=\textbf{0}_M'\)</span>), then <span class="math inline">\(\textbf{E}'\textbf{E}\)</span> is the covariance matrix of errors.</p>
<p>This expression shows an important feature of optimized linear ensembles: they can only add value if the models tell different stories. If two models are redundant, <span class="math inline">\(\textbf{E}'\textbf{E}\)</span> will be close to singular and <span class="math inline">\(\textbf{w}^*\)</span> will arbitrage one against the other in a spurious fashion. This is the exact same problem as when mean-variance portfolios are constituted with highly correlated assets: in this case, diversification fails because when things go wrong, all assets go down. Another problem arises when the number of observations is too small compared to the number of assets so that the covariance matrix of returns is singular. This is not an issue for ensembles because the number of observations will usually be much larger than the number of models (<span class="math inline">\(I>>M\)</span>).</p>
<p>In the limit when correlations increase to one, the above formulation becomes highly unstable and ensembles cannot be trusted. One heuristic way to see this is when <span class="math inline">\(M=2\)</span> and
<span class="math display">\[\textbf{E}'\textbf{E}=\left[
\begin{array}{cc} \sigma_1^2 & \rho\sigma_1\sigma_2 \\
\rho\sigma_1\sigma_2 & \sigma_2^2 \\
\end{array}
\right] \quad \Leftrightarrow \quad
(\textbf{E}'\textbf{E})^{-1}=\frac{1}{1-\rho^2}\left[
\begin{array}{cc} \sigma_1^{-2} & -\rho(\sigma_1\sigma_2)^{-1} \\
-\rho(\sigma_1\sigma_2)^{-1} & \sigma_2^{-2} \\
\end{array}
\right]\]</span></p>
<p>so that when <span class="math inline">\(\rho \rightarrow 1\)</span>, the model with the smallest errors (minimum <span class="math inline">\(\sigma_i^2\)</span>) will see its weight increasing towards infinity while the other model will have a similarly large <strong>negative weight</strong>: the model arbitrages between two highly correlated variables. This seems like a very bad idea.</p>
<p>There is another illustration of the issues caused by correlations. Let’s assume we face <span class="math inline">\(M\)</span> correlated errors <span class="math inline">\(\epsilon_m\)</span> with pairwise correlation <span class="math inline">\(\rho\)</span>, zero mean and variance <span class="math inline">\(\sigma^2\)</span>. The variance of the average error is
<span class="math display">\[\begin{align*}
\mathbb{E}\left[\left(\frac{1}{M}\sum_{m=1}^M \epsilon_m\right)^2 \right]&=\frac{1}{M^2}\mathbb{E}\left[\sum_{m=1}^M\epsilon_m^2+\sum_{m\neq n}\epsilon_n\epsilon_m\right] \\
&=\frac{\sigma^2}{M}+\frac{1}{M^2}\sum_{n\neq m} \rho \sigma^2 \\
& =\rho \sigma^2 +\frac{\sigma^2(1-\rho)}{M}
\end{align*}\]</span>
where while the second term converges to zero as <span class="math inline">\(M\)</span> increases, the first term remains and is <strong>linearly increasing</strong> with <span class="math inline">\(\rho\)</span>. In passing, because variances are always positive, this result implies that the common pairwise correlation between <span class="math inline">\(M\)</span> variables is bounded below by <span class="math inline">\(-(M-1)^{-1}\)</span>. This result is interesting but rarely found in textbooks.</p>
<p>One improvement proposed to circumvent the trouble caused by correlations, advocated in a seminal publication (<span class="citation">Breiman (<a href="#ref-breiman1996stacked" role="doc-biblioref">1996</a>)</span>), is to enforce positivity constraints on the weights and solve</p>
<p><span class="math display">\[\underset{\textbf{w}}{\text{argmin}} \ \textbf{w}'\textbf{E}'\textbf{E}\textbf{w} , \quad \text{s.t.} \quad \left\{
\begin{array}{l} \textbf{w}'\textbf{1}_M=1 \\ w_m \ge 0 \quad \forall m \end{array}\right. .\]</span></p>
<p>Mechanically, if several models are highly correlated, the constraint will impose that only one of them will have a nonzero weight. If there are many models, then just a few of them will be selected by the minimization program. In the context of portfolio optimization, <span class="citation">Jagannathan and Ma (<a href="#ref-jagannathan2003risk" role="doc-biblioref">2003</a>)</span> have shown the counter-intuitive benefits of constraints in the construction of mean-variance allocations. In our setting, the constraint will similarly help discriminate wisely among the ‘best’ models.</p>
<p>In the literature, forecast combination and model averaging (which are synonyms of ensembles) have been tested on stock markets as early as in <span class="citation">Von Holstein (<a href="#ref-von1972probabilistic" role="doc-biblioref">1972</a>)</span>. Surprisingly, the articles were not published in Finance journals but rather in fields such as Management (<span class="citation">Virtanen and Yli-Olli (<a href="#ref-virtanen1987forecasting" role="doc-biblioref">1987</a>)</span>, <span class="citation">Wang et al. (<a href="#ref-wang2012stock" role="doc-biblioref">2012</a>)</span>), Economics and Econometrics (<span class="citation">Donaldson and Kamstra (<a href="#ref-donaldson1996forecast" role="doc-biblioref">1996</a>)</span>, <span class="citation">Clark and McCracken (<a href="#ref-clark2009improving" role="doc-biblioref">2009</a>)</span>, <span class="citation">Mascio, Fabozzi, and Zumwalt (<a href="#ref-mascio2020market" role="doc-biblioref">2020</a>)</span>), Operations Research (<span class="citation">Huang, Nakamori, and Wang (<a href="#ref-huang2005forecasting" role="doc-biblioref">2005</a>)</span>, <span class="citation">Leung, Daouk, and Chen (<a href="#ref-leung2001using" role="doc-biblioref">2001</a>)</span>, and <span class="citation">Bonaccolto and Paterlini (<a href="#ref-bonaccolto2019developing" role="doc-biblioref">2019</a>)</span>), and Computer Science (<span class="citation">Harrald and Kamstra (<a href="#ref-harrald1997evolving" role="doc-biblioref">1997</a>)</span>, <span class="citation">Hassan, Nath, and Kirley (<a href="#ref-hassan2007fusion" role="doc-biblioref">2007</a>)</span>).</p>
<p>In the general forecasting literature, many alternative (refined) methods for combining forecasts have been studied. Trimmed opinion pools (<span class="citation">Grushka-Cockayne, Jose, and Lichtendahl Jr (<a href="#ref-grushka2016ensembles" role="doc-biblioref">2016</a>)</span>) compute averages over the predictions that are not too extreme. Ensembles with weights that depend on previous past errors are developed in <span class="citation">Pike and Vazquez-Grande (<a href="#ref-pike2020combining" role="doc-biblioref">2020</a>)</span>. We refer to <span class="citation">Gaba, Tsetlin, and Winkler (<a href="#ref-gaba2017combining" role="doc-biblioref">2017</a>)</span> for a more exhaustive list of combinations as well as for an empirical study of their respective efficiency.
Overall, findings are mixed and the heuristic simple average is, as usual, hard to beat (see, e.g., <span class="citation">Genre et al. (<a href="#ref-genre2013combining" role="doc-biblioref">2013</a>)</span>).</p>
</div>
<div id="example" class="section level3">
<h3><span class="header-section-number">11.1.2</span> Example</h3>
<p>In order to build an ensemble, we must gather the predictions and the corresponding errors into the <span class="math inline">\(\textbf{E}\)</span> matrix. We will work with 5 models that were trained in the previous chapters: penalized regression, simple tree, random forest, xgboost and feed-forward neural network. The training errors have zero means, hence <span class="math inline">\(\textbf{E}'\textbf{E}\)</span> is the covariance matrix of errors between models.</p>
<div class="sourceCode" id="cb145"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb145-1"><a href="ensemble.html#cb145-1"></a>err_pen_train <-<span class="st"> </span><span class="kw">predict</span>(fit_pen_pred, x_penalized_train) <span class="op">-</span><span class="st"> </span>training_sample<span class="op">$</span>R1M_Usd <span class="co"># Reg.</span></span>
<span id="cb145-2"><a href="ensemble.html#cb145-2"></a>err_tree_train <-<span class="st"> </span><span class="kw">predict</span>(fit_tree, training_sample) <span class="op">-</span><span class="st"> </span>training_sample<span class="op">$</span>R1M_Usd <span class="co"># Tree</span></span>
<span id="cb145-3"><a href="ensemble.html#cb145-3"></a>err_RF_train <-<span class="st"> </span><span class="kw">predict</span>(fit_RF, training_sample) <span class="op">-</span><span class="st"> </span>training_sample<span class="op">$</span>R1M_Usd <span class="co"># RF</span></span>
<span id="cb145-4"><a href="ensemble.html#cb145-4"></a>err_XGB_train <-<span class="st"> </span><span class="kw">predict</span>(fit_xgb, train_matrix_xgb) <span class="op">-</span><span class="st"> </span>training_sample<span class="op">$</span>R1M_Usd <span class="co"># XGBoost</span></span>
<span id="cb145-5"><a href="ensemble.html#cb145-5"></a>err_NN_train <-<span class="st"> </span><span class="kw">predict</span>(model, NN_train_features) <span class="op">-</span><span class="st"> </span>training_sample<span class="op">$</span>R1M_Usd <span class="co"># NN</span></span>
<span id="cb145-6"><a href="ensemble.html#cb145-6"></a>E <-<span class="st"> </span><span class="kw">cbind</span>(err_pen_train, err_tree_train, err_RF_train, err_XGB_train, err_NN_train) <span class="co"># E matrix</span></span>
<span id="cb145-7"><a href="ensemble.html#cb145-7"></a><span class="kw">colnames</span>(E) <-<span class="st"> </span><span class="kw">c</span>(<span class="st">"Pen_reg"</span>, <span class="st">"Tree"</span>, <span class="st">"RF"</span>, <span class="st">"XGB"</span>, <span class="st">"NN"</span>) <span class="co"># Names</span></span>
<span id="cb145-8"><a href="ensemble.html#cb145-8"></a><span class="kw">cor</span>(E) <span class="co"># Cor. mat.</span></span></code></pre></div>
<pre><code>## Pen_reg Tree RF XGB NN
## Pen_reg 1.0000000 0.9984394 0.9968224 0.9310186 0.9965702
## Tree 0.9984394 1.0000000 0.9974647 0.9296081 0.9973310
## RF 0.9968224 0.9974647 1.0000000 0.9281725 0.9972484
## XGB 0.9310186 0.9296081 0.9281725 1.0000000 0.9279230
## NN 0.9965702 0.9973310 0.9972484 0.9279230 1.0000000</code></pre>
<p>As is shown by the correlation matrix, the models fail to generate heterogeneity in their predictions. The minimum correlation (though above 92%!) is obtained by the boosted tree models. Below, we compare the training accuracy of models by computing the average absolute value of errors.</p>
<div class="sourceCode" id="cb147"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb147-1"><a href="ensemble.html#cb147-1"></a><span class="kw">apply</span>(<span class="kw">abs</span>(E), <span class="dv">2</span>, mean) <span class="co"># Mean absolute error of columns of E </span></span></code></pre></div>
<pre><code>## Pen_reg Tree RF XGB NN
## 0.08345916 0.08362133 0.08327121 0.08986993 0.08372445</code></pre>
<p>The best performing ML engine is the random forest. The boosted tree model is the worst, by far. Below, we compute the optimal (non-constrained) weights for the combination of models.</p>
<div class="sourceCode" id="cb149"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb149-1"><a href="ensemble.html#cb149-1"></a>w_ensemble <-<span class="st"> </span><span class="kw">solve</span>(<span class="kw">t</span>(E) <span class="op">%*%</span><span class="st"> </span>E) <span class="op">%*%</span><span class="st"> </span><span class="kw">rep</span>(<span class="dv">1</span>,<span class="dv">5</span>) <span class="co"># Optimal weights</span></span>
<span id="cb149-2"><a href="ensemble.html#cb149-2"></a>w_ensemble <-<span class="st"> </span>w_ensemble <span class="op">/</span><span class="st"> </span><span class="kw">sum</span>(w_ensemble)</span>
<span id="cb149-3"><a href="ensemble.html#cb149-3"></a>w_ensemble</span></code></pre></div>
<pre><code>## [,1]
## Pen_reg -0.5781710818
## Tree -0.1685807693
## RF 1.3024288196
## XGB -0.0002405839
## NN 0.4445636155</code></pre>
<p>Because of the high correlations, the optimal weights are not balanced and diversified: they load heavily on the random forest learner (best in-sample model) and ‘short’ a few models in order to compensate. As one could expect, the model with the largest negative weight (Pen_reg) has a very high correlation with the random forest algorithm (0.997).</p>
<p>Note that the weights are of course computed with <strong>training errors</strong>. The optimal combination is then tested on the testing sample. Below, we compute out-of-sample (testing) errors and their average absolute value.</p>
<div class="sourceCode" id="cb151"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb151-1"><a href="ensemble.html#cb151-1"></a>err_pen_test <-<span class="st"> </span><span class="kw">predict</span>(fit_pen_pred, x_penalized_test) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># Reg.</span></span>
<span id="cb151-2"><a href="ensemble.html#cb151-2"></a>err_tree_test <-<span class="st"> </span><span class="kw">predict</span>(fit_tree, testing_sample) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># Tree</span></span>
<span id="cb151-3"><a href="ensemble.html#cb151-3"></a>err_RF_test <-<span class="st"> </span><span class="kw">predict</span>(fit_RF, testing_sample) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># RF</span></span>
<span id="cb151-4"><a href="ensemble.html#cb151-4"></a>err_XGB_test <-<span class="st"> </span><span class="kw">predict</span>(fit_xgb, xgb_test) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># XGBoost</span></span>
<span id="cb151-5"><a href="ensemble.html#cb151-5"></a>err_NN_test <-<span class="st"> </span><span class="kw">predict</span>(model, NN_test_features) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># NN</span></span>
<span id="cb151-6"><a href="ensemble.html#cb151-6"></a>E_test <-<span class="st"> </span><span class="kw">cbind</span>(err_pen_test, err_tree_test, err_RF_test, err_XGB_test, err_NN_test) <span class="co"># E matrix</span></span>
<span id="cb151-7"><a href="ensemble.html#cb151-7"></a><span class="kw">colnames</span>(E_test) <-<span class="st"> </span><span class="kw">c</span>(<span class="st">"Pen_reg"</span>, <span class="st">"Tree"</span>, <span class="st">"RF"</span>, <span class="st">"XGB"</span>, <span class="st">"NN"</span>)</span>
<span id="cb151-8"><a href="ensemble.html#cb151-8"></a><span class="kw">apply</span>(<span class="kw">abs</span>(E_test), <span class="dv">2</span>, mean) <span class="co"># Mean absolute error of columns of E </span></span></code></pre></div>
<pre><code>## Pen_reg Tree RF XGB NN
## 0.06618181 0.06653527 0.06710349 0.07170802 0.06704251</code></pre>
<p>The boosted tree model is still the worst performing algorithm while the simple models (regression and simple tree) are the ones that fare the best. The most naive combination is the simple average of model predictions.</p>
<div class="sourceCode" id="cb153"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb153-1"><a href="ensemble.html#cb153-1"></a>err_EW_test <-<span class="st"> </span><span class="kw">apply</span>(E_test, <span class="dv">1</span>, mean) <span class="co"># Equally weighted combination</span></span>
<span id="cb153-2"><a href="ensemble.html#cb153-2"></a><span class="kw">mean</span>(<span class="kw">abs</span>(err_EW_test))</span></code></pre></div>
<pre><code>## [1] 0.06690517</code></pre>
<p>Because the errors are very correlated, the equally weighted combination of forecasts yields an average error which lies ‘in the middle’ of individual errors. The diversification benefits are too small. Let us now test the ‘optimal’ combination <span class="math inline">\(\textbf{w}^*=\frac{(\textbf{E}'\textbf{E})^{-1}\textbf{1}_M}{\textbf{1}_M'(\textbf{E}'\textbf{E})^{-1}\textbf{1}_M}\)</span>.</p>
<div class="sourceCode" id="cb155"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb155-1"><a href="ensemble.html#cb155-1"></a>err_opt_test <-<span class="st"> </span>E_test <span class="op">%*%</span><span class="st"> </span>w_ensemble <span class="co"># Optimal unconstrained combination</span></span>
<span id="cb155-2"><a href="ensemble.html#cb155-2"></a><span class="kw">mean</span>(<span class="kw">abs</span>(err_opt_test))</span></code></pre></div>
<pre><code>## [1] 0.06836327</code></pre>
<p>Again, the result is disappointing because of the lack of diversification across models. The correlations between errors are high not only on the training sample, but also on the testing sample, as shown below.</p>
<div class="sourceCode" id="cb157"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb157-1"><a href="ensemble.html#cb157-1"></a><span class="kw">cor</span>(E_test)</span></code></pre></div>
<pre><code>## Pen_reg Tree RF XGB NN
## Pen_reg 1.0000000 0.9987069 0.9968882 0.9537914 0.9962205
## Tree 0.9987069 1.0000000 0.9978366 0.9583641 0.9974515
## RF 0.9968882 0.9978366 1.0000000 0.9606570 0.9975484
## XGB 0.9537914 0.9583641 0.9606570 1.0000000 0.9612949
## NN 0.9962205 0.9974515 0.9975484 0.9612949 1.0000000</code></pre>
<p>The leverage from the optimal solution only exacerbates the problem and underperforms the heuristic uniform combination. We end this section with the constrained formulation of <span class="citation">Breiman (<a href="#ref-breiman1996stacked" role="doc-biblioref">1996</a>)</span> using the <em>quadprog</em> package. If we write <span class="math inline">\(\mathbf{\Sigma}\)</span> for the covariance matrix of errors, we seek
<span class="math display">\[\mathbf{w}^*=\underset{\mathbf{w}}{\text{argmin}} \ \mathbf{w}'\mathbf{\Sigma}\mathbf{w}, \quad \mathbf{1}'\mathbf{w}=1, \quad w_i\ge 0.\]</span>
The constraints will be handled as:</p>
<p><span class="math display">\[\mathbf{A} \mathbf{w}= \begin{bmatrix}
1 & 1 & 1 \\
1 & 0 & 0\\
0 & 1 & 0 \\
0 & 0 & 1
\end{bmatrix} \mathbf{w} \hspace{9mm} \text{ compared to} \hspace{9mm} \mathbf{b}=\begin{bmatrix} 1 \\ 0 \\ 0 \\ 0 \end{bmatrix}, \]</span></p>
<p>where the first line will be an equality (weights sum to one) and the last three will be inequalities (weights are all positive).</p>
<div class="sourceCode" id="cb159"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb159-1"><a href="ensemble.html#cb159-1"></a><span class="kw">library</span>(quadprog) <span class="co"># Package for quadratic programming</span></span>
<span id="cb159-2"><a href="ensemble.html#cb159-2"></a>Sigma <-<span class="st"> </span><span class="kw">t</span>(E) <span class="op">%*%</span><span class="st"> </span>E <span class="co"># Unscaled covariance matrix</span></span>
<span id="cb159-3"><a href="ensemble.html#cb159-3"></a>nb_mods <-<span class="st"> </span><span class="kw">nrow</span>(Sigma) <span class="co"># Number of models</span></span>
<span id="cb159-4"><a href="ensemble.html#cb159-4"></a>w_const <-<span class="st"> </span><span class="kw">solve.QP</span>(<span class="dt">Dmat =</span> Sigma, <span class="co"># D matrix = Sigma</span></span>
<span id="cb159-5"><a href="ensemble.html#cb159-5"></a> <span class="dt">dvec =</span> <span class="kw">rep</span>(<span class="dv">0</span>, nb_mods), <span class="co"># Zero vector</span></span>
<span id="cb159-6"><a href="ensemble.html#cb159-6"></a> <span class="dt">Amat =</span> <span class="kw">rbind</span>(<span class="kw">rep</span>(<span class="dv">1</span>, nb_mods), <span class="kw">diag</span>(nb_mods)) <span class="op">%>%</span><span class="st"> </span><span class="kw">t</span>(), <span class="co"># A matrix for constraints</span></span>
<span id="cb159-7"><a href="ensemble.html#cb159-7"></a> <span class="dt">bvec =</span> <span class="kw">c</span>(<span class="dv">1</span>,<span class="kw">rep</span>(<span class="dv">0</span>, nb_mods)), <span class="co"># b vector for constraints</span></span>
<span id="cb159-8"><a href="ensemble.html#cb159-8"></a> <span class="dt">meq =</span> <span class="dv">1</span> <span class="co"># 1 line of equality constraints, others = inequalities</span></span>
<span id="cb159-9"><a href="ensemble.html#cb159-9"></a> )</span>
<span id="cb159-10"><a href="ensemble.html#cb159-10"></a>w_const<span class="op">$</span>solution <span class="op">%>%</span><span class="st"> </span><span class="kw">round</span>(<span class="dv">3</span>) <span class="co"># Solution</span></span></code></pre></div>
<pre><code>## [1] 0.000 0.000 0.854 0.000 0.146</code></pre>
<p>Compared to the unconstrained solution, the weights are sparse and concentrated in one or two models, usually those with small training sample errors.</p>
</div>
</div>
<div id="stacked-ensembles" class="section level2">
<h2><span class="header-section-number">11.2</span> Stacked ensembles</h2>
<p></p>
<div id="two-stage-training" class="section level3">
<h3><span class="header-section-number">11.2.1</span> Two-stage training</h3>
<p><strong>Stacked ensembles</strong> are a natural generalization of linear ensembles. The idea of generalizing linear ensembles goes back at least to <span class="citation">Wolpert (<a href="#ref-wolpert1992stacked" role="doc-biblioref">1992</a><a href="#ref-wolpert1992stacked" role="doc-biblioref">b</a>)</span>. In the general case, the training is performed in two stages. The first stage is the simple one, whereby the <span class="math inline">\(M\)</span> models are trained independently, yielding the predictions <span class="math inline">\(\tilde{y}_{i,m}\)</span> for instance <span class="math inline">\(i\)</span> and model <span class="math inline">\(m\)</span>. The second step is to consider the output of the trained models as input for a new level of machine learning optimization. The second level predictions are <span class="math inline">\(\breve{y}_i=h(\tilde{y}_{i,1},\dots,\tilde{y}_{i,M})\)</span>, where <span class="math inline">\(h\)</span> is a new learner (see Figure <a href="ensemble.html#fig:stackscheme">11.1</a>). Linear ensembles are of course stacked ensembles in which the second layer is a linear regression.</p>
<p>The same techniques are then applied to minimize the error between the true values <span class="math inline">\(y_i\)</span> and the predicted ones <span class="math inline">\(\breve{y}_i\)</span>.</p>
<div class="figure" style="text-align: center"><span id="fig:stackscheme"></span>
<img src="images/stack.png" alt="Scheme of stacked ensembles." width="350px" />
<p class="caption">
FIGURE 11.1: Scheme of stacked ensembles.
</p>
</div>
</div>
<div id="code-and-results-3" class="section level3">
<h3><span class="header-section-number">11.2.2</span> Code and results</h3>
<p>Below, we create a low-dimensional neural network which takes in the individual predictions of each model and compiles them into a synthetic forecast.</p>
<div class="sourceCode" id="cb161"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb161-1"><a href="ensemble.html#cb161-1"></a>model_stack <-<span class="st"> </span><span class="kw">keras_model_sequential</span>()</span>
<span id="cb161-2"><a href="ensemble.html#cb161-2"></a>model_stack <span class="op">%>%</span><span class="st"> </span><span class="co"># This defines the structure of the network, i.e. how layers are organized</span></span>
<span id="cb161-3"><a href="ensemble.html#cb161-3"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">8</span>, <span class="dt">activation =</span> <span class="st">'relu'</span>, <span class="dt">input_shape =</span> nb_mods) <span class="op">%>%</span></span>
<span id="cb161-4"><a href="ensemble.html#cb161-4"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">4</span>, <span class="dt">activation =</span> <span class="st">'tanh'</span>) <span class="op">%>%</span></span>
<span id="cb161-5"><a href="ensemble.html#cb161-5"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">1</span>) </span></code></pre></div>
<p>The configuration is very simple. We do not include any optional arguments and hence the model is likely to overfit. As we seek to predict returns, the loss function is the standard <span class="math inline">\(L^2\)</span> norm.</p>
<div class="sourceCode" id="cb162"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb162-1"><a href="ensemble.html#cb162-1"></a>model_stack <span class="op">%>%</span><span class="st"> </span><span class="kw">compile</span>( <span class="co"># Model specification</span></span>
<span id="cb162-2"><a href="ensemble.html#cb162-2"></a> <span class="dt">loss =</span> <span class="st">'mean_squared_error'</span>, <span class="co"># Loss function</span></span>
<span id="cb162-3"><a href="ensemble.html#cb162-3"></a> <span class="dt">optimizer =</span> <span class="kw">optimizer_rmsprop</span>(), <span class="co"># Optimisation method (weight updating)</span></span>
<span id="cb162-4"><a href="ensemble.html#cb162-4"></a> <span class="dt">metrics =</span> <span class="kw">c</span>(<span class="st">'mean_absolute_error'</span>) <span class="co"># Output metric</span></span>
<span id="cb162-5"><a href="ensemble.html#cb162-5"></a>)</span>
<span id="cb162-6"><a href="ensemble.html#cb162-6"></a><span class="kw">summary</span>(model_stack) <span class="co"># Model architecture</span></span></code></pre></div>
<pre><code>## Model: "sequential_5"
## __________________________________________________________________________________________
## Layer (type) Output Shape Param #
## ==========================================================================================
## dense_11 (Dense) (None, 8) 48
## __________________________________________________________________________________________
## dense_12 (Dense) (None, 4) 36
## __________________________________________________________________________________________
## dense_13 (Dense) (None, 1) 5
## ==========================================================================================
## Total params: 89
## Trainable params: 89
## Non-trainable params: 0
## __________________________________________________________________________________________</code></pre>
<div class="sourceCode" id="cb164"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb164-1"><a href="ensemble.html#cb164-1"></a>y_tilde <-<span class="st"> </span>E <span class="op">+</span><span class="st"> </span><span class="kw">matrix</span>(<span class="kw">rep</span>(training_sample<span class="op">$</span>R1M_Usd, nb_mods), <span class="dt">ncol =</span> nb_mods) <span class="co"># Train preds</span></span>
<span id="cb164-2"><a href="ensemble.html#cb164-2"></a>y_test <-<span class="st"> </span>E_test <span class="op">+</span><span class="st"> </span><span class="kw">matrix</span>(<span class="kw">rep</span>(testing_sample<span class="op">$</span>R1M_Usd, nb_mods), <span class="dt">ncol =</span> nb_mods) <span class="co"># Testing</span></span>
<span id="cb164-3"><a href="ensemble.html#cb164-3"></a>fit_NN_stack <-<span class="st"> </span>model_stack <span class="op">%>%</span><span class="st"> </span><span class="kw">fit</span>(y_tilde, <span class="co"># Train features</span></span>
<span id="cb164-4"><a href="ensemble.html#cb164-4"></a> training_sample<span class="op">$</span>R1M_Usd, <span class="co"># Train labels</span></span>
<span id="cb164-5"><a href="ensemble.html#cb164-5"></a> <span class="dt">epochs =</span> <span class="dv">12</span>, <span class="dt">batch_size =</span> <span class="dv">512</span>, <span class="co"># Train parameters</span></span>
<span id="cb164-6"><a href="ensemble.html#cb164-6"></a> <span class="dt">validation_data =</span> <span class="kw">list</span>(y_test, <span class="co"># Test features</span></span>
<span id="cb164-7"><a href="ensemble.html#cb164-7"></a> testing_sample<span class="op">$</span>R1M_Usd) <span class="co"># Test labels</span></span>
<span id="cb164-8"><a href="ensemble.html#cb164-8"></a>)</span>
<span id="cb164-9"><a href="ensemble.html#cb164-9"></a><span class="kw">plot</span>(fit_NN_stack) <span class="co"># Plot, evidently!</span></span></code></pre></div>
<div class="figure" style="text-align: center"><span id="fig:stackNN2"></span>
<img src="ML_factor_files/figure-html/stackNN2-1.png" alt="Training metrics for the ensemble model." width="350px" />
<p class="caption">
FIGURE 11.2: Training metrics for the ensemble model.
</p>
</div>
<p>The performance of the ensemble is again disappointing: the learning curve is flat in Figure <a href="ensemble.html#fig:stackNN2">11.2</a>, hence the rounds of back-propagation are useless. The training adds little value which means that the new overarching layer of ML does not enhance the original predictions. Again, this is because all ML engines seem to be capturing the same patterns and both their linear and non-linear combinations fail to improve their performance.</p>
</div>
</div>
<div id="extensions-1" class="section level2">
<h2><span class="header-section-number">11.3</span> Extensions</h2>
<div id="exogenous-variables" class="section level3">
<h3><span class="header-section-number">11.3.1</span> Exogenous variables</h3>
<p>In a financial context, macro-economic indicators could add value to the process. It is possible that some models perform better under certain conditions and exogenous predictors can help introduce a flavor of <strong>economic-driven conditionality</strong> in the predictions.</p>
<p>Adding macro-variables to the set of predictors (here, predictions) <span class="math inline">\(\tilde{y}_{i,m}\)</span> could seem like one way to achieve this. However, this would amount to mixing predicted values with (possibly scaled) economic indicators and that would not make much sense.</p>
<p>One alternative outside the perimeter of ensembles is to train simple trees on a set of macro-economic indicators. If the labels are the (possibly absolute) errors stemming from the original predictions, then the trees will create clusters of homogeneous error values. This will hint towards which conditions lead to the best and worst forecasts.
We test this idea below, using aggregate data from the Federal Reserve Bank of St. Louis. A simple downloading function is available in the <em>quantmod</em> package. We download and format the data in the next chunk. CPIAUCSL is a code for the consumer price index and T10Y2YM is a code for the term spread (10Y minus 2Y).</p>
<div class="sourceCode" id="cb165"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb165-1"><a href="ensemble.html#cb165-1"></a><span class="kw">library</span>(quantmod) <span class="co"># Package that extracts the data</span></span>
<span id="cb165-2"><a href="ensemble.html#cb165-2"></a><span class="kw">library</span>(lubridate) <span class="co"># Package for date management</span></span>
<span id="cb165-3"><a href="ensemble.html#cb165-3"></a><span class="kw">getSymbols</span>(<span class="st">"CPIAUCSL"</span>, <span class="dt">src =</span> <span class="st">"FRED"</span>) <span class="co"># FRED is the Fed of St Louis</span></span></code></pre></div>
<pre><code>## [1] "CPIAUCSL"</code></pre>
<div class="sourceCode" id="cb167"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb167-1"><a href="ensemble.html#cb167-1"></a><span class="kw">getSymbols</span>(<span class="st">"T10Y2YM"</span>, <span class="dt">src =</span> <span class="st">"FRED"</span>) </span></code></pre></div>
<pre><code>## [1] "T10Y2YM"</code></pre>
<div class="sourceCode" id="cb169"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb169-1"><a href="ensemble.html#cb169-1"></a>cpi <-<span class="st"> </span><span class="kw">fortify</span>(CPIAUCSL) <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb169-2"><a href="ensemble.html#cb169-2"></a><span class="st"> </span><span class="kw">mutate</span> (<span class="dt">inflation =</span> CPIAUCSL <span class="op">/</span><span class="st"> </span><span class="kw">lag</span>(CPIAUCSL) <span class="op">-</span><span class="st"> </span><span class="dv">1</span>) <span class="co"># Inflation via Consumer Price Index</span></span>
<span id="cb169-3"><a href="ensemble.html#cb169-3"></a>ts <-<span class="st"> </span><span class="kw">fortify</span>(T10Y2YM) <span class="co"># Term spread (10Y minus 2Y rates)</span></span>
<span id="cb169-4"><a href="ensemble.html#cb169-4"></a><span class="kw">colnames</span>(ts)[<span class="dv">2</span>] <-<span class="st"> "termspread"</span> <span class="co"># To make things clear</span></span>
<span id="cb169-5"><a href="ensemble.html#cb169-5"></a>ens_data <-<span class="st"> </span>testing_sample <span class="op">%>%</span><span class="st"> </span><span class="co"># Creating aggregate dataset</span></span>
<span id="cb169-6"><a href="ensemble.html#cb169-6"></a><span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(date) <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb169-7"><a href="ensemble.html#cb169-7"></a><span class="st"> </span><span class="kw">cbind</span>(err_NN_test) <span class="op">%>%</span></span>
<span id="cb169-8"><a href="ensemble.html#cb169-8"></a><span class="st"> </span><span class="kw">mutate</span>(<span class="dt">Index =</span> <span class="kw">make_date</span>(<span class="dt">year =</span> lubridate<span class="op">::</span><span class="kw">year</span>(date), <span class="co"># Change date to first day of month</span></span>
<span id="cb169-9"><a href="ensemble.html#cb169-9"></a> <span class="dt">month =</span> lubridate<span class="op">::</span><span class="kw">month</span>(date), </span>
<span id="cb169-10"><a href="ensemble.html#cb169-10"></a> <span class="dt">day =</span> <span class="dv">1</span>)) <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb169-11"><a href="ensemble.html#cb169-11"></a><span class="st"> </span><span class="kw">left_join</span>(cpi) <span class="op">%>%</span><span class="st"> </span><span class="co"># Add CPI to the dataset</span></span>
<span id="cb169-12"><a href="ensemble.html#cb169-12"></a><span class="st"> </span><span class="kw">left_join</span>(ts) <span class="co"># Add termspread</span></span>
<span id="cb169-13"><a href="ensemble.html#cb169-13"></a><span class="kw">head</span>(ens_data) <span class="co"># Show first lines</span></span></code></pre></div>
<pre><code>## date err_NN_test Index CPIAUCSL inflation termspread
## 1 2014-01-31 -0.15116310 2014-01-01 235.288 0.002424175 2.47
## 2 2014-02-28 0.07187722 2014-02-01 235.547 0.001100779 2.38
## 3 2014-03-31 -0.02526811 2014-03-01 236.028 0.002042055 2.32
## 4 2014-04-30 -0.09116794 2014-04-01 236.468 0.001864186 2.29
## 5 2014-05-31 -0.09811382 2014-05-01 236.918 0.001903006 2.17
## 6 2014-06-30 0.03238936 2014-06-01 237.231 0.001321132 2.15</code></pre>
<p>We can now build a tree that tries to explain the accuracy of models as a function of macro-variables.</p>
<div class="sourceCode" id="cb171"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb171-1"><a href="ensemble.html#cb171-1"></a><span class="kw">library</span>(rpart.plot) <span class="co"># Load package for tree plotting</span></span>
<span id="cb171-2"><a href="ensemble.html#cb171-2"></a>fit_ens <-<span class="st"> </span><span class="kw">rpart</span>(<span class="kw">abs</span>(err_NN_test) <span class="op">~</span><span class="st"> </span>inflation <span class="op">+</span><span class="st"> </span>termspread, <span class="co"># Tree model</span></span>
<span id="cb171-3"><a href="ensemble.html#cb171-3"></a> <span class="dt">data =</span> ens_data,</span>
<span id="cb171-4"><a href="ensemble.html#cb171-4"></a> <span class="dt">cp =</span> <span class="fl">0.001</span>) <span class="co"># Complexity param (size of tree)</span></span>
<span id="cb171-5"><a href="ensemble.html#cb171-5"></a><span class="kw">rpart.plot</span>(fit_ens) <span class="co"># Plot tree</span></span></code></pre></div>
<div class="figure" style="text-align: center"><span id="fig:ensfred2"></span>
<img src="ML_factor_files/figure-html/ensfred2-1.png" alt="Conditional performance of a ML engine." width="250px" />
<p class="caption">
FIGURE 11.3: Conditional performance of a ML engine.
</p>
</div>
<p>The tree creates clusters which have homogeneous values of absolute errors. One big cluster gathers 92% of predictions (the left one) and is the one with the smallest average. It corresponds to the periods when the term spread is above 0.29 (in percentage points). The other two groups (when the term spread is below 0.29%) are determined according to the level of inflation. If the latter is positive, then the average absolute error is 7%; if not, it is 12%. This last number, the highest of the three clusters, indicates that when the term spread is low and the inflation negative, the model’s predictions are not trustworthy because their errors have a magnitude twice as large as in other periods. Under these circumstances (which seem to be linked to a dire economic environment), it may be wiser not to use ML-based forecasts.</p>
</div>
<div id="shrinking-inter-model-correlations" class="section level3">
<h3><span class="header-section-number">11.3.2</span> Shrinking inter-model correlations</h3>
<p>
As shown earlier in this chapter, one major problem with ensembles arises when the first layer of predictions is highly correlated. In this case, ensembles are pretty much useless. There are several tricks that can help reduce this correlation, but the simplest and best is probably to alter training samples. If algorithms do not see the same data, they will probably infer different patterns.</p>
<p>There are several ways to split the training data so as to build different subsets of training samples. The first dichotomy is between random versus deterministic splits. Random splits are easy and require only the target sample size to be fixed. Note that the training samples can be overlapping as long as the overlap is not too large. Hence if the original training sample has <span class="math inline">\(I\)</span> instances and the ensemble requires <span class="math inline">\(M\)</span> models, then a subsample size of <span class="math inline">\(\lfloor I/M \rfloor\)</span> may be too conservative especially if the training sample is not very large. In this case <span class="math inline">\(\lfloor I/\sqrt{M} \rfloor\)</span> may be a better alternative. Random forests are one example of ensembles built on random training samples.</p>
<p>One advantage of deterministic splits is that they are easy to reproduce and their outcome does not depend on the random seed. By the nature of factor-based training samples, the second splitting dichotomy is between time and assets. A split within assets is straightforward: each model is trained on a different set of stocks. Note that the choices of sets can be random, or dictated by some factor-based criterion: size, momentum, book-to-market ratio, etc.</p>
<p>A split in dates requires other decisions: is the data split in large blocks (like years) and each model gets a block, which may stand for one particular kind of market condition? Or are the training dates divided more regularly? For instance, if there are 12 models in the ensemble, each model can be trained on data from a given month (e.g., January for the first model, February for the second, etc.).</p>
<p>Below, we train four models on four different years to see if this helps reduce the inter-model correlations. This process is a bit lengthy because the samples and models need to be all redefined. We start by creating the four training samples. The third model works on the small subset of features, hence the sample is smaller.</p>
<div class="sourceCode" id="cb172"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb172-1"><a href="ensemble.html#cb172-1"></a>training_sample_<span class="dv">2007</span> <-<span class="st"> </span>training_sample <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb172-2"><a href="ensemble.html#cb172-2"></a><span class="st"> </span><span class="kw">filter</span>(date <span class="op">></span><span class="st"> "2006-12-31"</span>, date <span class="op"><</span><span class="st"> "2008-01-01"</span>)</span>
<span id="cb172-3"><a href="ensemble.html#cb172-3"></a>training_sample_<span class="dv">2009</span> <-<span class="st"> </span>training_sample <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb172-4"><a href="ensemble.html#cb172-4"></a><span class="st"> </span><span class="kw">filter</span>(date <span class="op">></span><span class="st"> "2008-12-31"</span>, date <span class="op"><</span><span class="st"> "2010-01-01"</span>)</span>
<span id="cb172-5"><a href="ensemble.html#cb172-5"></a>training_sample_<span class="dv">2011</span> <-<span class="st"> </span>training_sample <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb172-6"><a href="ensemble.html#cb172-6"></a><span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(<span class="kw">c</span>(<span class="st">"date"</span>,features_short, <span class="st">"R1M_Usd"</span>)) <span class="op">%>%</span></span>
<span id="cb172-7"><a href="ensemble.html#cb172-7"></a><span class="st"> </span><span class="kw">filter</span>(date <span class="op">></span><span class="st"> "2010-12-31"</span>, date <span class="op"><</span><span class="st"> "2012-01-01"</span>)</span>
<span id="cb172-8"><a href="ensemble.html#cb172-8"></a>training_sample_<span class="dv">2013</span> <-<span class="st"> </span>training_sample <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb172-9"><a href="ensemble.html#cb172-9"></a><span class="st"> </span><span class="kw">filter</span>(date <span class="op">></span><span class="st"> "2012-12-31"</span>, date <span class="op"><</span><span class="st"> "2014-01-01"</span>)</span></code></pre></div>
<p>Then, we proceed to the training of the models. The syntaxes are those used in the previous chapters, nothing new here. We start with a penalized regression. In all predictions below, the original testing sample is used <em>for all models</em>.</p>
<div class="sourceCode" id="cb173"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb173-1"><a href="ensemble.html#cb173-1"></a>y_ens_<span class="dv">2007</span> <-<span class="st"> </span>training_sample_<span class="dv">2007</span><span class="op">$</span>R1M_Usd <span class="co"># Dep. var.</span></span>
<span id="cb173-2"><a href="ensemble.html#cb173-2"></a>x_ens_<span class="dv">2007</span> <-<span class="st"> </span>training_sample_<span class="dv">2007</span> <span class="op">%>%</span><span class="st"> </span><span class="co"># Predictors</span></span>
<span id="cb173-3"><a href="ensemble.html#cb173-3"></a><span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(features) <span class="op">%>%</span><span class="st"> </span><span class="kw">as.matrix</span>() </span>
<span id="cb173-4"><a href="ensemble.html#cb173-4"></a>fit_ens_<span class="dv">2007</span> <-<span class="st"> </span><span class="kw">glmnet</span>(x_ens_<span class="dv">2007</span>, y_ens_<span class="dv">2007</span>, <span class="dt">alpha =</span> <span class="fl">0.1</span>, <span class="dt">lambda =</span> <span class="fl">0.1</span>) <span class="co"># Model</span></span>
<span id="cb173-5"><a href="ensemble.html#cb173-5"></a>err_ens_<span class="dv">2007</span> <-<span class="st"> </span><span class="kw">predict</span>(fit_ens_<span class="dv">2007</span>, x_penalized_test) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># Pred. errs</span></span></code></pre></div>
<p>We continue with a random forest.</p>
<div class="sourceCode" id="cb174"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb174-1"><a href="ensemble.html#cb174-1"></a>fit_ens_<span class="dv">2009</span> <-<span class="st"> </span><span class="kw">randomForest</span>(formula, <span class="co"># Same formula as for simple trees!</span></span>
<span id="cb174-2"><a href="ensemble.html#cb174-2"></a> <span class="dt">data =</span> training_sample_<span class="dv">2009</span>, <span class="co"># Data source: 2009 training sample</span></span>
<span id="cb174-3"><a href="ensemble.html#cb174-3"></a> <span class="dt">sampsize =</span> <span class="dv">4000</span>, <span class="co"># Size of (random) sample for each tree</span></span>
<span id="cb174-4"><a href="ensemble.html#cb174-4"></a> <span class="dt">replace =</span> <span class="ot">FALSE</span>, <span class="co"># Is the sampling done with replacement?</span></span>
<span id="cb174-5"><a href="ensemble.html#cb174-5"></a> <span class="dt">nodesize =</span> <span class="dv">100</span>, <span class="co"># Minimum size of terminal cluster</span></span>
<span id="cb174-6"><a href="ensemble.html#cb174-6"></a> <span class="dt">ntree =</span> <span class="dv">40</span>, <span class="co"># Nb of random trees</span></span>
<span id="cb174-7"><a href="ensemble.html#cb174-7"></a> <span class="dt">mtry =</span> <span class="dv">30</span> <span class="co"># Nb of predictive variables for each tree</span></span>
<span id="cb174-8"><a href="ensemble.html#cb174-8"></a> )</span>
<span id="cb174-9"><a href="ensemble.html#cb174-9"></a>err_ens_<span class="dv">2009</span> <-<span class="st"> </span><span class="kw">predict</span>(fit_ens_<span class="dv">2009</span>, testing_sample) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># Pred. errs</span></span></code></pre></div>
<p>The third model is a boosted tree.</p>
<div class="sourceCode" id="cb175"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb175-1"><a href="ensemble.html#cb175-1"></a>train_features_<span class="dv">2011</span> <-<span class="st"> </span>training_sample_<span class="dv">2011</span> <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb175-2"><a href="ensemble.html#cb175-2"></a><span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(features_short) <span class="op">%>%</span><span class="st"> </span><span class="kw">as.matrix</span>() <span class="co"># Independent variable</span></span>
<span id="cb175-3"><a href="ensemble.html#cb175-3"></a>train_label_<span class="dv">2011</span> <-<span class="st"> </span>training_sample_<span class="dv">2011</span> <span class="op">%>%</span></span>
<span id="cb175-4"><a href="ensemble.html#cb175-4"></a><span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(R1M_Usd) <span class="op">%>%</span><span class="st"> </span><span class="kw">as.matrix</span>() <span class="co"># Dependent variable</span></span>
<span id="cb175-5"><a href="ensemble.html#cb175-5"></a>train_matrix_<span class="dv">2011</span> <-<span class="st"> </span><span class="kw">xgb.DMatrix</span>(<span class="dt">data =</span> train_features_<span class="dv">2011</span>, </span>
<span id="cb175-6"><a href="ensemble.html#cb175-6"></a> <span class="dt">label =</span> train_label_<span class="dv">2011</span>) <span class="co"># XGB format!</span></span>
<span id="cb175-7"><a href="ensemble.html#cb175-7"></a>fit_ens_<span class="dv">2011</span> <-<span class="st"> </span><span class="kw">xgb.train</span>(<span class="dt">data =</span> train_matrix_<span class="dv">2011</span>, <span class="co"># Data source </span></span>
<span id="cb175-8"><a href="ensemble.html#cb175-8"></a> <span class="dt">eta =</span> <span class="fl">0.4</span>, <span class="co"># Learning rate</span></span>
<span id="cb175-9"><a href="ensemble.html#cb175-9"></a> <span class="dt">objective =</span> <span class="st">"reg:linear"</span>, <span class="co"># Objective function</span></span>
<span id="cb175-10"><a href="ensemble.html#cb175-10"></a> <span class="dt">max_depth =</span> <span class="dv">4</span>, <span class="co"># Maximum depth of trees</span></span>
<span id="cb175-11"><a href="ensemble.html#cb175-11"></a> <span class="dt">nrounds =</span> <span class="dv">18</span> <span class="co"># Number of trees used</span></span>
<span id="cb175-12"><a href="ensemble.html#cb175-12"></a> )</span></code></pre></div>
<pre><code>## [21:30:00] WARNING: amalgamation/../src/objective/regression_obj.cu:174: reg:linear is now deprecated in favor of reg:squarederror.</code></pre>
<div class="sourceCode" id="cb177"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb177-1"><a href="ensemble.html#cb177-1"></a>err_ens_<span class="dv">2011</span> <-<span class="st"> </span><span class="kw">predict</span>(fit_ens_<span class="dv">2011</span>, xgb_test) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd <span class="co"># Prediction errors</span></span></code></pre></div>
<p>Finally, the last model is a simple neural network.</p>
<div class="sourceCode" id="cb178"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb178-1"><a href="ensemble.html#cb178-1"></a>NN_features_<span class="dv">2013</span> <-<span class="st"> </span>dplyr<span class="op">::</span><span class="kw">select</span>(training_sample_<span class="dv">2013</span>, features) <span class="op">%>%</span><span class="st"> </span></span>
<span id="cb178-2"><a href="ensemble.html#cb178-2"></a><span class="st"> </span><span class="kw">as.matrix</span>() <span class="co"># Matrix format is important</span></span>
<span id="cb178-3"><a href="ensemble.html#cb178-3"></a>NN_labels_<span class="dv">2013</span> <-<span class="st"> </span>training_sample_<span class="dv">2013</span><span class="op">$</span>R1M_Usd</span>
<span id="cb178-4"><a href="ensemble.html#cb178-4"></a>model_ens_<span class="dv">2013</span> <-<span class="st"> </span><span class="kw">keras_model_sequential</span>()</span>
<span id="cb178-5"><a href="ensemble.html#cb178-5"></a>model_ens_<span class="dv">2013</span> <span class="op">%>%</span><span class="st"> </span><span class="co"># This defines the structure of the network, i.e. how layers are organized</span></span>
<span id="cb178-6"><a href="ensemble.html#cb178-6"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">16</span>, <span class="dt">activation =</span> <span class="st">'relu'</span>, <span class="dt">input_shape =</span> <span class="kw">ncol</span>(NN_features_<span class="dv">2013</span>)) <span class="op">%>%</span></span>
<span id="cb178-7"><a href="ensemble.html#cb178-7"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">8</span>, <span class="dt">activation =</span> <span class="st">'tanh'</span>) <span class="op">%>%</span></span>
<span id="cb178-8"><a href="ensemble.html#cb178-8"></a><span class="st"> </span><span class="kw">layer_dense</span>(<span class="dt">units =</span> <span class="dv">1</span>) </span>
<span id="cb178-9"><a href="ensemble.html#cb178-9"></a>model_ens_<span class="dv">2013</span> <span class="op">%>%</span><span class="st"> </span><span class="kw">compile</span>( <span class="co"># Model specification</span></span>
<span id="cb178-10"><a href="ensemble.html#cb178-10"></a> <span class="dt">loss =</span> <span class="st">'mean_squared_error'</span>, <span class="co"># Loss function</span></span>
<span id="cb178-11"><a href="ensemble.html#cb178-11"></a> <span class="dt">optimizer =</span> <span class="kw">optimizer_rmsprop</span>(), <span class="co"># Optimisation method (weight updating)</span></span>
<span id="cb178-12"><a href="ensemble.html#cb178-12"></a> <span class="dt">metrics =</span> <span class="kw">c</span>(<span class="st">'mean_absolute_error'</span>) <span class="co"># Output metric</span></span>
<span id="cb178-13"><a href="ensemble.html#cb178-13"></a>)</span>
<span id="cb178-14"><a href="ensemble.html#cb178-14"></a>model_ens_<span class="dv">2013</span> <span class="op">%>%</span><span class="st"> </span><span class="kw">fit</span>(NN_features_<span class="dv">2013</span>, <span class="co"># Training features</span></span>
<span id="cb178-15"><a href="ensemble.html#cb178-15"></a> NN_labels_<span class="dv">2013</span>, <span class="co"># Training labels</span></span>
<span id="cb178-16"><a href="ensemble.html#cb178-16"></a> <span class="dt">epochs =</span> <span class="dv">9</span>, <span class="dt">batch_size =</span> <span class="dv">128</span> <span class="co"># Training parameters</span></span>
<span id="cb178-17"><a href="ensemble.html#cb178-17"></a>)</span>
<span id="cb178-18"><a href="ensemble.html#cb178-18"></a>err_ens_<span class="dv">2013</span> <-<span class="st"> </span><span class="kw">predict</span>(model_ens_<span class="dv">2013</span>, NN_test_features) <span class="op">-</span><span class="st"> </span>testing_sample<span class="op">$</span>R1M_Usd</span></code></pre></div>
<p>Endowed with the errors of the four models, we can compute their correlation matrix.</p>
<div class="sourceCode" id="cb179"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb179-1"><a href="ensemble.html#cb179-1"></a>E_subtraining <-<span class="st"> </span><span class="kw">tibble</span>(err_ens_<span class="dv">2007</span>,</span>
<span id="cb179-2"><a href="ensemble.html#cb179-2"></a> err_ens_<span class="dv">2009</span>,</span>
<span id="cb179-3"><a href="ensemble.html#cb179-3"></a> err_ens_<span class="dv">2011</span>,</span>
<span id="cb179-4"><a href="ensemble.html#cb179-4"></a> err_ens_<span class="dv">2013</span>)</span>
<span id="cb179-5"><a href="ensemble.html#cb179-5"></a><span class="kw">cor</span>(E_subtraining)</span></code></pre></div>
<pre><code>## err_ens_2007 err_ens_2009 err_ens_2011 err_ens_2013
## err_ens_2007 1.0000000 0.9542497 0.6460091 0.9996685
## err_ens_2009 0.9542497 1.0000000 0.6317006 0.9549044
## err_ens_2011 0.6460091 0.6317006 1.0000000 0.6464010
## err_ens_2013 0.9996685 0.9549044 0.6464010 1.0000000</code></pre>
<p>The results are overall disappointing. Only one model manages to extract patterns that are somewhat different from the other ones, resulting in a 65% correlation across the board. Neural networks (on 2013 data) and penalized regressions (2007) remain highly correlated. One possible explanation could be that the models capture mainly noise and little signal. Working with long-term labels like annual returns could help improve diversification across models. </p>
</div>
</div>
<div id="exercise" class="section level2">
<h2><span class="header-section-number">11.4</span> Exercise</h2>
<p>Build an integrated ensemble on top of 3 neural networks trained entirely with Keras. Each network obtains one third of predictors as input. The three networks yield a classification (yes/no or buy/sell). The overarching network aggregates the three outputs into a final decision. Evaluate its performance on the testing sample. Use the functional API.</p>
</div>
</div>
<h3>References</h3>
<div id="refs" class="references">
<div id="ref-bates1969combination">
<p>Bates, John M, and Clive WJ Granger. 1969. “The Combination of Forecasts.” <em>Journal of the Operational Research Society</em> 20 (4): 451–68.</p>
</div>
<div id="ref-bonaccolto2019developing">
<p>Bonaccolto, Giovanni, and Sandra Paterlini. 2019. “Developing New Portfolio Strategies by Aggregation.” <em>Annals of Operations Research</em>, 1–39.</p>
</div>
<div id="ref-breiman1996stacked">
<p>Breiman, Leo. 1996. “Stacked Regressions.” <em>Machine Learning</em> 24 (1): 49–64.</p>
</div>
<div id="ref-claeskens2008model">
<p>Claeskens, Gerda, and Nils Lid Hjort. 2008. <em>Model Selection and Model Averaging</em>. Cambridge University Press.</p>
</div>
<div id="ref-clark2009improving">
<p>Clark, Todd E, and Michael W McCracken. 2009. “Improving Forecast Accuracy by Combining Recursive and Rolling Forecasts.” <em>International Economic Review</em> 50 (2): 363–95.</p>
</div>
<div id="ref-de2020subsampled">
<p>De Nard, Gianluca, Simon Hediger, and Markus Leippold. 2020. “Subsampled Factor Models for Asset Pricing: The Rise of Vasa.” <em>SSRN Working Paper</em> 3557957.</p>
</div>
<div id="ref-donaldson1996forecast">
<p>Donaldson, R Glen, and Mark Kamstra. 1996. “Forecast Combining with Neural Networks.” <em>Journal of Forecasting</em> 15 (1): 49–61.</p>
</div>
<div id="ref-freund1997decision">
<p>Freund, Yoav, and Robert E Schapire. 1997. “A Decision-Theoretic Generalization of on-Line Learning and an Application to Boosting.” <em>Journal of Computer and System Sciences</em> 55 (1): 119–39.</p>
</div>
<div id="ref-gaba2017combining">
<p>Gaba, Anil, Ilia Tsetlin, and Robert L Winkler. 2017. “Combining Interval Forecasts.” <em>Decision Analysis</em> 14 (1): 1–20.</p>
</div>
<div id="ref-genre2013combining">
<p>Genre, Véronique, Geoff Kenny, Aidan Meyler, and Allan Timmermann. 2013. “Combining Expert Forecasts: Can Anything Beat the Simple Average?” <em>International Journal of Forecasting</em> 29 (1): 108–21.</p>
</div>
<div id="ref-gospodinov2020generalized">
<p>Gospodinov, Nikolay, and Esfandiar Maasoumi. 2020. “Generalized Aggregation of Misspecified Models: With an Application to Asset Pricing.” <em>Journal of Econometrics</em> Forthcoming.</p>
</div>
<div id="ref-grushka2016ensembles">
<p>Grushka-Cockayne, Yael, Victor Richmond R Jose, and Kenneth C Lichtendahl Jr. 2016. “Ensembles of Overfit and Overconfident Forecasts.” <em>Management Science</em> 63 (4): 1110–30.</p>
</div>
<div id="ref-harrald1997evolving">
<p>Harrald, Paul G, and Mark Kamstra. 1997. “Evolving Artificial Neural Networks to Combine Financial Forecasts.” <em>IEEE Transactions on Evolutionary Computation</em> 1 (1): 40–52.</p>
</div>
<div id="ref-hassan2007fusion">
<p>Hassan, Md Rafiul, Baikunth Nath, and Michael Kirley. 2007. “A Fusion Model of Hmm, Ann and Ga for Stock Market Forecasting.” <em>Expert Systems with Applications</em> 33 (1): 171–80.</p>
</div>
<div id="ref-huang2005forecasting">
<p>Huang, Wei, Yoshiteru Nakamori, and Shou-Yang Wang. 2005. “Forecasting Stock Market Movement Direction with Support Vector Machine.” <em>Computers & Operations Research</em> 32 (10): 2513–22.</p>
</div>
<div id="ref-jacobs1991adaptive">
<p>Jacobs, Robert A, Michael I Jordan, Steven J Nowlan, Geoffrey E Hinton, and others. 1991. “Adaptive Mixtures of Local Experts.” <em>Neural Computation</em> 3 (1): 79–87.</p>
</div>
<div id="ref-jagannathan2003risk">
<p>Jagannathan, Ravi, and Tongshu Ma. 2003. “Risk Reduction in Large Portfolios: Why Imposing the Wrong Constraints Helps.” <em>Journal of Finance</em> 58 (4): 1651–83.</p>
</div>
<div id="ref-leung2001using">
<p>Leung, Mark T, Hazem Daouk, and An-Sing Chen. 2001. “Using Investment Portfolio Return to Combine Forecasts: A Multiobjective Approach.” <em>European Journal of Operational Research</em> 134 (1): 84–102.</p>
</div>
<div id="ref-mascio2020market">
<p>Mascio, David A, Frank J Fabozzi, and J Kenton Zumwalt. 2020. “Market Timing Using Combined Forecasts and Machine Learning.” <em>Journal of Forecasting</em> Forthcoming.</p>
</div>
<div id="ref-okun2011ensembles">
<p>Okun, Oleg, Giorgio Valentini, and Matteo Re. 2011. <em>Ensembles in Machine Learning Applications</em>. Vol. 373. Springer Science & Business Media.</p>
</div>
<div id="ref-pesaran2011forecast">
<p>Pesaran, M Hashem, and Andreas Pick. 2011. “Forecast Combination Across Estimation Windows.” <em>Journal of Business & Economic Statistics</em> 29 (2): 307–18.</p>
</div>
<div id="ref-pike2020combining">
<p>Pike, Tyler, and Francisco Vazquez-Grande. 2020. “Combining Forecasts: Can Machines Beat the Average?” <em>SSRN Working Paper</em> 3691117.</p>
</div>
<div id="ref-razin2020drowning">
<p>Razin, Ronny, and Gilat Levy. 2020. “A Maximum Likelihood Approach to Combining Forecasts.” <em>Theoretical Economics</em> Forthcoming.</p>
</div>
<div id="ref-schapire1990strength">
<p>Schapire, Robert E. 1990. “The Strength of Weak Learnability.” <em>Machine Learning</em> 5 (2): 197–227.</p>
</div>
<div id="ref-schapire2012boosting">
<p>Schapire, Robert E, and Yoav Freund. 2012. <em>Boosting: Foundations and Algorithms</em>. MIT Press.</p>
</div>
<div id="ref-seni2010ensemble">
<p>Seni, Giovanni, and John F Elder. 2010. “Ensemble Methods in Data Mining: Improving Accuracy Through Combining Predictions.” <em>Synthesis Lectures on Data Mining and Knowledge Discovery</em> 2 (1): 1–126.</p>
</div>
<div id="ref-sun2020time">
<p>Sun, Yuying, YM Hong, T Lee, Shouyang Wang, and Xinyu Zhang. 2020. “Time-Varying Model Averaging.” <em>Journal of Econometrics</em> Forthcoming.</p>
</div>
<div id="ref-virtanen1987forecasting">
<p>Virtanen, Ilkka, and Paavo Yli-Olli. 1987. “Forecasting Stock Market Prices in a Thin Security Market.” <em>Omega</em> 15 (2): 145–55.</p>
</div>
<div id="ref-von1972probabilistic">
<p>Von Holstein, Carl-Axel S Staël. 1972. “Probabilistic Forecasting: An Experiment Related to the Stock Market.” <em>Organizational Behavior and Human Performance</em> 8 (1): 139–58.</p>
</div>
<div id="ref-wang2012stock">
<p>Wang, Ju-Jie, Jian-Zhou Wang, Zhe-George Zhang, and Shu-Po Guo. 2012. “Stock Index Forecasting Based on a Hybrid Model.” <em>Omega</em> 40 (6): 758–66.</p>
</div>
<div id="ref-wolpert1992stacked">
<p>Wolpert, David H. 1992b. “Stacked Generalization.” <em>Neural Networks</em> 5 (2): 241–59.</p>
</div>
<div id="ref-zhang2012ensemble">
<p>Zhang, Cha, and Yunqian Ma. 2012. <em>Ensemble Machine Learning: Methods and Applications</em>. Springer.</p>
</div>
<div id="ref-zhou2012ensemble">
<p>Zhou, Zhi-Hua. 2012. <em>Ensemble Methods: Foundations and Algorithms</em>. Chapman & Hall / CRC.</p>
</div>
</div>
</section>
</div>
</div>
</div>
<a href="valtune.html" class="navigation navigation-prev " aria-label="Previous page"><i class="fa fa-angle-left"></i></a>
<a href="backtest.html" class="navigation navigation-next " aria-label="Next page"><i class="fa fa-angle-right"></i></a>
</div>
</div>
<script src="libs/gitbook-2.6.7/js/app.min.js"></script>
<script src="libs/gitbook-2.6.7/js/lunr.js"></script>
<script src="libs/gitbook-2.6.7/js/clipboard.min.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-search.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-sharing.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-fontsettings.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-bookdown.js"></script>
<script src="libs/gitbook-2.6.7/js/jquery.highlight.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-clipboard.js"></script>
<script>
// Bookdown-generated gitbook initialization: configures the reader UI
// (sharing buttons, font settings, TOC behavior, toolbar, search).
gitbook.require(["gitbook"], function(gitbook) {
  gitbook.start({
    // Which social-sharing buttons appear in the toolbar.
    "sharing": {
      "github": false,
      "facebook": false,
      "twitter": true,
      "linkedin": true,
      "weibo": false,
      "instapaper": false,
      "vk": false,
      // Buttons available in the "all" dropdown menu.
      "all": ["facebook", "twitter", "linkedin", "weibo", "instapaper"]
    },
    // Defaults for the reader's font-settings panel.
    "fontsettings": {
      "theme": "white",
      "family": "sans",
      "size": 2
    },
    // "edit"/"history"/"view" links are disabled for this build.
    "edit": null,
    "history": {
      "link": null,
      "text": null
    },
    "view": {
      "link": null,
      "text": null
    },
    "download": null,
    // Collapse the sidebar TOC at the section level; highlight on scroll.
    "toc": {
      "collapse": "section",
      "scroll_highlight": true
    },
    "toolbar": {
      "position": "fixed",
      "download": false
    },
    "search": true,
    "info": true
  });
});
</script>
<!-- dynamically load mathjax for compatibility with self-contained -->
<script>
(function () {
var script = document.createElement("script");
script.type = "text/javascript";
var src = "true";
if (src === "" || src === "true") src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML";
if (location.protocol !== "file:")
if (/^https?:/.test(src))
src = src.replace(/^https?:/, '');
script.src = src;
document.getElementsByTagName("head")[0].appendChild(script);
})();
</script>
</body>
</html>