-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path03-Deep Learning.html
More file actions
1106 lines (1056 loc) · 123 KB
/
03-Deep Learning.html
File metadata and controls
1106 lines (1056 loc) · 123 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!doctype html>
<html class="no-js" lang="en" data-content_root="./">
<head>
<meta charset="utf-8">
<!-- Single viewport declaration (the generated page previously emitted it twice,
     once with and once without a trailing space in the content value). -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Advertise that the page supports both colour schemes. -->
<meta name="color-scheme" content="light dark">
<link rel="index" title="Index" href="genindex.html">
<link rel="search" title="Search" href="search.html">
<link rel="next" title="CPU Acceleration" href="04-CPU%20Acceleration.html">
<link rel="prev" title="Multiple Dispatch" href="02-Multiple%20Dispatch.html">
<!-- Generated with Sphinx 8.1.3 and Furo 2024.08.06 -->
<title>Deep Learning - RLtools Documentation</title>
<!-- type="text/css" omitted below: it is the default for rel="stylesheet". -->
<link rel="stylesheet" href="_static/pygments.css?v=8f2a1f02">
<link rel="stylesheet" href="_static/styles/furo.css?v=354aac6f">
<link rel="stylesheet" href="_static/nbsphinx-code-cells.css?v=2aa19091">
<link rel="stylesheet" href="_static/styles/furo-extensions.css?v=302659d7">
<link rel="stylesheet" href="_static/overrides.css?v=ca018f06">
<style>
/* Furo theme customisation: brand colours and the code-block palette. */
body {
  --color-code-background: #f8f8f8;
  --color-code-foreground: black;
  --color-brand-primary: #639694;
  --color-brand-content: #639694;
  --color-admonition-background: orange;
  --sidebar_hide_name: True;
}
@media not print {
  /* User explicitly selected the dark theme. */
  body[data-theme="dark"] {
    --color-code-background: #202020;
    --color-code-foreground: #d0d0d0;
  }
  /* "auto" theme: follow the OS preference unless light mode is forced. */
  @media (prefers-color-scheme: dark) {
    body:not([data-theme="light"]) {
      --color-code-background: #202020;
      --color-code-foreground: #d0d0d0;
    }
  }
}
</style></head>
<body>
<script>
// Restore the colour-scheme choice persisted by the theme toggle; when nothing
// has been stored yet (getItem returns null), fall back to "auto".
const storedTheme = localStorage.getItem("theme");
document.body.dataset.theme = storedTheme || "auto";
</script>
<svg xmlns="http://www.w3.org/2000/svg" style="display: none;">
<!-- Reusable icon definitions; referenced elsewhere in the page via <use href="#svg-..."> . -->
<!-- Table-of-contents icon. -->
<symbol id="svg-toc" viewBox="0 0 24 24">
<title>Contents</title>
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 1024 1024">
<path d="M408 442h480c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8H408c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8zm-8 204c0 4.4 3.6 8 8 8h480c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8H408c-4.4 0-8 3.6-8 8v56zm504-486H120c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h784c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zm0 632H120c-4.4 0-8 3.6-8 8v56c0 4.4 3.6 8 8 8h784c4.4 0 8-3.6 8-8v-56c0-4.4-3.6-8-8-8zM115.4 518.9L271.7 642c5.8 4.6 14.4.5 14.4-6.9V388.9c0-7.4-8.5-11.5-14.4-6.9L115.4 505.1a8.74 8.74 0 0 0 0 13.8z"/>
</svg>
</symbol>
<!-- Hamburger menu icon (Feather icon set). -->
<symbol id="svg-menu" viewBox="0 0 24 24">
<title>Menu</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather-menu">
<line x1="3" y1="12" x2="21" y2="12"></line>
<line x1="3" y1="6" x2="21" y2="6"></line>
<line x1="3" y1="18" x2="21" y2="18"></line>
</svg>
</symbol>
<!-- Chevron used for expandable navigation-tree items. -->
<symbol id="svg-arrow-right" viewBox="0 0 24 24">
<title>Expand</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather-chevron-right">
<polyline points="9 18 15 12 9 6"></polyline>
</svg>
</symbol>
<!-- Light-mode (sun) icon for the theme toggle. -->
<symbol id="svg-sun" viewBox="0 0 24 24">
<title>Light mode</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round" class="feather-sun">
<circle cx="12" cy="12" r="5"></circle>
<line x1="12" y1="1" x2="12" y2="3"></line>
<line x1="12" y1="21" x2="12" y2="23"></line>
<line x1="4.22" y1="4.22" x2="5.64" y2="5.64"></line>
<line x1="18.36" y1="18.36" x2="19.78" y2="19.78"></line>
<line x1="1" y1="12" x2="3" y2="12"></line>
<line x1="21" y1="12" x2="23" y2="12"></line>
<line x1="4.22" y1="19.78" x2="5.64" y2="18.36"></line>
<line x1="18.36" y1="5.64" x2="19.78" y2="4.22"></line>
</svg>
</symbol>
<!-- Dark-mode (moon) icon for the theme toggle (Tabler icon set). -->
<symbol id="svg-moon" viewBox="0 0 24 24">
<title>Dark mode</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round" class="icon-tabler-moon">
<path stroke="none" d="M0 0h24v24H0z" fill="none" />
<path d="M12 3c.132 0 .263 0 .393 0a7.5 7.5 0 0 0 7.92 12.446a9 9 0 1 1 -8.313 -12.454z" />
</svg>
</symbol>
<!-- "Auto" theme icon shown while the resolved colour scheme is light. -->
<symbol id="svg-sun-with-moon" viewBox="0 0 24 24">
<title>Auto light/dark, in light mode</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round"
class="icon-custom-derived-from-feather-sun-and-tabler-moon">
<path style="opacity: 50%" d="M 5.411 14.504 C 5.471 14.504 5.532 14.504 5.591 14.504 C 3.639 16.319 4.383 19.569 6.931 20.352 C 7.693 20.586 8.512 20.551 9.25 20.252 C 8.023 23.207 4.056 23.725 2.11 21.184 C 0.166 18.642 1.702 14.949 4.874 14.536 C 5.051 14.512 5.231 14.5 5.411 14.5 L 5.411 14.504 Z"/>
<line x1="14.5" y1="3.25" x2="14.5" y2="1.25"/>
<line x1="14.5" y1="15.85" x2="14.5" y2="17.85"/>
<line x1="10.044" y1="5.094" x2="8.63" y2="3.68"/>
<line x1="19" y1="14.05" x2="20.414" y2="15.464"/>
<line x1="8.2" y1="9.55" x2="6.2" y2="9.55"/>
<line x1="20.8" y1="9.55" x2="22.8" y2="9.55"/>
<line x1="10.044" y1="14.006" x2="8.63" y2="15.42"/>
<line x1="19" y1="5.05" x2="20.414" y2="3.636"/>
<circle cx="14.5" cy="9.55" r="3.6"/>
</svg>
</symbol>
<!-- "Auto" theme icon shown while the resolved colour scheme is dark. -->
<symbol id="svg-moon-with-sun" viewBox="0 0 24 24">
<title>Auto light/dark, in dark mode</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round"
class="icon-custom-derived-from-feather-sun-and-tabler-moon">
<path d="M 8.282 7.007 C 8.385 7.007 8.494 7.007 8.595 7.007 C 5.18 10.184 6.481 15.869 10.942 17.24 C 12.275 17.648 13.706 17.589 15 17.066 C 12.851 22.236 5.91 23.143 2.505 18.696 C -0.897 14.249 1.791 7.786 7.342 7.063 C 7.652 7.021 7.965 7 8.282 7 L 8.282 7.007 Z"/>
<line style="opacity: 50%" x1="18" y1="3.705" x2="18" y2="2.5"/>
<line style="opacity: 50%" x1="18" y1="11.295" x2="18" y2="12.5"/>
<line style="opacity: 50%" x1="15.316" y1="4.816" x2="14.464" y2="3.964"/>
<line style="opacity: 50%" x1="20.711" y1="10.212" x2="21.563" y2="11.063"/>
<line style="opacity: 50%" x1="14.205" y1="7.5" x2="13.001" y2="7.5"/>
<line style="opacity: 50%" x1="21.795" y1="7.5" x2="23" y2="7.5"/>
<line style="opacity: 50%" x1="15.316" y1="10.184" x2="14.464" y2="11.036"/>
<line style="opacity: 50%" x1="20.711" y1="4.789" x2="21.563" y2="3.937"/>
<circle style="opacity: 50%" cx="18" cy="7.5" r="2.169"/>
</svg>
</symbol>
<!-- Pencil icon (Tabler "pencil-code"). A <title> is added for consistency with the
     other symbols so the icon has an accessible name when referenced standalone.
     NOTE(review): assumed to back an "edit this page" control - confirm against usage. -->
<symbol id="svg-pencil" viewBox="0 0 24 24">
<title>Edit this page</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round" class="icon-tabler-pencil-code">
<path d="M4 20h4l10.5 -10.5a2.828 2.828 0 1 0 -4 -4l-10.5 10.5v4" />
<path d="M13.5 6.5l4 4" />
<path d="M20 21l2 -2l-2 -2" />
<path d="M17 17l-2 2l2 2" />
</svg>
</symbol>
<!-- Eye icon (Tabler "eye-code"); referenced by the "View this page" link in the
     content header, so the <title> matches that link's label. Every other symbol
     in this sheet already carries a <title>; this one was missing it. -->
<symbol id="svg-eye" viewBox="0 0 24 24">
<title>View this page</title>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none" stroke="currentColor"
stroke-width="1" stroke-linecap="round" stroke-linejoin="round" class="icon-tabler-eye-code">
<path stroke="none" d="M0 0h24v24H0z" fill="none" />
<path d="M10 12a2 2 0 1 0 4 0a2 2 0 0 0 -4 0" />
<path
d="M11.11 17.958c-3.209 -.307 -5.91 -2.293 -8.11 -5.958c2.4 -4 5.4 -6 9 -6c3.6 0 6.6 2 9 6c-.21 .352 -.427 .688 -.647 1.008" />
<path d="M20 21l2 -2l-2 -2" />
<path d="M17 17l-2 2l2 2" />
</svg>
</symbol>
</svg>
<!-- CSS checkbox "hack": these checkboxes hold the open/closed state of the
     navigation drawer and the table-of-contents drawer; the <label for="..">
     elements elsewhere on the page toggle them without JavaScript. -->
<input type="checkbox" class="sidebar-toggle" name="__navigation" id="__navigation">
<input type="checkbox" class="sidebar-toggle" name="__toc" id="__toc">
<!-- Full-page overlays: clicking one unchecks its checkbox and closes the drawer. -->
<label class="overlay sidebar-overlay" for="__navigation">
<div class="visually-hidden">Hide navigation sidebar</div>
</label>
<label class="overlay toc-overlay" for="__toc">
<div class="visually-hidden">Hide table of contents sidebar</div>
</label>
<!-- Skip link: lets keyboard users jump straight past the chrome to the article. -->
<a class="skip-to-content muted-link" href="#furo-main-content">Skip to content</a>
<div class="page">
<!-- Mobile-only header: hamburger toggle on the left, brand in the centre,
     theme toggle and table-of-contents toggle on the right. -->
<header class="mobile-header">
<div class="header-left">
<label class="nav-overlay-icon" for="__navigation">
<div class="visually-hidden">Toggle site navigation sidebar</div>
<!-- Icon is decorative: the visually-hidden text above names the control. -->
<i class="icon"><svg aria-hidden="true"><use href="#svg-menu"></use></svg></i>
</label>
</div>
<div class="header-center">
<a href="index.html"><div class="brand">RLtools Documentation</div></a>
</div>
<div class="header-right">
<div class="theme-toggle-container theme-toggle-header">
<!-- type="button" prevents the submit-by-default behaviour of bare <button>. -->
<button class="theme-toggle" type="button">
<div class="visually-hidden">Toggle Light / Dark / Auto color theme</div>
<svg class="theme-icon-when-auto-light"><use href="#svg-sun-with-moon"></use></svg>
<svg class="theme-icon-when-auto-dark"><use href="#svg-moon-with-sun"></use></svg>
<svg class="theme-icon-when-dark"><use href="#svg-moon"></use></svg>
<svg class="theme-icon-when-light"><use href="#svg-sun"></use></svg>
</button>
</div>
<label class="toc-overlay-icon toc-header-icon" for="__toc">
<div class="visually-hidden">Toggle table of contents sidebar</div>
<!-- Icon is decorative: the visually-hidden text above names the control. -->
<i class="icon"><svg aria-hidden="true"><use href="#svg-toc"></use></svg></i>
</label>
</div>
</header>
<!-- Left sidebar drawer: brand, search form, and the Sphinx toctree navigation. -->
<aside class="sidebar-drawer">
<div class="sidebar-container">
<div class="sidebar-sticky"><a class="sidebar-brand" href="index.html">
<div class="sidebar-logo-container">
<img class="sidebar-logo" src="_static/banner.svg" alt="Logo"/>
</div>
<span class="sidebar-brand-text">RLtools Documentation</span>
<!-- Search form submits a GET to Sphinx's search page; the two hidden inputs
     carry fixed query parameters expected by search.html. -->
</a><form class="sidebar-search-container" method="get" action="search.html" role="search">
<input class="sidebar-search" placeholder="Search" name="q" aria-label="Search">
<input type="hidden" name="check_keywords" value="yes">
<input type="hidden" name="area" value="default">
</form>
<!-- Document tree generated from the Sphinx toctree; "current" marks this page. -->
<div id="searchbox"></div><div class="sidebar-scroll"><div class="sidebar-tree">
<p class="caption" role="heading"><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="overview.html">Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="getting_started.html">Getting Started</a></li>
<li class="toctree-l1"><a class="reference internal" href="01-Containers.html">Containers</a></li>
<li class="toctree-l1"><a class="reference internal" href="02-Multiple%20Dispatch.html">Multiple Dispatch</a></li>
<li class="toctree-l1 current current-page"><a class="current reference internal" href="#">Deep Learning</a></li>
<li class="toctree-l1"><a class="reference internal" href="04-CPU%20Acceleration.html">CPU Acceleration</a></li>
<li class="toctree-l1"><a class="reference internal" href="05-MNIST%20Classification.html">MNIST Classification</a></li>
<li class="toctree-l1"><a class="reference internal" href="06-Deep%20Reinforcement%20Learning.html">Deep Reinforcement Learning</a></li>
<li class="toctree-l1"><a class="reference internal" href="07-The%20Loop%20Interface.html">The Loop Interface</a></li>
<li class="toctree-l1"><a class="reference internal" href="08-Custom%20Environment.html">Custom Environment</a></li>
<li class="toctree-l1"><a class="reference internal" href="09-Python%20Interface.html">Python Interface</a></li>
<li class="toctree-l1"><a class="reference internal" href="09-Python%20Interface.html#Embedding-Exported-Checkpoints">Embedding Exported Checkpoints</a></li>
<li class="toctree-l1"><a class="reference internal" href="10-Experiment%20Tracking.html">Experiment Tracking</a></li>
<li class="toctree-l1"><a class="reference internal" href="11-RNN.html">Recurrent Neural Networks (RNNs)</a></li>
<li class="toctree-l1"><a class="reference internal" href="12-Memory.html">Memory</a></li>
</ul>
</div>
</div>
</div>
</div>
</aside>
<div class="main">
<div class="content">
<div class="article-container">
<!-- Scroll-to-top link. The arrow icon is decorative (hidden from assistive
     technology) because the visible "Back to top" text names the link. -->
<a href="#" class="back-to-top muted-link">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" aria-hidden="true">
<path d="M13 20h-2V8l-5.5 5.5-1.42-1.42L12 4.16l7.92 7.92-1.42 1.42L13 8v12z"></path>
</svg>
<span>Back to top</span>
</a>
<!-- Per-article action icons: view-source link, theme toggle, ToC toggle. -->
<div class="content-icon-container">
<div class="view-this-page">
<a class="muted-link" href="_sources/03-Deep Learning.ipynb.txt" title="View this page">
<!-- Icon is decorative: the visually-hidden span below names the link. -->
<svg aria-hidden="true"><use href="#svg-eye"></use></svg>
<span class="visually-hidden">View this page</span>
</a>
</div>
<div class="theme-toggle-container theme-toggle-content">
<!-- type="button" prevents the submit-by-default behaviour of bare <button>. -->
<button class="theme-toggle" type="button">
<div class="visually-hidden">Toggle Light / Dark / Auto color theme</div>
<svg class="theme-icon-when-auto-light"><use href="#svg-sun-with-moon"></use></svg>
<svg class="theme-icon-when-auto-dark"><use href="#svg-moon-with-sun"></use></svg>
<svg class="theme-icon-when-dark"><use href="#svg-moon"></use></svg>
<svg class="theme-icon-when-light"><use href="#svg-sun"></use></svg>
</button>
</div>
<label class="toc-overlay-icon toc-content-icon" for="__toc">
<div class="visually-hidden">Toggle table of contents sidebar</div>
<!-- Icon is decorative: the visually-hidden text above names the control. -->
<i class="icon"><svg aria-hidden="true"><use href="#svg-toc"></use></svg></i>
</label>
</div>
<article role="main" id="furo-main-content">
<section id="Deep-Learning">
<h1>Deep Learning<a class="headerlink" href="#Deep-Learning" title="Link to this heading">¶</a></h1>
<p><a class="reference external" href="https://mybinder.org/v2/gh/rl-tools/documentation/binder?labpath=03-Deep%20Learning.ipynb"><img alt="Binder" src="https://mybinder.org/badge_logo.svg" /></a></p>
<p>Because of the static multiple dispatch paradigm laid out in <a class="reference internal" href="02-Multiple%20Dispatch.html"><span class="doc">Multiple Dispatch</span></a>, we need to first include the primitive operations for the device(s) we are intending to use, such that the algorithms (and data structures) we later include for deep learning can use them.</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[1]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/operations/cpu.h></span>
</pre></div>
</div>
</div>
<p>Note that the Adam instance operations are only required later but since some of these operations need to later be called by the dense layer operations (the <code class="docutils literal notranslate"><span class="pre">reset_optimizer_state</span></code> in particular), it needs to be included beforehand. Such that these operations are visible when the compiler reaches the call sites:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[2]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/nn/optimizers/adam/instance/operations_generic.h></span>
<span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/nn/layers/dense/operations_cpu.h></span>
</pre></div>
</div>
</div>
<p>We set up the environment as described in <a class="reference internal" href="01-Containers.html"><span class="doc">Containers</span></a> with the additional definition of a <code class="docutils literal notranslate"><span class="pre">TYPE_POLICY</span></code>. The <code class="docutils literal notranslate"><span class="pre">TYPE_POLICY</span></code> allows configuring different types (e.g. <code class="docutils literal notranslate"><span class="pre">float</span></code>, <code class="docutils literal notranslate"><span class="pre">double</span></code>, <code class="docutils literal notranslate"><span class="pre">bf16</span></code>) for different parts of the system. You can find more information on how to set up e.g. mixed-precision training in the (not yet existing) Mixed-Precision Training section. Since we don’t specify any <code class="docutils literal notranslate"><span class="pre">USE_CASE</span></code> when setting up the <code class="docutils literal notranslate"><span class="pre">numeric_types::Policy</span></code>, we will use
<code class="docutils literal notranslate"><span class="pre">T</span> <span class="pre">=</span> <span class="pre">float</span></code> everywhere:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[3]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">namespace</span><span class="w"> </span><span class="nn">rlt</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="nn">rl_tools</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">DEVICE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">devices</span><span class="o">::</span><span class="n">DefaultCPU</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">RNG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">DEVICE</span><span class="o">::</span><span class="n">SPEC</span><span class="o">::</span><span class="n">RANDOM</span><span class="o">::</span><span class="n">ENGINE</span><span class="o"><></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">T</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="kt">float</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">TYPE_POLICY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">numeric_types</span><span class="o">::</span><span class="n">Policy</span><span class="o"><</span><span class="n">T</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="k">typename</span><span class="w"> </span><span class="nc">DEVICE</span><span class="o">::</span><span class="n">index_t</span><span class="p">;</span>
<span class="n">DEVICE</span><span class="w"> </span><span class="n">device</span><span class="p">;</span>
<span class="n">RNG</span><span class="w"> </span><span class="n">rng</span><span class="p">;</span>
<span class="n">TI</span><span class="w"> </span><span class="n">seed</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>As justified by our analysis of the reinforcement learning for continuous control landscape (in the <a class="reference external" href="https://arxiv.org/abs/2306.03530">paper</a>), <strong>RLtools</strong> initially only supports fully connected neural networks. But we are planning on adding more architectures (especially recurrent neural networks) in the future.</p>
<p>We can instantiate a simple layer by first defining its hyperparameters (which are compile-time <code class="docutils literal notranslate"><span class="pre">constexpr</span></code> and types):</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[4]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">INPUT_DIM</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">5</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">OUTPUT_DIM</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">5</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="k">auto</span><span class="w"> </span><span class="n">ACTIVATION_FUNCTION</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">RELU</span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>These hyperparameters and other options are combined into a specification type such that it is easier to pass it around and such that we don’t need to write out all hyperparameters and options as template parameters when a function takes the datastructure as an argument:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[5]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">LAYER_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM</span><span class="p">,</span><span class="w"> </span><span class="n">ACTIVATION_FUNCTION</span><span class="o">></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>The data structure of a layer does not only depend on its (previously defined) structure but also on the required capabilities. E.g. if we want to do backward passes, the layer needs to store intermediate activations during the forward pass. Furthermore, the buffers of these intermediate values also depend on the batch size. We decouple the capabilities from the structure specification such that we can easily change the capability of a layer or model (e.g. for checkpointing, where we only want
to save the parts required for inference or changing the batch size).</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[6]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">CAPABILITY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">capability</span><span class="o">::</span><span class="n">Forward</span><span class="o"><></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>You might have noticed that the <code class="docutils literal notranslate"><span class="pre">LAYER_CONFIG</span></code> does not specify an input dimensionality. This is because the input shapes automatically cascade through models in RLtools, where the input shape of a subsequent layer is determined by the output shape of the preceding layer. This will make more sense once we introduce the <code class="docutils literal notranslate"><span class="pre">Sequential</span></code> model that combines multiple layers later in this document.</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[7]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Shape</span><span class="o"><</span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_DIM</span><span class="o">></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>Using this specification we can declare an actual layer:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[8]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Layer</span><span class="o"><</span><span class="n">LAYER_CONFIG</span><span class="p">,</span><span class="w"> </span><span class="n">CAPABILITY</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="o">></span><span class="w"> </span><span class="n">layer</span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>A fully connected neural network consists of layers each implementing:</p>
<div class="math-wrapper docutils container">
<div class="math notranslate nohighlight">
\[y = f(Wx + b)\]</div>
</div>
<p>where <span class="math notranslate nohighlight">\(x\)</span> is the input (external or from the previous layer), <span class="math notranslate nohighlight">\(W\)</span> and <span class="math notranslate nohighlight">\(b\)</span> are the weight matrix and biases respectively and <span class="math notranslate nohighlight">\(f\)</span> is an element-wise non-linear function. Hence the data structure of a layer should contain at least <span class="math notranslate nohighlight">\(W\)</span> and <span class="math notranslate nohighlight">\(b\)</span>. Because these parameters are containers they need to be allocated:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[9]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Now that the memory is allocated we need to initialize it (because it may contain arbitrary values). By default, we use the standard <a class="reference external" href="https://pytorch.org/docs/stable/nn.init.html?highlight=kaiming#torch.nn.init.kaiming_normal_">Kaiming</a> initialization scheme:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[10]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">init</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">,</span><span class="w"> </span><span class="n">seed</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">init_weights</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>We can print <span class="math notranslate nohighlight">\(W\)</span> and <span class="math notranslate nohighlight">\(b\)</span>:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[11]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">.</span><span class="n">weights</span><span class="p">.</span><span class="n">parameters</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
-2.816712e-01 -4.240245e-01 3.859819e-01 4.441947e-02 4.004624e-01
-1.364082e-02 -7.122520e-02 -1.605171e-01 -1.517532e-01 -3.090902e-01
1.778682e-01 1.066792e-01 -3.399266e-01 -1.791943e-01 -1.325908e-02
1.187242e-01 1.083454e-01 2.846306e-01 2.606547e-02 1.637034e-01
-1.286924e-03 1.214463e-02 7.763362e-02 -2.822456e-01 1.965541e-01
</pre></div></div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[12]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">.</span><span class="n">biases</span><span class="p">.</span><span class="n">parameters</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
-5.724791e-02 -5.784941e-02 -2.641703e-01 -2.085560e-01 -3.268416e-01
</pre></div></div>
</div>
<p>Now that the layer is initialized we can run inference using a random input. We first declare and allocate input and output matrices and then randomly initialize the input:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[13]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_DIM</span><span class="o">>></span><span class="w"> </span><span class="n">input</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM</span><span class="o">>></span><span class="w"> </span><span class="n">output</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
-0.696818 0.150269 -2.589913 -0.316640 -1.897954
</pre></div></div>
</div>
<p>Now we can evaluate output of the layer:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[14]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">decltype</span><span class="p">(</span><span class="n">layer</span><span class="p">)</span><span class="o">::</span><span class="n">Buffer</span><span class="o"><</span><span class="n">BATCH_SIZE</span><span class="o">></span><span class="w"> </span><span class="n">buffer</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">evaluate</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.000000 0.991368 0.590204 0.000000 0.000000
</pre></div></div>
</div>
<p>Now we are revisiting the capabilities mentioned earlier. For inference storing <span class="math notranslate nohighlight">\(W\)</span> and <span class="math notranslate nohighlight">\(b\)</span> is sufficient but for training we at least need to also store the gradient of the loss <span class="math notranslate nohighlight">\(L\)</span> wrt. <span class="math notranslate nohighlight">\(W\)</span> and <span class="math notranslate nohighlight">\(b\)</span>: <span class="math notranslate nohighlight">\(\frac{\mathrm{d}L}{\mathrm{d}W}\)</span> and <span class="math notranslate nohighlight">\(\frac{\mathrm{d}L}{\mathrm{d}b}\)</span>. Because depending on the optimizer type we might need to store more information per parameter (like the first and second-order moment in the case of
<a class="reference external" href="https://en.wikipedia.org/wiki/Stochastic_gradient_descent#Adam">Adam</a>), we abstract the storage for the weights and biases using a parameter type (defined under the <code class="docutils literal notranslate"><span class="pre">rl_tools::nn::parameters</span></code> namespace) that can e.g. be <code class="docutils literal notranslate"><span class="pre">Plain</span></code>, <code class="docutils literal notranslate"><span class="pre">Gradient</span></code>, <code class="docutils literal notranslate"><span class="pre">Adam</span></code> or any other type extended by the user. For this illustration we are using <code class="docutils literal notranslate"><span class="pre">Gradient</span></code>:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[15]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">PARAMETER_TYPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">parameters</span><span class="o">::</span><span class="n">Gradient</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">CAPABILITY_2</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">capability</span><span class="o">::</span><span class="n">Gradient</span><span class="o"><</span><span class="n">PARAMETER_TYPE</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_2_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM</span><span class="p">,</span><span class="w"> </span><span class="n">ACTIVATION_FUNCTION</span><span class="o">></span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Layer</span><span class="o"><</span><span class="n">LAYER_2_CONFIG</span><span class="p">,</span><span class="w"> </span><span class="n">CAPABILITY_2</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="o">></span><span class="w"> </span><span class="n">layer_2</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">zero_gradient</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Note that by using the <code class="docutils literal notranslate"><span class="pre">rl_tools::nn::capability::Gradient</span></code> capability, the <code class="docutils literal notranslate"><span class="pre">rl_tools::nn::layers::dense::Layer</span></code> data structure contains the necessary buffers (e.g. for intermediate activations) to support the backpropagation algorithm. Additionally, similar to PyTorch we are setting the gradient to zero because it is accumulated with subsequent backward passes.</p>
<p>Now we can backpropagate the derivative of the loss wrt. the <code class="docutils literal notranslate"><span class="pre">output</span></code> to calculate the derivative of the loss wrt. the <code class="docutils literal notranslate"><span class="pre">input</span></code>. Hence the derivative of the loss wrt. the <code class="docutils literal notranslate"><span class="pre">output</span></code>: <code class="docutils literal notranslate"><span class="pre">d_output</span></code> is actually an input to the <code class="docutils literal notranslate"><span class="pre">rl_tools::backward</span></code> operator. The operator also accumulates the derivative of the loss wrt. the weights and biases in the layer. We first allocate containers for <code class="docutils literal notranslate"><span class="pre">d_input</span></code> and <code class="docutils literal notranslate"><span class="pre">d_output</span></code> and randomly set <code class="docutils literal notranslate"><span class="pre">d_output</span></code> (a hypothetical gradient of the input of some
upstream layers)</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[16]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM</span><span class="o">>></span><span class="w"> </span><span class="n">d_output</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_DIM</span><span class="o">>></span><span class="w"> </span><span class="n">d_input</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_input</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_output</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_output</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Now we execute the backpropagation and display the gradient of the loss wrt. the input:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[17]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">std</span><span class="o">::</span><span class="n">cout</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="s">"Output (should be identical to layer_1): "</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">.</span><span class="n">output</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">backward_full</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">d_output</span><span class="p">,</span><span class="w"> </span><span class="n">d_input</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">);</span>
<span class="n">std</span><span class="o">::</span><span class="n">cout</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="s">"Derivative with respect to the input: "</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_input</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
Output (should be identical to layer_1):
0.000000 0.991368 0.590204 0.000000 0.000000
Derivative with respect to the input:
0.010383 0.067850 0.162538 0.151305 0.302343
</pre></div></div>
</div>
<p>This also accumulates the gradient in the weights and biases:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[18]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">.</span><span class="n">weights</span><span class="p">.</span><span class="n">gradient</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
6.811106e-01 -1.468820e-01 2.531533e+00 3.095022e-01 1.855171e+00
1.155892e-02 -2.492689e-03 4.296186e-02 5.252466e-03 3.148353e-02
0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
</pre></div></div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[19]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">.</span><span class="n">biases</span><span class="p">.</span><span class="n">gradient</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.000000e+00 -9.774587e-01 -1.658815e-02 0.000000e+00 0.000000e+00
</pre></div></div>
</div>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[20]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">layer_2</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_input</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">free</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_output</span><span class="p">);</span>
</pre></div>
</div>
</div>
<section id="Multilayer-Perceptron-(MLP)">
<h2>Multilayer Perceptron (MLP)<a class="headerlink" href="#Multilayer-Perceptron-(MLP)" title="Link to this heading">¶</a></h2>
<p>Until now we showed the behavior of a single, fully-connected layer. <strong>RLtools</strong> contains a <a class="reference external" href="https://en.wikipedia.org/wiki/Multilayer_perceptron">Multilayer Perceptron (MLP)</a> that conveniently integrates an arbitrary number of layers into a single data structure with algorithms to perform forward passes and backpropagation across the whole model. The MLP is located under the namespace <code class="docutils literal notranslate"><span class="pre">rl_tools::nn_models</span></code> and we include it as well as the operations of the Adam optimizer:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[21]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/nn_models/mlp/operations_generic.h></span>
<span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/nn/optimizers/adam/operations_generic.h></span>
</pre></div>
</div>
</div>
<p>Note that the operations of the (Adam) optimizer are split into <code class="docutils literal notranslate"><span class="pre">instance/operations_generic.h</span></code> and <code class="docutils literal notranslate"><span class="pre">operations_generic.h</span></code>. The former contains operations that use and modify the values that are associated with a particular set of parameters (e.g. <code class="docutils literal notranslate"><span class="pre">weights</span></code> or <code class="docutils literal notranslate"><span class="pre">biases</span></code> of a particular layer). An example is the <code class="docutils literal notranslate"><span class="pre">rl_tools::update</span></code> operation that applies the optimizer and in the case of Adam, updates the first and second order moment based on the gradient and then applies the update rule
to the parameters. Since these parameters can reside in an arbitrary structure (like an MLP or Sequential <code class="docutils literal notranslate"><span class="pre">nn_model</span></code>) the <code class="docutils literal notranslate"><span class="pre">rl_tools::update</span></code> function is called by an operation that knows about this structure (e.g. the <code class="docutils literal notranslate"><span class="pre">rl_tools::update</span></code> of the <code class="docutils literal notranslate"><span class="pre">rl_tools::nn_models::mlp</span></code> in turn calls the update operations of its layers). These instance-associated operations carry out the bulk of the gradient descent step but are necessarily myopic because they don’t know about higher-level structure.
Because optimizers like Adam not only have instance-associated state (like the first and second order moments of the gradient) but also global state like the step counter we also include the global <code class="docutils literal notranslate"><span class="pre">.../adam/operations_generic.h</span></code>.</p>
<p>The order of the includes is dictated by the underlying usage, where we call <code class="docutils literal notranslate"><span class="pre">rl_tools::update</span></code> on the optimizer, providing the model. The optimizer then invokes the update of the model, which invokes the update of its submodels/layers which in turn call the update on the parameter instance. For each step in this chain, the next operation should already be included, hence we arrive at the order used in this example.</p>
<p>Next we define the hyperparameters:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[22]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">INPUT_DIM_MLP</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">5</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">OUTPUT_DIM_MLP</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">NUM_LAYERS</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">3</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">HIDDEN_DIM</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">10</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="k">auto</span><span class="w"> </span><span class="n">ACTIVATION_FUNCTION_MLP</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">RELU</span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="k">auto</span><span class="w"> </span><span class="n">OUTPUT_ACTIVATION_FUNCTION_MLP</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">IDENTITY</span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>Note that the MLP supports architectures with an arbitrary depth but each layer has to have the same dimensionality. This is because the layers are stored in an array and hence all need to have the same type. If we allowed for different hidden dimensions, we would have to give up on having arbitrary depths.</p>
<p>We aggregate the hyperparameters into a specification again (first just for the structure, later for the full network, incorporating the structure):</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[23]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">MODEL_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn_models</span><span class="o">::</span><span class="n">mlp</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM_MLP</span><span class="p">,</span><span class="w"> </span><span class="n">NUM_LAYERS</span><span class="p">,</span><span class="w"> </span><span class="n">HIDDEN_DIM</span><span class="p">,</span><span class="w"> </span><span class="n">ACTIVATION_FUNCTION_MLP</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_ACTIVATION_FUNCTION_MLP</span><span class="o">></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>We use the default Adam parameters (taken from TensorFlow) and set up the optimizer type using these parameters. Moreover, we create a full network specification for a network that can be trained with Adam which takes the structure specification as an input. Finally we define the full network type:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[24]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">PARAMETER_TYPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">parameters</span><span class="o">::</span><span class="n">Adam</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">CAPABILITY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">capability</span><span class="o">::</span><span class="n">Gradient</span><span class="o"><</span><span class="n">PARAMETER_TYPE</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">OPTIMIZER_SPEC</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">optimizers</span><span class="o">::</span><span class="n">adam</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">OPTIMIZER</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">optimizers</span><span class="o">::</span><span class="n">Adam</span><span class="o"><</span><span class="n">OPTIMIZER_SPEC</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">MODEL_TYPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn_models</span><span class="o">::</span><span class="n">mlp</span><span class="o">::</span><span class="n">NeuralNetwork</span><span class="o"><</span><span class="n">MODEL_CONFIG</span><span class="p">,</span><span class="w"> </span><span class="n">CAPABILITY</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="o">></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>Using these type definitions we can now declare the optimizer and the model. All the optimizer state is contained in the <code class="docutils literal notranslate"><span class="pre">PARAMETER_TYPE</span></code> of the model (and an additional <code class="docutils literal notranslate"><span class="pre">age</span></code> integer in the model in the case of Adam). In comparison to PyTorch, which stores the optimizer state in the optimizer, we prefer to store the first- and second-order moments next to the parameters, as is already the case for the gradient (in PyTorch as well). Hence the optimizer is stateless in this case (it does not
need to be for user-defined optimizers) and we only need to allocate the model.</p>
<p>The backpropagation algorithm needs to store the intermediate gradients. To save memory we do not add a <code class="docutils literal notranslate"><span class="pre">d_input</span></code> or <code class="docutils literal notranslate"><span class="pre">d_output</span></code> to each layer but rather use a double buffer with the maximum size of the hidden representation needed.</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[25]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">OPTIMIZER</span><span class="w"> </span><span class="n">optimizer</span><span class="p">;</span>
<span class="n">MODEL_TYPE</span><span class="w"> </span><span class="n">model</span><span class="p">;</span>
<span class="k">typename</span><span class="w"> </span><span class="nc">MODEL_TYPE</span><span class="o">::</span><span class="n">Buffer</span><span class="o"><></span><span class="w"> </span><span class="n">buffer</span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>We allocate the model and initialize its weights randomly, like in the case of the single layer. We are again zeroing the gradient of all parameters of all layers as well as resetting the optimizer state of all parameters of all layers (e.g. in the case of Adam the first and second order moments are set to zero). Finally we also allocate the buffers.</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[26]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">optimizer</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">init_weights</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span><span class="w"> </span><span class="c1">// recursively initializes all layers using kaiming initialization</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">init</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">optimizer</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">zero_gradient</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span><span class="w"> </span><span class="c1">// recursively zeros all gradients in the layers</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">reset_optimizer_state</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">optimizer</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>In this example we showcase an MLP with a five dimensional input and a one dimensional output (remember the <code class="docutils literal notranslate"><span class="pre">OUTPUT_ACTIVATION_FUNCTION_MLP</span></code> is <code class="docutils literal notranslate"><span class="pre">IDENTITY</span></code> so it can also output negative values). For these new shapes we declare and allocate the input and output containers:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[27]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_DIM_MLP</span><span class="o">>></span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">d_input_mlp</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">Matrix</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">matrix</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">OUTPUT_DIM_MLP</span><span class="o">>></span><span class="w"> </span><span class="n">d_output_mlp</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_input_mlp</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">d_output_mlp</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Now, like in the case of the single layer, we can run a forward pass using the input. Because the model is an Adam model (which is a subclass of <code class="docutils literal notranslate"><span class="pre">rlt::nn_models::mlp::NeuralNetworkBackwardGradient</span></code>), it stores the intermediate (and final) outputs.</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[28]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">T</span><span class="w"> </span><span class="n">output_value</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">get</span><span class="p">(</span><span class="n">model</span><span class="p">.</span><span class="n">output_layer</span><span class="p">.</span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">);</span>
<span class="n">output_value</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[28]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.164536f
</pre></div></div>
</div>
<p>Now imagine we want the output of the model (for this input) to be <span class="math notranslate nohighlight">\(1\)</span>. We calculate the error and feed it back through the model using backpropagation. <code class="docutils literal notranslate"><span class="pre">d_output_mlp</span></code> should be the derivative of the loss function, hence it gives the direction of the output that would increase the loss. Our error is the opposite: if we moved the output in the direction of the error, we would come closer to our target value and hence decrease the loss. Because of this, we feed back <code class="docutils literal notranslate"><span class="pre">-error</span></code>. This
procedure also corresponds to using a squared loss because <code class="docutils literal notranslate"><span class="pre">error</span></code> is (up to a constant) the derivative of the squared loss.</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[29]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">T</span><span class="w"> </span><span class="n">target_output_value</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span>
<span class="n">T</span><span class="w"> </span><span class="n">error</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">target_output_value</span><span class="w"> </span><span class="o">-</span><span class="w"> </span><span class="n">output_value</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">set</span><span class="p">(</span><span class="n">d_output_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="o">-</span><span class="n">error</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">backward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">d_output_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>The backward pass populates the gradient in all parameters of the model. Using this gradient we can apply the <code class="docutils literal notranslate"><span class="pre">rlt::step</span></code> operator which updates the first and second order moments of the gradient of all parameters and afterwards applies the Adam update rule to update the parameters:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[30]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">step</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">optimizer</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Now the next forward pass should be closer to the target value:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[31]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">get</span><span class="p">(</span><span class="n">model</span><span class="p">.</span><span class="n">output_layer</span><span class="p">.</span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[31]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.173487f
</pre></div></div>
</div>
<p>Next we will train the network to actually perform a function (not only trying to output a constant value as before). With the following training loop we train it to behave like the <code class="docutils literal notranslate"><span class="pre">rlt::max</span></code> operator which outputs the max of the five inputs. We run the forward and backward pass for <span class="math notranslate nohighlight">\(32\)</span> iterations while accumulating the gradient, which effectively leads to a batch size of <span class="math notranslate nohighlight">\(32\)</span>.</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[32]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">for</span><span class="p">(</span><span class="n">TI</span><span class="w"> </span><span class="n">i</span><span class="o">=</span><span class="mi">0</span><span class="p">;</span><span class="w"> </span><span class="n">i</span><span class="w"> </span><span class="o"><</span><span class="w"> </span><span class="mi">10000</span><span class="p">;</span><span class="w"> </span><span class="n">i</span><span class="o">++</span><span class="p">){</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">zero_gradient</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span>
<span class="w"> </span><span class="n">T</span><span class="w"> </span><span class="n">mse</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">0</span><span class="p">;</span>
<span class="w"> </span><span class="k">for</span><span class="p">(</span><span class="n">TI</span><span class="w"> </span><span class="n">batch_i</span><span class="o">=</span><span class="mi">0</span><span class="p">;</span><span class="w"> </span><span class="n">batch_i</span><span class="w"> </span><span class="o"><</span><span class="w"> </span><span class="mi">32</span><span class="p">;</span><span class="w"> </span><span class="n">batch_i</span><span class="o">++</span><span class="p">){</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="w"> </span><span class="n">T</span><span class="w"> </span><span class="n">output_value</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">get</span><span class="p">(</span><span class="n">model</span><span class="p">.</span><span class="n">output_layer</span><span class="p">.</span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">);</span>
<span class="w"> </span><span class="n">T</span><span class="w"> </span><span class="n">target_output_value</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">max</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">);</span>
<span class="w"> </span><span class="n">T</span><span class="w"> </span><span class="n">error</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">target_output_value</span><span class="w"> </span><span class="o">-</span><span class="w"> </span><span class="n">output_value</span><span class="p">;</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">set</span><span class="p">(</span><span class="n">d_output_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="o">-</span><span class="n">error</span><span class="p">);</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">backward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">d_output_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">);</span>
<span class="w"> </span><span class="n">mse</span><span class="w"> </span><span class="o">+=</span><span class="w"> </span><span class="n">error</span><span class="w"> </span><span class="o">*</span><span class="w"> </span><span class="n">error</span><span class="p">;</span>
<span class="w"> </span><span class="p">}</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">step</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">optimizer</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">);</span>
<span class="w"> </span><span class="k">if</span><span class="p">(</span><span class="n">i</span><span class="w"> </span><span class="o">%</span><span class="w"> </span><span class="mi">1000</span><span class="w"> </span><span class="o">==</span><span class="w"> </span><span class="mi">0</span><span class="p">)</span>
<span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">cout</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="s">"Squared error: "</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">mse</span><span class="o">/</span><span class="mi">32</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span>
<span class="p">}</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
Squared error: 1.261306e+00
Squared error: 9.547270e-02
Squared error: 6.743468e-02
Squared error: 2.294786e-02
Squared error: 1.615597e-02
Squared error: 1.639998e-02
Squared error: 1.701169e-02
Squared error: 1.863800e-02
Squared error: 1.497162e-02
Squared error: 3.066690e-02
</pre></div></div>
</div>
<p>Now we can test the model using some arbitrary input (which should be in the distribution of input values) and the model should output a value close to the maximum of the five input values:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[33]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">set</span><span class="p">(</span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="o">+</span><span class="mf">0.0</span><span class="p">);</span>
<span class="n">set</span><span class="p">(</span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> </span><span class="mf">-0.1</span><span class="p">);</span>
<span class="n">set</span><span class="p">(</span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">2</span><span class="p">,</span><span class="w"> </span><span class="o">+</span><span class="mf">0.5</span><span class="p">);</span>
<span class="n">set</span><span class="p">(</span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">3</span><span class="p">,</span><span class="w"> </span><span class="mf">-0.4</span><span class="p">);</span>
<span class="n">set</span><span class="p">(</span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">4</span><span class="p">,</span><span class="w"> </span><span class="o">+</span><span class="mf">0.1</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">get</span><span class="p">(</span><span class="n">model</span><span class="p">.</span><span class="n">output_layer</span><span class="p">.</span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">)</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[33]:
</pre></div>
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
0.531318f
</pre></div></div>
</div>
<p>We can also automatically test it with <span class="math notranslate nohighlight">\(10\)</span> random inputs:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[34]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">for</span><span class="p">(</span><span class="n">TI</span><span class="w"> </span><span class="n">i</span><span class="o">=</span><span class="mi">0</span><span class="p">;</span><span class="w"> </span><span class="n">i</span><span class="w"> </span><span class="o"><</span><span class="w"> </span><span class="mi">10</span><span class="p">;</span><span class="w"> </span><span class="n">i</span><span class="o">++</span><span class="p">){</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">,</span><span class="w"> </span><span class="n">buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">cout</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="s">"max: "</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">max</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input_mlp</span><span class="p">)</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="s">" output: "</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">get</span><span class="p">(</span><span class="n">model</span><span class="p">.</span><span class="n">output_layer</span><span class="p">.</span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">,</span><span class="w"> </span><span class="mi">0</span><span class="p">)</span><span class="w"> </span><span class="o"><<</span><span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span>
<span class="p">}</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
max: 1.191373e+00 output: 1.114110e+00
max: 1.042214e-01 output: 1.975734e-01
max: 8.064697e-01 output: 8.125502e-01
max: 6.621215e-01 output: 5.229679e-01
max: 1.077457e+00 output: 9.719392e-01
max: 1.125902e+00 output: 1.106198e+00
max: 8.146965e-01 output: 7.666936e-01
max: 5.186371e-01 output: 5.125691e-01
max: 1.329031e+00 output: 1.429670e+00
max: 1.430187e+00 output: 1.412748e+00
</pre></div></div>
</div>
<p>If the values are not close the model might need some more training iterations.</p>
</section>
<section id="Sequential">
<h2>Sequential<a class="headerlink" href="#Sequential" title="Link to this heading">¶</a></h2>
<p>The great advantage of the previously introduced MLP module is that the number of layers is a parameter and hence the architecture can be scaled in width and depth through just two parameters without defining additional types. In many cases more flexibility is required, though, which is why we introduced the Sequential model.</p>
<p>The Sequential model follows the <code class="docutils literal notranslate"><span class="pre">torch.nn.Sequential</span></code> and <code class="docutils literal notranslate"><span class="pre">tensorflow.keras.Sequential</span></code> philosophy. Initially, the Sequential model was created to introduce automatic differentiation to RLtools, since the MLP just has a hard-coded backward pass. With the sequential model the user can just specify a sequence of layers and the forward and backward passes are inferred by the compiler at compile-time, automatically. With the addition of the <code class="docutils literal notranslate"><span class="pre">rlt::Tensor</span></code> interface for arbitrary-dimensional
containers, the role of the Sequential model was amplified as we wanted to move away from hard-coded assumptions about dimensions (like the batch dimension or sequence dimension) and move to layers adapting depending on the input shape. This “adaptation” of course happens entirely at compile time to maintain the main philosophy of RLtools that the sizes of all data structures and loops are known at compile time.</p>
<p>With the move to <code class="docutils literal notranslate"><span class="pre">rlt::Tensor</span></code> the sequential interface adopted semantics that are more similar to <code class="docutils literal notranslate"><span class="pre">tensorflow.keras.Sequential</span></code>, where the intermediate shapes are inferred based on the input shape as well. Practically this means that e.g. dense layers or MLPs (yes, MLPs can be “layers” inside a Sequential model) broadcast over all leading dimensions except the last one, while e.g. recurrent layers like the GRU consume (and output) the last three dimensions
(<code class="docutils literal notranslate"><span class="pre">SEQUENCE_LENGTH</span> <span class="pre">x</span> <span class="pre">BATCH_SIZE</span> <span class="pre">x</span> <span class="pre">FEATURE_DIM</span></code>).</p>
<p>In the following we build a three-layer, “funnel”-type MLP that we could not build using the <code class="docutils literal notranslate"><span class="pre">MLP</span></code> model because it requires all hidden layers to be of the same dimensionality for the reasons described before. The following specifies the <code class="docutils literal notranslate"><span class="pre">[32,</span> <span class="pre">16,</span> <span class="pre">4]</span></code> MLP with input dim 5:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[35]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf"><rl_tools/nn_models/sequential/operations_generic.h></span>
<span class="k">using</span><span class="w"> </span><span class="k">namespace</span><span class="w"> </span><span class="nn">rlt</span><span class="o">::</span><span class="nn">nn_models</span><span class="o">::</span><span class="nn">sequential</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_1_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="mi">32</span><span class="p">,</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">ActivationFunction</span><span class="o">::</span><span class="n">RELU</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_1</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">BindConfiguration</span><span class="o"><</span><span class="n">LAYER_1_CONFIG</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_2_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="mi">16</span><span class="p">,</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">ActivationFunction</span><span class="o">::</span><span class="n">RELU</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_2</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">BindConfiguration</span><span class="o"><</span><span class="n">LAYER_2_CONFIG</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_3_CONFIG</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">Configuration</span><span class="o"><</span><span class="n">TYPE_POLICY</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="mi">4</span><span class="p">,</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">activation_functions</span><span class="o">::</span><span class="n">ActivationFunction</span><span class="o">::</span><span class="n">IDENTITY</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">LAYER_3</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">layers</span><span class="o">::</span><span class="n">dense</span><span class="o">::</span><span class="n">BindConfiguration</span><span class="o"><</span><span class="n">LAYER_3_CONFIG</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">MODULE_CHAIN</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">Module</span><span class="o"><</span><span class="n">LAYER_1</span><span class="p">,</span><span class="w"> </span><span class="n">Module</span><span class="o"><</span><span class="n">LAYER_2</span><span class="p">,</span><span class="w"> </span><span class="n">Module</span><span class="o"><</span><span class="n">LAYER_3</span><span class="o">>>></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">CAPABILITY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">capability</span><span class="o">::</span><span class="n">Forward</span><span class="o"><></span><span class="p">;</span>
<span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">SEQUENCE_LENGTH</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">1</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Shape</span><span class="o"><</span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">SEQUENCE_LENGTH</span><span class="p">,</span><span class="w"> </span><span class="n">BATCH_SIZE</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_DIM_MLP</span><span class="o">></span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">SEQUENTIAL</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">Build</span><span class="o"><</span><span class="n">CAPABILITY</span><span class="p">,</span><span class="w"> </span><span class="n">MODULE_CHAIN</span><span class="p">,</span><span class="w"> </span><span class="n">INPUT_SHAPE</span><span class="o">></span><span class="p">;</span>
</pre></div>
</div>
</div>
<p>For each layer, we specify a configuration and create a wrapper that binds this configuration. The semantics of this wrapper is that it represents a layer with the given configuration but for any capability or input shape. Technically the wrapper contains a template that is used by the sequential model to actually instantiate the layer with the appropriate capability and input shape.</p>
<p><strong>Capability</strong>: The capability specifies the model’s overarching properties and capabilities. The main capability is whether the module (and its constituent layers) only supports forward, backward wrt. the input, or backward including the gradient wrt. the parameters. Additionally the capability is also used to specify whether the model should be statically or dynamically allocated. The reason we do not specify these attributes in the configuration is that we might want to use a particular model in
different ways: e.g. having the critics be capable of backward-gradient but the target critic only be capable of forward inference in TD3/SAC. Furthermore, when checkpointing we probably only want to save the parameters and not the gradient and optimizer state. We will showcase this later.</p>
<p><strong>Input Shape</strong>: Similar to the capability, we might want to use the model for different input shapes. It rarely makes sense to change the <code class="docutils literal notranslate"><span class="pre">FEATURE_DIM</span></code> of the input shape, but it is often desirable to change the <code class="docutils literal notranslate"><span class="pre">BATCH_SIZE</span></code>, e.g. between RL training and data collection/exploration.</p>
<p>The API is the same as for other models and layers:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[36]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">SEQUENTIAL</span><span class="w"> </span><span class="n">sequential</span><span class="p">;</span>
<span class="n">SEQUENTIAL</span><span class="o">::</span><span class="n">Buffer</span><span class="o"><></span><span class="w"> </span><span class="n">sequential_buffer</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">sequential</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">malloc</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">sequential_buffer</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[37]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">init_weights</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">sequential</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[38]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Tensor</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="k">typename</span><span class="w"> </span><span class="nc">SEQUENTIAL</span><span class="o">::</span><span class="n">INPUT_SHAPE</span><span class="p">,</span><span class="w"> </span><span class="nb">false</span><span class="o">>></span><span class="w"> </span><span class="n">input</span><span class="p">;</span><span class="w"> </span><span class="c1">// note the "false" in the specification making it static/stack-allocated, hence no "malloc" is required</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">Tensor</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="k">typename</span><span class="w"> </span><span class="nc">SEQUENTIAL</span><span class="o">::</span><span class="n">OUTPUT_SHAPE</span><span class="p">,</span><span class="w"> </span><span class="nb">false</span><span class="o">>></span><span class="w"> </span><span class="n">output</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[39]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">evaluate</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">sequential</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="n">sequential_buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
dim[0] = 0:
2.216075e-01 1.333383e-01 1.274712e-02 -2.575321e-01
</pre></div></div>
</div>
<p>Now if we want to train the model we should have defined the <code class="docutils literal notranslate"><span class="pre">CAPABILITY</span></code> to support backward-gradient. But due to the decoupling from the configuration we can do this after the fact:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[40]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">using</span><span class="w"> </span><span class="n">PARAMETER_TYPE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">parameters</span><span class="o">::</span><span class="n">Adam</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">NEW_CAPABILITY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">rlt</span><span class="o">::</span><span class="n">nn</span><span class="o">::</span><span class="n">capability</span><span class="o">::</span><span class="n">Gradient</span><span class="o"><</span><span class="n">PARAMETER_TYPE</span><span class="p">,</span><span class="w"> </span><span class="nb">false</span><span class="o">></span><span class="p">;</span><span class="w"> </span><span class="c1">// note the "false" specifies that it is statically allocated, so no "malloc" required</span>
<span class="k">using</span><span class="w"> </span><span class="n">NEW_SEQUENTIAL_CAPABILITY</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">SEQUENTIAL</span><span class="o">::</span><span class="n">CHANGE_CAPABILITY</span><span class="o"><</span><span class="n">NEW_CAPABILITY</span><span class="o">></span><span class="p">;</span>
<br/></pre></div>
</div>
</div>
<p>Let’s also say we want to increase the <code class="docutils literal notranslate"><span class="pre">BATCH_SIZE</span></code> for training:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[41]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">constexpr</span><span class="w"> </span><span class="n">TI</span><span class="w"> </span><span class="n">NEW_BATCH_SIZE</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="mi">32</span><span class="p">;</span>
<span class="k">using</span><span class="w"> </span><span class="n">NEW_SEQUENTIAL</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">NEW_SEQUENTIAL_CAPABILITY</span><span class="o">::</span><span class="n">CHANGE_BATCH_SIZE</span><span class="o"><</span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="n">NEW_BATCH_SIZE</span><span class="o">></span><span class="p">;</span>
<span class="n">NEW_SEQUENTIAL</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">;</span>
<span class="n">NEW_SEQUENTIAL</span><span class="o">::</span><span class="n">Buffer</span><span class="o"><</span><span class="nb">false</span><span class="o">></span><span class="w"> </span><span class="n">new_sequential_buffer</span><span class="p">;</span><span class="w"> </span><span class="c1">// "false" again specifies a static buffer, no "malloc" required</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">copy</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">sequential</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>Models and layers support <code class="docutils literal notranslate"><span class="pre">evaluate</span></code> calls with batch sizes smaller than the one they were specified with (as long as the buffer matches the batch size), hence we can check whether it still gives the same result:</p>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[42]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">evaluate</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">,</span><span class="w"> </span><span class="n">input</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential_buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">output</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
dim[0] = 0:
2.216075e-01 1.333383e-01 1.274712e-02 -2.575321e-01
</pre></div></div>
</div>
<p>Now for the backward pass we need to record intermediate values, which are thrown away for efficiency in the <code class="docutils literal notranslate"><span class="pre">evaluate</span></code> call. Hence we call the <code class="docutils literal notranslate"><span class="pre">forward</span></code> function:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[43]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Tensor</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="k">typename</span><span class="w"> </span><span class="nc">NEW_SEQUENTIAL</span><span class="o">::</span><span class="n">INPUT_SHAPE</span><span class="p">,</span><span class="w"> </span><span class="nb">false</span><span class="o">>></span><span class="w"> </span><span class="n">new_input</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_input</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">forward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">,</span><span class="w"> </span><span class="n">new_input</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential_buffer</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
</pre></div>
</div>
</div>
<p>After the forward pass we can simulate some loss function by generating a random <span class="math notranslate nohighlight">\(\frac{\mathrm{d}L}{\mathrm{d}\;\text{output}}\)</span> and calculating the gradient wrt. the parameters:</p>
<div class="nbinput nblast docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[44]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">Tensor</span><span class="o"><</span><span class="n">rlt</span><span class="o">::</span><span class="n">tensor</span><span class="o">::</span><span class="n">Specification</span><span class="o"><</span><span class="n">T</span><span class="p">,</span><span class="w"> </span><span class="n">TI</span><span class="p">,</span><span class="w"> </span><span class="k">typename</span><span class="w"> </span><span class="nc">NEW_SEQUENTIAL</span><span class="o">::</span><span class="n">OUTPUT_SHAPE</span><span class="p">,</span><span class="w"> </span><span class="nb">false</span><span class="o">>></span><span class="w"> </span><span class="n">new_d_output</span><span class="p">;</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">randn</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_d_output</span><span class="p">,</span><span class="w"> </span><span class="n">rng</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">zero_gradient</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">);</span>
<span class="n">rlt</span><span class="o">::</span><span class="n">backward</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">,</span><span class="w"> </span><span class="n">new_input</span><span class="p">,</span><span class="w"> </span><span class="n">new_d_output</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential_buffer</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nbinput docutils container">
<div class="prompt highlight-none notranslate"><div class="highlight"><pre><span></span>[45]:
</pre></div>
</div>
<div class="input_area highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">rlt</span><span class="o">::</span><span class="n">print</span><span class="p">(</span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">new_sequential</span><span class="p">.</span><span class="n">content</span><span class="p">.</span><span class="n">weights</span><span class="p">.</span><span class="n">gradient</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="nboutput nblast docutils container">
<div class="prompt empty docutils container">
</div>
<div class="output_area docutils container">
<div class="highlight"><pre>
2.579533e-01 3.145525e-01 -4.339635e-01 -1.778798e-01 3.592615e-01
-1.873235e-01 5.400052e-01 -4.057380e-01 -1.054345e-01 5.222924e-03
-1.100354e+00 8.127176e-01 -1.222185e-01 -6.958845e-01 -6.932434e-01
-2.796313e-01 1.849589e-01 -4.633480e-02 -2.568239e-01 6.913626e-02
4.246684e-01 -2.479236e-01 -4.023247e-01 4.705055e-01 4.454972e-01
7.384500e-02 2.571734e-01 -2.764131e-01 -4.181166e-01 6.097677e-01
-7.713830e-02 -1.457755e-01 5.746083e-01 2.499162e-01 -1.755463e-01
-5.050038e-01 4.263113e-01 -4.560746e-02 -6.121553e-01 -2.975982e-01
-2.506281e-01 1.032468e-03 8.974903e-02 5.027909e-01 -2.590285e-01
3.962268e-02 -3.042428e-01 3.587382e-01 2.725840e-03 2.849094e-02