%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for RH VT at 2021-07-27 23:24:57 +1200
%% Saved with string encoding Unicode (UTF-8)
@conference{ablationSpeech,
author = {Allen Newell},
booktitle = {Speech Recognition: Invited Papers Presented at the 1974 IEEE Symposium},
date-added = {2021-07-27 23:22:53 +1200},
date-modified = {2021-07-27 23:24:57 +1200},
editor = {D. Raj Reddy},
keywords = {ablation test, speech recognition},
title = {A Tutorial on Speech Understanding Systems},
year = {1974}}
@inproceedings{Price:2015:OEO:2791321.2791333,
acmid = {2791333},
address = {New York, NY, USA},
articleno = {12},
author = {Price, James and McIntosh-Smith, Simon},
booktitle = {Proceedings of the 3rd International Workshop on OpenCL},
date-added = {2021-06-01 13:18:25 +1200},
date-modified = {2021-06-01 13:18:39 +1200},
doi = {10.1145/2791321.2791333},
isbn = {978-1-4503-3484-6},
keywords = {Other, Github, GPGPU, OpenCL, SPIR, debugging, simulation},
location = {Palo Alto, California},
numpages = {7},
pages = {12:1--12:7},
publisher = {ACM},
series = {IWOCL '15},
title = {Oclgrind: An Extensible OpenCL Device Simulator},
url = {http://doi.acm.org/10.1145/2791321.2791333},
year = {2015},
Bdsk-Url-1 = {http://doi.acm.org/10.1145/2791321.2791333},
Bdsk-Url-2 = {https://doi.org/10.1145/2791321.2791333}}
@conference{muralidharan2015semi,
author = {Muralidharan, Servesh and O'Brien, Kenneth and Lalanne, Christian},
booktitle = {First International Workshop on Heterogeneous High-performance Reconfigurable Computing},
date-added = {2021-05-31 21:48:12 +1200},
date-modified = {2021-05-31 21:51:10 +1200},
keywords = {FPGA, Roofline Analysis, High Performance Computing},
title = {A Semi-Automated Tool Flow for Roofline Analysis of OpenCL Kernels on Accelerators},
year = {2015}}
@techreport{CommingAIHackers,
author = {Bruce Schneier},
date-added = {2021-05-18 13:51:02 +1200},
date-modified = {2021-05-18 13:52:54 +1200},
institution = {Belfer Center},
keywords = {Other, Hack, AI},
month = {April},
title = {The Coming AI Hackers},
url = {https://www.belfercenter.org/publication/coming-ai-hackers},
year = {2021},
Bdsk-Url-1 = {https://www.belfercenter.org/publication/coming-ai-hackers}}
@mastersthesis{FPGA-Roofline,
author = {Moein Pahlavan Yali},
date-added = {2021-05-15 15:21:35 +1200},
date-modified = {2021-05-15 15:28:06 +1200},
keywords = {Book, Embedded systems, FPGA, roofline model, performance analysis tools},
month = {Dec},
school = {Virginia Polytechnic Institute and State University},
title = {FPGA-Roofline: An Insightful Model for FPGA-based Hardware Accelerators in Modern Embedded Systems},
type = {Masters},
year = {2014}}
@article{Cardoso:2013ve,
abstract = {The potential of FPGAs as accelerators for high-performance computing applications is very large, but many factors are involved in their performance. The design for FPGAs and the selection of the proper optimizations when mapping computations to FPGAs lead to prohibitively long developing time. Alternatives are the high-level synthesis (HLS) tools, which promise a fast design space exploration due to design at high-level or analytical performance models which provide realistic performance expectations, potential impediments to performance, and optimization guidelines. In this paper we propose the combination of both, in order to construct a performance model for FPGAs which is able to visually condense all the helpful information for the designer. Our proposed model extends the roofline model, by considering the resource consumption and the parameters used in the HLS tools, to maximize the performance and the resource utilization within the area of the FPGA. The proposed model is applied to optimize the design exploration of a class of window-based image processing applications using two different HLS tools. The results show the accuracy of the model as well as its flexibility to be combined with any HLS tool.},
author = {Cardoso, Jo{\~a}o and da Silva, Bruno and Braeken, An and D'Hollander, Erik H. and Touhafi, Abdellah},
da = {2013/12/26},
date-added = {2021-05-12 22:38:15 +1200},
date-modified = {2021-05-12 22:39:19 +1200},
doi = {10.1155/2013/428078},
isbn = {1687-7195},
journal = {International Journal of Reconfigurable Computing},
keywords = {FPGA, roofline model, performance analysis tools},
pages = {428078},
publisher = {Hindawi Publishing Corporation},
title = {Performance Modeling for FPGAs: Extending the Roofline Model with High-Level Synthesis Tools},
ty = {JOUR},
url = {https://doi.org/10.1155/2013/428078},
volume = {2013},
year = {2013},
Bdsk-Url-1 = {https://doi.org/10.1155/2013/428078}}
@inproceedings{10.1007/978-3-662-44788-8_17,
abstract = {Cryptanalysis of a cryptographic function usually requires advanced cryptanalytical skills and extensive amount of human labor with an option of using randomness testing suites like STS NIST [1] or Dieharder [2]. These can be applied to test statistical properties of cryptographic function outputs. We propose a more open approach based on software circuit that acts as a testing function automatically evolved by a stochastic optimization algorithm. Information leaked during cryptographic function evaluation is used to find a distinguisher [4] of outputs produced by 25 candidate algorithms for eStream and SHA-3 competition from truly random sequences. We obtained similar results (with some exceptions) as those produced by STS NIST and Dieharder tests w.r.t. the number of rounds of the inspected algorithm.},
address = {Berlin, Heidelberg},
author = {{\v{S}}venda, Petr and Ukrop, Martin and Maty{\'a}{\v{s}}, Vashek},
booktitle = {E-Business and Telecommunications},
date-added = {2021-05-11 21:22:18 +1200},
date-modified = {2021-05-11 21:52:58 +1200},
editor = {Obaidat, Mohammad S. and Filipe, Joaquim},
isbn = {978-3-662-44788-8},
keywords = {Books, Other, NIST, STS},
pages = {290--305},
publisher = {Springer Berlin Heidelberg},
title = {Determining Cryptographic Distinguishers for eStream and SHA-3 Candidate Functions with Evolutionary Circuits},
year = {2014}}
@inproceedings{10.1007/978-3-319-12060-7_18,
abstract = {Randomness testing plays an important role in cryptography. Randomness is typically examined by batteries of statistical tests. One of the most frequently used test batteries is the NIST Statistical Test Suite. The tests of randomness should be rather fast since they usually process large volumes of data. Unfortunately, this is not the case for the NIST STS, where a complete test can take hours. Alternative implementations do exist, but are not very efficient either or they do not focus on the most time-consuming tests. We reimplemented all NIST STS tests and achieved interesting speedups in most of the tests, including the tests with the highest time complexity. Overall, our implementation runs 30 times faster than the original code.},
address = {Cham},
author = {S{\'y}s, Marek and {\v{R}}{\'\i}ha, Zden{\v{e}}k},
booktitle = {Security, Privacy, and Applied Cryptography Engineering},
date-added = {2021-05-11 21:17:43 +1200},
date-modified = {2021-05-11 21:18:02 +1200},
editor = {Chakraborty, Rajat Subhra and Matyas, Vashek and Schaumont, Patrick},
isbn = {978-3-319-12060-7},
keywords = {Other, NIST, STS},
pages = {272--284},
publisher = {Springer International Publishing},
title = {Faster Randomness Testing with the NIST Statistical Test Suite},
year = {2014}}
@inproceedings{10.1145/3431920.3439296,
abstract = {Binary neural networks (BNNs) have 1-bit weights and activations. Such networks are well suited for FPGAs, as their dominant computations are bitwise arithmetic and the memory requirement is also significantly reduced. However, compared to state-of-the-art compact convolutional neural network (CNN) models, BNNs tend to produce a much lower accuracy on realistic datasets such as ImageNet. In addition, the input layer of BNNs has gradually become a major compute bottleneck, because it is conventionally excluded from binarization to avoid a large accuracy loss. This work proposes FracBNN, which exploits fractional activations to substantially improve the accuracy of BNNs. Specifically, our approach employs a dual-precision activation scheme to compute features with up to two bits, using an additional sparse binary convolution. We further binarize the input layer using a novel thermometer encoding. Overall, FracBNN preserves the key benefits of conventional BNNs, where all convolutional layers are computed in pure binary MAC operations (BMACs). We design an efficient FPGA-based accelerator for our novel BNN model that supports the fractional activations. To evaluate the performance of FracBNN under a resource-constrained scenario, we implement the entire optimized network architecture on an embedded FPGA (Xilinx Ultra96 v2). Our experiments on ImageNet show that FracBNN achieves an accuracy comparable to MobileNetV2, surpassing the best-known BNN design on FPGAs with an increase of 28.9% in top-1 accuracy and a 2.5x reduction in model size. FracBNN also outperforms a recently introduced BNN model with an increase of 2.4% in top-1 accuracy while using the same model size. On the embedded FPGA device, FracBNN demonstrates the ability of real-time image classification.},
address = {New York, NY, USA},
author = {Zhang, Yichi and Pan, Junhao and Liu, Xinheng and Chen, Hongzheng and Chen, Deming and Zhang, Zhiru},
booktitle = {The 2021 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays},
date-added = {2021-05-05 16:23:58 +1200},
date-modified = {2021-05-05 16:24:07 +1200},
doi = {10.1145/3431920.3439296},
isbn = {9781450382182},
keywords = {BNN, FPGA accelerators, binary neural networks, deep learning, high-level synthesis},
location = {Virtual Event, USA},
numpages = {12},
pages = {171--182},
publisher = {Association for Computing Machinery},
series = {FPGA '21},
title = {FracBNN: Accurate and FPGA-Efficient Binary Neural Networks with Fractional Activations},
url = {https://doi.org/10.1145/3431920.3439296},
year = {2021},
Bdsk-Url-1 = {https://doi.org/10.1145/3431920.3439296}}
@inproceedings{9411537,
address = {Los Alamitos, CA, USA},
author = {R. Valencia and C. Sham},
booktitle = {2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)},
date-added = {2021-05-05 16:10:23 +1200},
date-modified = {2021-05-05 16:10:45 +1200},
doi = {10.1109/CSDE50874.2020.9411537},
keywords = {BNN, BiSUNA, training;visualization;image coding;neural networks;sociology;reinforcement learning;encoding},
month = {dec},
pages = {1-5},
publisher = {IEEE Computer Society},
title = {Adversarial Neuro Encoding with Binary Neural Networks},
url = {https://doi.ieeecomputersociety.org/10.1109/CSDE50874.2020.9411537},
year = {2020},
Bdsk-Url-1 = {https://doi.ieeecomputersociety.org/10.1109/CSDE50874.2020.9411537},
Bdsk-Url-2 = {https://doi.org/10.1109/CSDE50874.2020.9411537}}
@inproceedings{9411568,
address = {Los Alamitos, CA, USA},
author = {R. Valencia and C. Sham},
booktitle = {2020 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)},
date-added = {2021-05-05 16:06:58 +1200},
date-modified = {2021-05-05 16:07:33 +1200},
doi = {10.1109/CSDE50874.2020.9411568},
keywords = {FPGA-NN, BiSUNA, deep learning;training;switches;reinforcement learning;programmable logic arrays;tools;parallel processing},
month = {dec},
pages = {1-6},
publisher = {IEEE Computer Society},
title = {FPGA deployment of neuroevolved Binary Neural Networks},
url = {https://doi.ieeecomputersociety.org/10.1109/CSDE50874.2020.9411568},
year = {2020},
Bdsk-Url-1 = {https://doi.ieeecomputersociety.org/10.1109/CSDE50874.2020.9411568},
Bdsk-Url-2 = {https://doi.org/10.1109/CSDE50874.2020.9411568}}
@article{10.3389/fnhum.2013.00887,
abstract = {The past 20 years have represented an important period in the development of principles underlying neuroplasticity, especially as they apply to recovery from neurological injury. It is now generally accepted that acquired brain injuries, such as occur in stroke or trauma, initiate a cascade of regenerative events that last for at least several weeks, if not months. Many investigators have pointed out striking parallels between post-injury plasticity and the molecular and cellular events that take place during normal brain development. As evidence for the principles and mechanisms underlying post-injury neuroplasticity has been gleaned from both animal models and human populations, novel approaches to therapeutic intervention have been proposed. One important theme has persisted as the sophistication of clinicians and scientists in their knowledge of neuroplasticity mechanisms has grown: behavioral experience is the most potent modulator of brain plasticity. While there is substantial evidence for this principle in normal, healthy brains, the injured brain is particularly malleable. Based on the quantity and quality of motor experience, the brain can be reshaped after injury in either adaptive or maladaptive ways. This paper reviews selected studies that have demonstrated the neurophysiological and neuroanatomical changes that are triggered by motor experience, by injury, and the interaction of these processes. In addition, recent studies using new and elegant techniques are providing novel perspectives on the events that take place in the injured brain, providing a real-time window into post-injury plasticity. These new approaches are likely to accelerate the pace of basic research, and provide a wealth of opportunities to translate basic principles into therapeutic methodologies.},
author = {Nudo, Randolph},
date-added = {2021-05-04 21:23:51 +1200},
date-modified = {2021-05-04 21:24:30 +1200},
doi = {10.3389/fnhum.2013.00887},
issn = {1662-5161},
journal = {Frontiers in Human Neuroscience},
keywords = {Other, Neuroscience, Brain, synaptic plasticity},
pages = {887},
title = {Recovery after brain injury: mechanisms and principles},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2013.00887},
volume = {7},
year = {2013},
Bdsk-Url-1 = {https://www.frontiersin.org/article/10.3389/fnhum.2013.00887},
Bdsk-Url-2 = {https://doi.org/10.3389/fnhum.2013.00887}}
@article{10.7554/eLife.61277,
abstract = {Complex cognitive functions such as working memory and decision-making require information maintenance over seconds to years, from transient sensory stimuli to long-term contextual cues. While theoretical accounts predict the emergence of a corresponding hierarchy of neuronal timescales, direct electrophysiological evidence across the human cortex is lacking. Here, we infer neuronal timescales from invasive intracranial recordings. Timescales increase along the principal sensorimotor-to-association axis across the entire human cortex, and scale with single-unit timescales within macaques. Cortex-wide transcriptomic analysis shows direct alignment between timescales and expression of excitation- and inhibition-related genes, as well as genes specific to voltage-gated transmembrane ion transporters. Finally, neuronal timescales are functionally dynamic: prefrontal cortex timescales expand during working memory maintenance and predict individual performance, while cortex-wide timescales compress with aging. Thus, neuronal timescales follow cytoarchitectonic gradients across the human cortex and are relevant for cognition in both short and long terms, bridging microcircuit physiology with macroscale dynamics and behavior.},
article_type = {journal},
author = {Gao, Richard and van den Brink, Ruud L and Pfeffer, Thomas and Voytek, Bradley},
citation = {eLife 2020;9:e61277},
date-added = {2021-05-04 21:14:17 +1200},
date-modified = {2021-05-04 21:14:26 +1200},
doi = {10.7554/eLife.61277},
editor = {Vinck, Martin and Colgin, Laura L and Womelsdorf, Thilo},
issn = {2050-084X},
journal = {eLife},
keywords = {Other, neuronal timescales, cortical gradients, functional specialization, transcriptomics, spectral analysis},
month = {nov},
pages = {e61277},
pub_date = {2020-11-23},
publisher = {eLife Sciences Publications, Ltd},
title = {Neuronal timescales are functionally dynamic and shaped by cortical microarchitecture},
url = {https://doi.org/10.7554/eLife.61277},
volume = 9,
year = 2020,
Bdsk-Url-1 = {https://doi.org/10.7554/eLife.61277}}
@inbook{Zhang2020,
abstract = {This chapter aims to briefly introduce the fundamentals for deep learning, which is the key component of deep reinforcement learning. We will start with a naive single-layer network and gradually progress to much more complex but powerful architectures such as convolutional neural networks (CNNs) and recurrent neural networks (RNNs). We will end this chapter with a couple of examples that demonstrate how to implement deep learning models in practice.},
address = {Singapore},
author = {Zhang, Jingqing and Yuan, Hang and Dong, Hao},
date-added = {2021-05-02 14:54:39 +1200},
date-modified = {2021-05-03 14:44:58 +1200},
doi = {10.1007/978-981-15-4095-0_1},
editor = {Dong, Hao and Ding, Zihan and Zhang, Shanghang},
isbn = {978-981-15-4095-0},
keywords = {Books, DQN, Deep RL, AlphaZero},
pages = {3--46},
publisher = {Springer Singapore},
title = {Deep Reinforcement Learning: Fundamentals, Research and Applications},
url = {https://doi.org/10.1007/978-981-15-4095-0_1},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1007/978-981-15-4095-0_1}}
@book{finger1994origins,
author = {Finger, S. and Oxford University Press},
date-added = {2021-04-30 15:07:24 +1200},
date-modified = {2021-04-30 15:11:27 +1200},
isbn = {9780195065039},
keywords = {Neuroscience, Neurons, Medical services},
lccn = {92048265},
publisher = {Oxford University Press},
series = {Oxford University Press paperback},
title = {Origins of Neuroscience: A History of Explorations Into Brain Function},
url = {https://books.google.ca/books?id=BdRqAAAAMAAJ},
year = {1994},
Bdsk-Url-1 = {https://books.google.ca/books?id=BdRqAAAAMAAJ}}
@article{aaai-16669,
author = {Peter Henderson and Riashat Islam and Philip Bachman and Joelle Pineau and Doina Precup and David Meger},
date-added = {2021-04-28 16:58:12 +1200},
date-modified = {2021-04-28 17:01:07 +1200},
journal = {AAAI Publications, Thirty-Second AAAI Conference on Artificial Intelligence},
keywords = {DNN, Reinforcement Learning, Machine Learning, OpenAI Baselines},
month = {04},
number = {16669},
title = {Deep Reinforcement Learning That Matters},
year = {2018}}
@article{IGNATOV2020276,
abstract = {Despite the growing popularity of deep learning technologies, high memory requirements and power consumption are essentially limiting their application in mobile and IoT areas. While binary convolutional networks can alleviate these problems, the limited bitwidth of weights is often leading to significant degradation of prediction accuracy. In this paper, we present a method for training binary networks that maintains a stable predefined level of their information capacity throughout the training process by applying Shannon entropy based penalty to convolutional filters. The results of experiments conducted on the SVHN, CIFAR and ImageNet datasets demonstrate that the proposed approach can statistically significantly improve the accuracy of binary networks.},
author = {Dmitry Ignatov and Andrey Ignatov},
date-added = {2021-04-22 19:01:10 +1200},
date-modified = {2021-04-22 19:01:10 +1200},
doi = {10.1016/j.patrec.2020.07.033},
issn = {0167-8655},
journal = {Pattern Recognition Letters},
keywords = {Deep learning, Binary neural network, Information theory, Shannon entropy},
pages = {276-281},
title = {Controlling information capacity of binary neural network},
url = {https://www.sciencedirect.com/science/article/pii/S0167865520302877},
volume = {138},
year = {2020},
Bdsk-Url-1 = {https://www.sciencedirect.com/science/article/pii/S0167865520302877},
Bdsk-Url-2 = {https://doi.org/10.1016/j.patrec.2020.07.033}}
@article{KOYAMA202012,
abstract = {This paper presents three-layer dynamic binary neural networks characterized by ternary connection parameters and the signum activation function. The dynamics is described by a difference equation of binary state variables. Depending on the parameters, the network can generate various binary periodic orbits. We give two main theoretical results. First, when a desired periodic orbit is given, we can set the parameters that guarantee storage and local stability of the periodic orbit. The stability is related to error correction of various binary signals in engineering applications. Second, if a part of the connection parameters becomes zero then stability of the periodic orbit becomes very strong. In this case, all the initial states fall directly into the periodic orbit.},
author = {Seitaro Koyama and Toshimichi Saito},
date-added = {2021-04-22 18:55:32 +1200},
date-modified = {2021-04-22 18:55:39 +1200},
doi = {10.1016/j.neucom.2020.01.105},
issn = {0925-2312},
journal = {Neurocomputing},
keywords = {BNN, Binary neural networks, Binary connects, Periodic orbits, Storage, Stability},
pages = {12-18},
title = {Guaranteed storage and stabilization of desired binary periodic orbits in three-layer dynamic binary neural networks},
url = {https://www.sciencedirect.com/science/article/pii/S0925231220301697},
volume = {416},
year = {2020},
Bdsk-Url-1 = {https://www.sciencedirect.com/science/article/pii/S0925231220301697},
Bdsk-Url-2 = {https://doi.org/10.1016/j.neucom.2020.01.105}}
@book{9781788835725,
author = {Sayon Dutta},
date-added = {2021-04-22 16:42:44 +1200},
date-modified = {2021-04-22 16:47:38 +1200},
keywords = {Books, OpenAI Gym, TensorFlow},
number = {327},
publisher = {Packt Publishing},
title = {Reinforcement Learning with TensorFlow - A Beginner's Guide to Designing Self-Learning Systems with TensorFlow and OpenAI Gym},
year = {2018}}
@article{9063667,
author = {Labao, Alfonso B. and Martija, Mygel Andrei M. and Naval, Prospero C.},
date-added = {2021-04-22 16:15:14 +1200},
date-modified = {2021-04-22 16:15:50 +1200},
doi = {10.1109/TNNLS.2020.2980743},
journal = {IEEE Transactions on Neural Networks and Learning Systems},
keywords = {DNN, Atari, Reinforcement Learning, A3C},
number = {3},
pages = {1162-1176},
title = {A3C-GS: Adaptive Moment Gradient Sharing With Locks for Asynchronous Actor--Critic Agents},
volume = {32},
year = {2021},
Bdsk-Url-1 = {https://doi.org/10.1109/TNNLS.2020.2980743}}
@book{978-1-4842-5126-3,
author = {Taweh Beysolow},
date-added = {2021-04-22 16:03:42 +1200},
date-modified = {2021-04-22 16:05:21 +1200},
keywords = {OpenAI Gym, Artificial Intelligence, Python, open source},
number = {XV, 168},
publisher = {Apress, Berkeley, CA},
title = {Applied Reinforcement Learning with Python},
year = {2019}}
@book{Palanisamy:2018:HIA:3285236,
author = {Palanisamy, Praveen},
date-added = {2021-04-22 15:49:51 +1200},
date-modified = {2021-04-22 15:52:07 +1200},
keywords = {OpenAI Gym, Deep Learning, DeepQNetwork, Atari},
publisher = {Packt Publishing},
title = {Hands-On Intelligent Agents with OpenAI Gym: Your Guide to Developing AI Agents Using Deep Reinforcement Learning},
year = {2018}}
@article{Feldmann:2021aa,
abstract = {With the proliferation of ultrahigh-speed mobile networks and internet-connected devices, along with the rise of artificial intelligence (AI), the world is generating exponentially increasing amounts of data that need to be processed in a fast and efficient way. Highly parallelized, fast and scalable hardware is therefore becoming progressively more important. Here we demonstrate a computationally specific integrated photonic hardware accelerator (tensor core) that is capable of operating at speeds of trillions of multiply-accumulate operations per second ($10^{12}$ MAC operations per second or tera-MACs per second). The tensor core can be considered as the optical analogue of an application-specific integrated circuit (ASIC). It achieves parallelized photonic in-memory computing using phase-change-material memory arrays and photonic chip-based optical frequency combs (soliton microcombs). The computation is reduced to measuring the optical transmission of reconfigurable and non-resonant passive components and can operate at a bandwidth exceeding 14 gigahertz, limited only by the speed of the modulators and photodetectors. Given recent advances in hybrid integration of soliton microcombs at microwave line rates, ultralow-loss silicon nitride waveguides, and high-speed on-chip detectors and modulators, our approach provides a path towards full complementary metal--oxide--semiconductor (CMOS) wafer-scale integration of the photonic tensor core. Although we focus on convolutional processing, more generally our results indicate the potential of integrated photonics for parallel, fast, and efficient computational hardware in data-heavy AI applications such as autonomous driving, live video processing, and next-generation cloud computing services.},
author = {Feldmann, J. and Youngblood, N. and Karpov, M. and Gehring, H. and Li, X. and Stappers, M. and Le Gallo, M. and Fu, X. and Lukashchuk, A. and Raja, A. S. and Liu, J. and Wright, C. D. and Sebastian, A. and Kippenberg, T. J. and Pernice, W. H. P. and Bhaskaran, H.},
da = {2021/01/01},
date-added = {2021-01-11 21:16:02 +1300},
date-modified = {2021-01-11 21:16:35 +1300},
doi = {10.1038/s41586-020-03070-1},
id = {Feldmann2021},
isbn = {1476-4687},
journal = {Nature},
keywords = {DNN, Nature, Photonics, Silicon, CNN},
number = {7840},
pages = {52--58},
title = {Parallel convolutional processing using an integrated photonic tensor core},
ty = {JOUR},
url = {https://doi.org/10.1038/s41586-020-03070-1},
volume = {589},
year = {2021},
Bdsk-Url-1 = {https://doi.org/10.1038/s41586-020-03070-1}}
@article{Geirhos:2020aa,
abstract = {Deep learning has triggered the current rise of artificial intelligence and is the workhorse of today's machine intelligence. Numerous success stories have rapidly spread all over science, industry and society, but its limitations have only recently come into focus. In this Perspective we seek to distil how many of deep learning's failures can be seen as different symptoms of the same underlying problem: shortcut learning. Shortcuts are decision rules that perform well on standard benchmarks but fail to transfer to more challenging testing conditions, such as real-world scenarios. Related issues are known in comparative psychology, education and linguistics, suggesting that shortcut learning may be a common characteristic of learning systems, biological and artificial alike. Based on these observations, we develop a set of recommendations for model interpretation and benchmarking, highlighting recent advances in machine learning to improve robustness and transferability from the lab to real-world applications.},
author = {Geirhos, Robert and Jacobsen, J{\"o}rn-Henrik and Michaelis, Claudio and Zemel, Richard and Brendel, Wieland and Bethge, Matthias and Wichmann, Felix A.},
da = {2020/11/01},
date-added = {2021-01-11 21:14:20 +1300},
date-modified = {2021-01-11 21:14:40 +1300},
doi = {10.1038/s42256-020-00257-z},
id = {Geirhos2020},
isbn = {2522-5839},
journal = {Nature Machine Intelligence},
keywords = {DNN, Nature, Supervised learning},
number = {11},
pages = {665--673},
title = {Shortcut learning in deep neural networks},
ty = {JOUR},
url = {https://doi.org/10.1038/s42256-020-00257-z},
volume = {2},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1038/s42256-020-00257-z}}
@article{Schrittwieser:2020aa,
abstract = {Constructing agents with planning capabilities has long been one of the main challenges in the pursuit of artificial intelligence. Tree-based planning methods have enjoyed huge success in challenging domains, such as chess and Go, where a perfect simulator is available. However, in real-world problems, the dynamics governing the environment are often complex and unknown. Here we present the MuZero algorithm, which, by combining a tree-based search with a learned model, achieves superhuman performance in a range of challenging and visually complex domains, without any knowledge of their underlying dynamics. The MuZero algorithm learns an iterable model that produces predictions relevant to planning: the action-selection policy, the value function and the reward. When evaluated on 57 different Atari games---the canonical video game environment for testing artificial intelligence techniques, in which model-based planning approaches have historically struggled---the MuZero algorithm achieved state-of-the-art performance. When evaluated on Go, chess and shogi---canonical environments for high-performance planning---the MuZero algorithm matched, without any knowledge of the game dynamics, the superhuman performance of the AlphaZero algorithm that was supplied with the rules of the game.},
author = {Schrittwieser, Julian and Antonoglou, Ioannis and Hubert, Thomas and Simonyan, Karen and Sifre, Laurent and Schmitt, Simon and Guez, Arthur and Lockhart, Edward and Hassabis, Demis and Graepel, Thore and Lillicrap, Timothy and Silver, David},
da = {2020/12/01},
date-added = {2021-01-11 21:07:11 +1300},
date-modified = {2021-04-22 16:17:06 +1200},
doi = {10.1038/s41586-020-03051-4},
id = {Schrittwieser2020},
isbn = {1476-4687},
journal = {Nature},
keywords = {DNN, Nature, Reinforcement Learning, Atari},
number = {7839},
pages = {604--609},
title = {Mastering Atari, Go, chess and shogi by planning with a learned model},
ty = {JOUR},
url = {https://doi.org/10.1038/s41586-020-03051-4},
volume = {588},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1038/s41586-020-03051-4}}
@article{Weng:2020aa,
abstract = {Analyzing scattered wave to recognize object is of fundamental significance in wave physics. Recently-emerged deep learning technique achieved great success in interpreting wave field such as in ultrasound non-destructive testing and disease diagnosis, but conventionally need time-consuming computer postprocessing or bulky-sized diffractive elements. Here we theoretically propose and experimentally demonstrate a purely-passive and small-footprint meta-neural-network for real-time recognizing complicated objects by analyzing acoustic scattering. We prove meta-neural-network mimics a standard neural network despite its compactness, thanks to unique capability of its metamaterial unit-cells (dubbed meta-neurons) to produce deep-subwavelength phase shift as training parameters. The resulting device exhibits the ``intelligence'' to perform desired tasks with potential to overcome the current limitations, showcased by two distinctive examples of handwritten digit recognition and discerning misaligned orbital-angular-momentum vortices. Our mechanism opens the route to new metamaterial-based deep-learning paradigms and enable conceptual devices automatically analyzing signals, with far-reaching implications for acoustics and related fields.},
author = {Weng, Jingkai and Ding, Yujiang and Hu, Chengbo and Zhu, Xue-Feng and Liang, Bin and Yang, Jing and Cheng, Jianchun},
da = {2020/12/09},
date-added = {2021-01-11 18:14:49 +1300},
date-modified = {2021-01-11 18:15:17 +1300},
doi = {10.1038/s41467-020-19693-x},
id = {Weng2020},
isbn = {2041-1723},
journal = {Nature Communications},
keywords = {DNN, Nature, Passive learning, Object recognition},
number = {1},
pages = {6309},
title = {Meta-neural-network for real-time and passive deep-learning-based object recognition},
ty = {JOUR},
url = {https://doi.org/10.1038/s41467-020-19693-x},
volume = {11},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1038/s41467-020-19693-x}}
@article{Babanezhad:2020aa,
abstract = {Bubbly flow behavior simulation in two-phase chemical reactors such bubble column type reactors is widely employed for chemical industry purposes. The computational fluid dynamics (CFD) approach has been employed by engineers and researchers for modeling these types of chemical reactors. In spite of the CFD robustness for simulating transport phenomena and chemical reactions in these reactors, this approach has been known as expensive for modeling such turbulent complex flows. Artificial intelligence (AI) algorithm of the adaptive network-based fuzzy inference system (ANFIS) are largely understood and utilized for the CFD approach optimization. In this hybrid approach, the CFD findings are learned by AI algorithms like ANFIS to save computational time and expenses. Once the pattern of the CFD results have been captured by the AI model, this hybrid model can be then used for process simulation and optimization. As such, there is no need for further simulations of new conditions. The objective of this paper is to obviate the need for expensive CFD computations for two-phase flows in chemical reactors via coupling CFD data to an AI algorithm, i.e., differential evolution based fuzzy inference system (DEFIS). To do so, air velocity as the output and the values of the x, and y coordinates, water velocity, and time step as the inputs are inputted the AI model for learning the flow pattern. The effects of cross over as the DE parameter and also the number of inputs on the best intelligence are investigated. Indeed, DEFIS correlates the air velocity to the nodes coordinates, time, and liquid velocity and then after the CFD modeling could be replaced with the simple correlation. For the first time, a comparison is made between the ANFIS and the DEFIS performances in terms of the prediction capability of the gas (air) velocity. The results released that both ANFIS and DEFIS could accurately predict the CFD pattern. The prediction times of both methods were obtained to be equal. However, the learning time of the DEFIS was fourfold of ANFIS.},
author = {Babanezhad, Meisam and Behroyan, Iman and Nakhjiri, Ali Taghvaie and Marjani, Azam and Rezakazemi, Mashallah and Shirazian, Saeed},
da = {2020/12/04},
date-added = {2021-01-11 18:09:24 +1300},
date-modified = {2021-01-11 18:11:15 +1300},
doi = {10.1038/s41598-020-78277-3},
id = {Babanezhad2020},
isbn = {2045-2322},
journal = {Scientific Reports},
keywords = {Other, Nature, Differential Evolution, Fuzzy inference},
number = {1},
pages = {21304},
title = {High-performance hybrid modeling chemical reactors using differential evolution based fuzzy inference system},
ty = {JOUR},
url = {https://doi.org/10.1038/s41598-020-78277-3},
volume = {10},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1038/s41598-020-78277-3}}
@book{OpenCL-In-Action,
author = {Matthew Scarpino},
date-added = {2020-12-25 23:00:49 +1300},
date-modified = {2021-05-11 21:53:12 +1200},
keywords = {OpenCL, Books},
month = {November},
publisher = {Manning},
title = {OpenCL In Action},
url = {https://www.manning.com/books/opencl-in-action},
year = {2011},
Bdsk-Url-1 = {https://www.manning.com/books/opencl-in-action}}
@webpage{Xilinx-Dev-Accel,
author = {Rob Armstrong},
date-added = {2020-12-25 21:54:27 +1300},
date-modified = {2020-12-25 21:55:42 +1300},
keywords = {URL, Xilinx, Tutorial, Developer, FPGA},
month = {Nov},
title = {Get Moving with Alveo},
url = {https://developer.xilinx.com/en/articles/acceleration-basics.html},
year = {2019},
Bdsk-Url-1 = {https://developer.xilinx.com/en/articles/acceleration-basics.html}}
@inproceedings{10.1145/3377929.3398164,
abstract = {Spectrum-Diverse Neuroevolution with Unified Neural Models (SUNA) has been shown to be a successful alternative to the algorithm NeuroEvolution of Augmenting Topologies (NEAT). Requiring less parameters than NEAT yet possessing a more unified representation power and effective spectrum-based diversity preservation, SUNA outperformed NEAT on most of the problems to be experimented. However, we think a simple improvement approach can be made to improve SUNA's efficiency in the strategic decision-making problem tested by the model itself, i.e. the multiplexer problem. In the proposed method, we try to incorporate the idea of logical gates to the hidden neurons in the model, suggesting it the solutions that solve the problem in the real world in the form of neurons. It is shown that with the simple logic gates neuron variations, SUNA can be slightly enhanced to resolve the multiplexer problem.},
address = {New York, NY, USA},
author = {Ta, Anh Duc and Vargas, Danilo Vasconcellos},
booktitle = {Proceedings of the 2020 Genetic and Evolutionary Computation Conference Companion},
date-added = {2020-12-21 17:11:46 +1300},
date-modified = {2020-12-21 17:12:07 +1300},
doi = {10.1145/3377929.3398164},
isbn = {9781450371278},
keywords = {Evolutionary, SUNA, Logic Gates, Multiplexer},
location = {Canc\'{u}n, Mexico},
numpages = {2},
pages = {53--54},
publisher = {Association for Computing Machinery},
series = {GECCO '20},
title = {Towards Improvement of SUNA in Multiplexers with Preliminary Results of Simple Logic Gate Neuron Variation},
url = {https://doi.org/10.1145/3377929.3398164},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1145/3377929.3398164}}
@article{6772729,
abstract = {The author was led to the study given in this paper from a consideration of large scale computing machines in which a large number of operations must be performed without a single error in the end result. This problem of ``doing things right'' on a large scale is not essentially new; in a telephone central office, for example, a very large number of operations are performed while the errors leading to wrong numbers are kept well under control, though they have not been completely eliminated. This has been achieved, in part, through the use of self-checking circuits. The occasional failure that escapes routine checking is still detected by the customer and will, if it persists, result in customer complaint, while if it is transient it will produce only occasional wrong numbers. At the same time the rest of the central office functions satisfactorily. In a digital computer, on the other hand, a single failure usually means the complete failure, in the sense that if it is detected no more computing can be done until the failure is located and corrected, while if it escapes detection then it invalidates all subsequent operations of the machine. Put in other words, in a telephone central office there are a number of parallel paths which are more or less independent of each other; in a digital machine there is usually a single long path which passes through the same piece of equipment many, many times before the answer is obtained.},
author = {R. W. {Hamming}},
date-added = {2020-12-20 22:21:09 +1300},
date-modified = {2020-12-20 22:21:48 +1300},
doi = {10.1002/j.1538-7305.1950.tb00463.x},
issn = {0005-8580},
journal = {The Bell System Technical Journal},
keywords = {Other, Hamming Distance, Error detection},
month = {April},
number = {2},
pages = {147-160},
title = {Error detecting and error correcting codes},
volume = {29},
year = {1950},
Bdsk-Url-1 = {https://doi.org/10.1002/j.1538-7305.1950.tb00463.x}}
@inproceedings{Kassahun07commongenetic,
author = {Yohannes Kassahun and Gerald Sommer and Mark Edgington and Jan Hendrik Metzen and Frank Kirchner},
booktitle = {Proceedings of the Genetic and Evolutionary Computation Conference (GECCO 2007)},
date-added = {2020-12-17 13:15:14 +1300},
date-modified = {2020-12-17 13:17:27 +1300},
keywords = {Evolutionary, Genetic Encoding, Genotype Phenotype Mapping},
pages = {1029--1036},
publisher = {ACM Press},
title = {Common genetic encoding for both direct and indirect encodings of networks},
year = {2007}}
@article{103390-app9204460,
author = {Francesco Rundo},
journal = {Applied Sciences},
date-added = {2020-12-11 19:09:27 +1300},
date-modified = {2020-12-11 19:12:56 +1300},
keywords = {NN-Fin;STM32; financial; deep learning; LSTM; reinforcement learning},
month = {October},
title = {Deep LSTM with Reinforcement Learning Layer for Financial Trend Prediction in FX High Frequency Trading Systems},
url = {https://www.mdpi.com/2076-3417/9/20/4460},
volume = {9},
year = {2019},
Bdsk-Url-1 = {https://www.mdpi.com/2076-3417/9/20/4460}}
@url{Schneier-Crypto,
author = {Bruce Schneier},
date-added = {2020-12-09 21:56:38 +1300},
date-modified = {2020-12-09 21:57:41 +1300},
keywords = {URL, Cryptanalysis, Cryptography},
title = {Memo to the Amateur Cipher Designer},
url = {https://www.schneier.com/crypto-gram/archives/1998/1015.html#cipherdesign},
year = {1998},
Bdsk-Url-1 = {https://www.schneier.com/crypto-gram/archives/1998/1015.html#cipherdesign}}
@electronic{Fast-STS,
author = {Zdenek {\v R}{\'\i}ha and Marek S{\'y}s},
date-added = {2020-12-09 21:53:59 +1300},
date-modified = {2020-12-09 21:55:29 +1300},
keywords = {URL, STS, NIST, Randomness},
title = {Faster randomness testing},
url = {https://randomness-tests.fi.muni.cz/},
year = {2014},
Bdsk-Url-1 = {https://randomness-tests.fi.muni.cz/}}
@webpage{gpipe,
author = {Yanping Huang},
date-added = {2020-12-09 21:50:35 +1300},
date-modified = {2020-12-09 21:52:00 +1300},
keywords = {URL, NN, Training, DNN},
title = {Introducing GPipe, an Open Source Library for Efficiently Training Large-scale Neural Network Models},
url = {https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html},
year = {2019},
Bdsk-Url-1 = {https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html}}
@article{10.2307/169420,
abstract = {This paper considers the problem of minimizing a function $F(x_1, \dots, x_n)$ over a closed, bounded region $S$ in $n$-dimensional space under the assumption that there exists a unique minimizing point $(z_1, \dots, z_n) \in S$. In a previous paper I represented the coordinates of the minimizing point as the limit of a ratio of integrals. The same type of ratio appears, in a different context, in statistical mechanics where a Monte Carlo method has been developed, by Metropolis et al., for its numerical evaluation. The purpose of this paper is to point out the connection of Metropolis's method with the above type of minimization problem. The idea of the method is to associate with the minimization problem a Markov chain whose sample averages converge with probability one to (approximately) the minimizing point $(z_1, \dots, z_n)$. The Markov chain should be easily realizable on a computer. An estimate of the error from sampling over a finite time period is given.},
author = {Martin Pincus},
date-added = {2020-12-08 15:15:34 +1300},
date-modified = {2020-12-08 15:16:22 +1300},
issn = {0030364X, 15265463},
journal = {Operations Research},
keywords = {Other, Simulated annealing, Monte Carlo, Probability},
number = {6},
pages = {1225--1228},
publisher = {INFORMS},
title = {A Monte Carlo Method for the Approximate Solution of Certain Types of Constrained Optimization Problems},
url = {http://www.jstor.org/stable/169420},
volume = {18},
year = {1970},
Bdsk-Url-1 = {http://www.jstor.org/stable/169420}}
@article{Bianchi:2009aa,
abstract = {Metaheuristics are general algorithmic frameworks, often nature-inspired, designed to solve complex optimization problems, and they are a growing research area since a few decades. In recent years, metaheuristics are emerging as successful alternatives to more classical approaches also for solving optimization problems that include in their mathematical formulation uncertain, stochastic, and dynamic information. In this paper metaheuristics such as Ant Colony Optimization, Evolutionary Computation, Simulated Annealing, Tabu Search and others are introduced, and their applications to the class of Stochastic Combinatorial Optimization Problems (SCOPs) is thoroughly reviewed. Issues common to all metaheuristics, open problems, and possible directions of research are proposed and discussed. In this survey, the reader familiar to metaheuristics finds also pointers to classical algorithmic approaches to optimization under uncertainty, and useful informations to start working on this problem domain, while the reader new to metaheuristics should find a good tutorial in those metaheuristics that are currently being applied to optimization under uncertainty, and motivations for interest in this field.},
author = {Bianchi, Leonora and Dorigo, Marco and Gambardella, Luca Maria and Gutjahr, Walter J.},
da = {2009/06/01},
date-added = {2020-12-07 15:53:14 +1300},
date-modified = {2020-12-07 15:53:48 +1300},
doi = {10.1007/s11047-008-9098-4},
id = {Bianchi2009},
isbn = {1572-9796},
journal = {Natural Computing},
keywords = {Other, Metaheuristics, Evolutionary computation, Survey},
number = {2},
pages = {239--287},
title = {A survey on metaheuristics for stochastic combinatorial optimization},
ty = {JOUR},
url = {https://doi.org/10.1007/s11047-008-9098-4},
volume = {8},
year = {2009},
Bdsk-Url-1 = {https://doi.org/10.1007/s11047-008-9098-4}}
@article{ILSVRC15,
author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
date-added = {2020-12-07 15:07:25 +1300},
date-modified = {2020-12-07 15:07:37 +1300},
doi = {10.1007/s11263-015-0816-y},
journal = {International Journal of Computer Vision (IJCV)},
keywords = {Other, ImageNet, ILSVRC},
number = {3},
pages = {211-252},
title = {{ImageNet Large Scale Visual Recognition Challenge}},
volume = {115},
year = {2015},
Bdsk-Url-1 = {https://doi.org/10.1007/s11263-015-0816-y}}
@inproceedings{5206848,
abstract = {The explosion of image data on the Internet has the potential to foster more sophisticated and robust models and algorithms to index, retrieve, organize and interact with images and multimedia data. But exactly how such data can be harnessed and organized remains a critical problem. We introduce here a new database called ``ImageNet'', a large-scale ontology of images built upon the backbone of the WordNet structure. ImageNet aims to populate the majority of the 80,000 synsets of WordNet with an average of 500-1000 clean and full resolution images. This will result in tens of millions of annotated images organized by the semantic hierarchy of WordNet. This paper offers a detailed analysis of ImageNet in its current state: 12 subtrees with 5247 synsets and 3.2 million images in total. We show that ImageNet is much larger in scale and diversity and much more accurate than the current image datasets. Constructing such a large-scale database is a challenging task. We describe the data collection scheme with Amazon Mechanical Turk. Lastly, we illustrate the usefulness of ImageNet through three simple applications in object recognition, image classification and automatic object clustering. We hope that the scale, accuracy, diversity and hierarchical structure of ImageNet can offer unparalleled opportunities to researchers in the computer vision community and beyond.},
author = {J. {Deng} and W. {Dong} and R. {Socher} and L. {Li} and {Kai Li} and {Li Fei-Fei}},
booktitle = {2009 IEEE Conference on Computer Vision and Pattern Recognition},
date-added = {2020-12-07 15:03:21 +1300},
date-modified = {2020-12-07 15:03:35 +1300},
doi = {10.1109/CVPR.2009.5206848},
issn = {1063-6919},
keywords = {NN, ImageNet, computer vision;image resolution;image retrieval;Internet;multimedia computing;ontologies (artificial intelligence);trees (mathematics);very large databases;visual databases;ImageNet database;large-scale hierarchical image database;Internet;image retrieval;multimedia data;large-scale ontology;wordNet structure;image resolution;subtree;computer vision;Large-scale systems;Image databases;Explosions;Internet;Robustness;Information retrieval;Image retrieval;Multimedia databases;Ontologies;Spine},
month = {June},
pages = {248-255},
title = {ImageNet: A large-scale hierarchical image database},
year = {2009},
Bdsk-Url-1 = {https://doi.org/10.1109/CVPR.2009.5206848}}
@article{8766229,
abstract = {This standard specifies interchange and arithmetic formats and methods for binary and decimal floating-point arithmetic in computer programming environments. This standard specifies exception conditions and their default handling. An implementation of a floating-point system conforming to this standard may be realized entirely in software, entirely in hardware, or in any combination of software and hardware. For operations specified in the normative part of this standard, numerical results and exceptions are uniquely determined by the values of the input data, sequence of operations, and destination formats, all under user control.},
author = {IEEE},
date-added = {2020-12-06 18:10:11 +1300},
date-modified = {2020-12-06 18:10:24 +1300},
doi = {10.1109/IEEESTD.2019.8766229},
journal = {IEEE Std 754-2019 (Revision of IEEE 754-2008)},
keywords = {Other, IEEE Standards;Floating-point arithmetic;arithmetic;binary;computer;decimal;exponent;floating-point;format;IEEE 754;interchange;NaN;number;rounding;significand;subnormal.},
month = {July},
pages = {1-84},
title = {IEEE Standard for Floating-Point Arithmetic},
year = {2019},
Bdsk-Url-1 = {https://doi.org/10.1109/IEEESTD.2019.8766229}}
@article{935097,
abstract = {We present methods for optimizing portfolios, asset allocations, and trading systems based on direct reinforcement (DR). In this approach, investment decision-making is viewed as a stochastic control problem, and strategies are discovered directly. We present an adaptive algorithm called recurrent reinforcement learning (RRL) for discovering investment policies. The need to build forecasting models is eliminated, and better trading performance is obtained. The direct reinforcement approach differs from dynamic programming and reinforcement algorithms such as TD-learning and Q-learning, which attempt to estimate a value function for the control problem. We find that the RRL direct reinforcement framework enables a simpler problem representation, avoids Bellman's curse of dimensionality and offers compelling advantages in efficiency. We demonstrate how direct reinforcement can be used to optimize risk-adjusted investment returns (including the differential Sharpe ratio), while accounting for the effects of transaction costs. In extensive simulation work using real financial data, we find that our approach based on RRL produces better trading strategies than systems utilizing Q-learning (a value function method). Real-world applications include an intra-daily currency trader and a monthly asset allocation system for the S&P 500 Stock Index and T-Bills.},
author = {J. {Moody} and M. {Saffell}},
date-added = {2020-12-05 18:27:07 +1300},
date-modified = {2021-04-22 16:17:06 +1200},
doi = {10.1109/72.935097},
issn = {1941-0093},
journal = {IEEE Transactions on Neural Networks},
keywords = {NN-Fin, Reinforcement Learning, DNN, stock markets;investment;decision theory;learning (artificial intelligence);stochastic systems;optimisation;direct reinforcement learning;portfolio optimization;asset allocations;trading systems;DR;investment decision-making;stochastic control problem;recurrent reinforcement learning;RRL;investment policies;forecasting models;risk-adjusted investment return optimization;differential Sharpe ratio;transaction costs;financial data;intra-daily currency trader;monthly asset allocation system;S&P 500 Stock Index;T-Bills;Investments;Asset management;Optimization methods;Portfolios;Decision making;Stochastic processes;Adaptive algorithm;Learning;Predictive models;Dynamic programming},
month = {July},
number = {4},
pages = {875-889},
title = {Learning to trade via direct reinforcement},
volume = {12},
year = {2001},
Bdsk-Url-1 = {https://doi.org/10.1109/72.935097}}
@article{10.1145/2949662,
abstract = {The power of deep neural networks has sparked renewed interest in reinforcement learning, with applications to games, robotics, and beyond.},
address = {New York, NY, USA},
author = {Krakovsky, Marina},
date-added = {2020-12-05 18:17:29 +1300},
date-modified = {2021-04-22 16:17:06 +1200},
doi = {10.1145/2949662},
issn = {0001-0782},
issue_date = {August 2016},
journal = {Commun. ACM},
keywords = {DNN, Reinforcement Learning, Robotics, Article},
month = jul,
number = {8},
numpages = {3},
pages = {12--14},
publisher = {Association for Computing Machinery},
title = {Reinforcement Renaissance},
url = {https://doi-org.ezproxy.auckland.ac.nz/10.1145/2949662},
volume = {59},
year = {2016},
Bdsk-Url-1 = {https://doi-org.ezproxy.auckland.ac.nz/10.1145/2949662},
Bdsk-Url-2 = {https://doi.org/10.1145/2949662}}
@book{sra2012optimization,
author = {Sra, S. and Nowozin, S. and Wright, S.J.},
date-added = {2020-12-04 21:46:25 +1300},
date-modified = {2020-12-05 13:34:27 +1300},
isbn = {9780262016469},
keywords = {Books, NN, SGD, Gradient Descent, ML, DNN},
lccn = {2011002059},
publisher = {MIT Press},
series = {Neural information processing series},
title = {Optimization for Machine Learning},
url = {https://books.google.ca/books?id=JPQx7s2L1A8C},
year = {2012},
Bdsk-Url-1 = {https://books.google.ca/books?id=JPQx7s2L1A8C}}
@article{Hassan:2020aa,
abstract = {Deep Convolutional Neural Networks (CNNs) are the state-of-the-art systems for image classification due to their high accuracy, but their high computational complexity is very costly. Acceleration is therefore the current target in this field for using these systems in real-time applications. Graphics Processing Units are one solution, but their high power consumption prevents their use in everyday equipment, whereas the Field Programmable Gate Array (FPGA) has low power consumption and a flexible architecture that fits CNN implementations better. This work discusses this problem and provides a solution that compromises between the speed of the CNN and the power consumption of the FPGA. This solution depends on two main techniques for speeding up: parallelism of layer resources and pipelining inside some layers. In addition, we add a new methodology that balances area requirements against speed and design time by implementing the CNN using the Xilinx SDSOC tool (processor and FPGA on the same board). Implementing the design with HW/SW partitioning shortens design time through a high-level language (C or C++) in Vivado HLS (High-Level Synthesis), and it also accommodates larger designs than using the FPGA alone.},
author = {Hassan, Rania O. and Mostafa, Hassan},
date-added = {2020-12-02 13:45:26 +1300},
date-modified = {2020-12-02 13:45:48 +1300},
doi = {10.1007/s10470-020-01638-5},
issn = {1573-1979},
journal = {Analog Integrated Circuits and Signal Processing},
keywords = {FPGA-NN, Xilinx, SoC, DNN},
title = {Implementation of deep neural networks on FPGA-CPU platform using Xilinx SDSOC},
url = {https://doi.org/10.1007/s10470-020-01638-5},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1007/s10470-020-01638-5}}
@techreport{zynqDPU,
author = {Xilinx},
date-added = {2020-12-01 23:47:16 +1300},
date-modified = {2020-12-01 23:55:12 +1300},
keywords = {Other, Zynq, Xilinx, DPU},
month = {July},
number = {PG338},
institution = {Xilinx},
title = {Zynq DPU v3.2},
type = {Product Guide},
url = {https://www.xilinx.com/support/documentation/ip_documentation/dpu/v3_2/pg338-dpu.pdf},
year = {2020},
Bdsk-Url-1 = {https://www.xilinx.com/support/documentation/ip_documentation/dpu/v3_2/pg338-dpu.pdf}}
@inproceedings{DBLP:conf/eccv/HuLWZC18,
author = {Qinghao Hu and Gang Li and Peisong Wang and Yifan Zhang and Jian Cheng},
booktitle = {Computer Vision -- ECCV 2018, Part XIII},
date-added = {2020-12-01 18:09:15 +1300},
date-modified = {2020-12-01 18:10:58 +1300},
keywords = {BNN, FPGA, DNN},
pages = {657-673},
title = {Training Binary Weight Networks via Semi-Binary Decomposition},
url = {https://doi.org/10.1007/978-3-030-01261-8_39},
year = {2018},
Bdsk-Url-1 = {https://doi.org/10.1007/978-3-030-01261-8_39}}
@article{Qin_2020,
author = {Qin, Haotong and Gong, Ruihao and Liu, Xianglong and Bai, Xiao and Song, Jingkuan and Sebe, Nicu},
date-added = {2020-12-01 17:09:05 +1300},
date-modified = {2020-12-01 17:09:39 +1300},
doi = {10.1016/j.patcog.2020.107281},
issn = {0031-3203},
journal = {Pattern Recognition},
keywords = {BNN, Survey, gradient descent},
month = {Sep},
pages = {107281},
publisher = {Elsevier BV},
title = {Binary neural networks: A survey},
url = {https://doi.org/10.1016/j.patcog.2020.107281},
volume = {105},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1016/j.patcog.2020.107281}}
@article{PASQUALINI20201122,
abstract = {Pseudo-Random Number Generators (PRNGs) are algorithms designed to generate long sequences of statistically uncorrelated numbers, i.e. Pseudo-Random Numbers (PRNs). These numbers are widely employed in mid-level cryptography and in software applications. Test suites are used to evaluate the quality of PRNGs by checking statistical properties of the generated sequences. Machine learning techniques are often used to break these generators, i.e. to approximate a certain generator or a certain sequence using a neural network. But what about using machine learning to generate PRNGs? This paper proposes a Reinforcement Learning (RL) approach to the task of generating PRNGs from scratch by learning a policy to solve an N-dimensional navigation problem. In this context, N is the length of the period of the sequence to generate, and the policy is iteratively improved using the average score of an appropriate test suite run over that period. The aim of this work is to demonstrate the feasibility of the proposed approach, to compare it with classical methods, and to lay the foundation of a research path combining RL and PRNGs.},
author = {Luca Pasqualini and Maurizio Parton},
date-added = {2020-11-19 17:48:56 +1300},
date-modified = {2021-04-22 16:17:06 +1200},
doi = {10.1016/j.procs.2020.03.057},
issn = {1877-0509},
journal = {Procedia Computer Science},
keywords = {Crypto, NIST, STS, Reinforcement Learning, PRNG, Pseudo-Random Number, Machine Learning, Deep Learning, Neural Networks},
note = {The 11th International Conference on Ambient Systems, Networks and Technologies (ANT) / The 3rd International Conference on Emerging Data and Industry 4.0 (EDI40) / Affiliated Workshops},
pages = {1122--1127},
title = {Pseudo Random Number Generation: a Reinforcement Learning approach},
url = {http://www.sciencedirect.com/science/article/pii/S1877050920304944},
volume = {170},
year = {2020},
Bdsk-Url-1 = {http://www.sciencedirect.com/science/article/pii/S1877050920304944},
Bdsk-Url-2 = {https://doi.org/10.1016/j.procs.2020.03.057}}
@inproceedings{pmlr-v80-kleinberg18a,
abstract = {Stochastic gradient descent (SGD) is widely used in machine learning. Although being commonly viewed as a fast but not accurate version of gradient descent (GD), it always finds better solutions than GD for modern neural networks. In order to understand this phenomenon, we take an alternative view that SGD is working on the convolved (thus smoothed) version of the loss function. We show that, even if the function $f$ has many bad local minima or saddle points, as long as for every point $x$, the weighted average of the gradients of its neighborhoods is one point convex with respect to the desired solution $x^*$, SGD will get close to, and then stay around $x^*$ with constant probability. Our result identifies a set of functions that SGD provably works, which is much larger than the set of convex functions. Empirically, we observe that the loss surface of neural networks enjoys nice one point convexity properties locally, therefore our theorem helps explain why SGD works so well for neural networks.},
address = {Stockholmsm{\"a}ssan, Stockholm Sweden},
author = {Kleinberg, Bobby and Li, Yuanzhi and Yuan, Yang},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
date-added = {2020-11-06 18:39:43 +1300},
date-modified = {2020-11-06 18:41:24 +1300},
editor = {Jennifer Dy and Andreas Krause},
keywords = {Other, SGD, Stochastic Gradient Descent, Local Minima},
month = {10--15 Jul},
pages = {2698--2707},
pdf = {http://proceedings.mlr.press/v80/kleinberg18a/kleinberg18a.pdf},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
title = {An Alternative View: When Does {SGD} Escape Local Minima?},
url = {http://proceedings.mlr.press/v80/kleinberg18a.html},
volume = {80},
year = {2018},
Bdsk-Url-1 = {http://proceedings.mlr.press/v80/kleinberg18a.html}}
@article{robbins1951,
author = {Robbins, Herbert and Monro, Sutton},
date-added = {2020-11-05 20:36:23 +1300},
date-modified = {2020-11-05 20:36:53 +1300},
doi = {10.1214/aoms/1177729586},
fjournal = {Annals of Mathematical Statistics},
journal = {Ann. Math. Statist.},
keywords = {Other, Stochastic Gradient Descent, SGD, Derivatives},
month = {09},
number = {3},
pages = {400--407},
publisher = {The Institute of Mathematical Statistics},
title = {A Stochastic Approximation Method},
url = {https://doi.org/10.1214/aoms/1177729586},
volume = {22},
year = {1951},
Bdsk-Url-1 = {https://doi.org/10.1214/aoms/1177729586}}
@article{gd-Haskell,
author = {Haskell B. Curry},
date-added = {2020-11-03 23:05:54 +1300},
date-modified = {2020-11-03 23:08:40 +1300},
journal = {Quarterly of Applied Mathematics},
keywords = {Other, gradient descent},
pages = {258-261},
title = {The method of steepest descent for non-linear minimization problems},
volume = {2},
year = {1944}}
@book{aiNorvig,
author = {Stuart Russell and Peter Norvig},
date-added = {2020-11-03 22:17:17 +1300},
date-modified = {2021-05-11 21:53:12 +1200},
edition = {4},
keywords = {Books, AI, ML, Unsupervised learning},
publisher = {Pearson},
title = {Artificial Intelligence: A Modern Approach},
volume = {1},
year = {2020}}
@inproceedings{44873,
author = {Geoffrey Hinton and Oriol Vinyals and Jeffrey Dean},
booktitle = {NIPS Deep Learning and Representation Learning Workshop},
date-added = {2020-11-03 17:38:11 +1300},
date-modified = {2020-11-03 17:39:30 +1300},
keywords = {NN, DNN, MNIST, Supervised learning},
title = {Distilling the Knowledge in a Neural Network},
url = {http://arxiv.org/abs/1503.02531},
year = {2015},
Bdsk-Url-1 = {http://arxiv.org/abs/1503.02531}}
@inproceedings{10.1007/11844297_68,
abstract = {The evolution of artificial neural networks (ANNs) is often used to tackle difficult control problems. There are different approaches to the encoding of neural networks in artificial genomes. Analog Genetic Encoding (AGE) is a new implicit method derived from the observation of biological genetic regulatory networks. This paper shows how AGE can be used to simultaneously evolve the topology and the weights of ANNs for complex control systems. AGE is applied to a standard benchmark problem and we show that its performance is equivalent or superior to some of the most powerful algorithms for neuroevolution in the literature.},
address = {Berlin, Heidelberg},
author = {D{\"u}rr, Peter and Mattiussi, Claudio and Floreano, Dario},
booktitle = {Parallel Problem Solving from Nature - PPSN IX},
date-added = {2020-10-28 20:17:07 +1300},
date-modified = {2021-05-11 21:53:12 +1200},
editor = {Runarsson, Thomas Philip and Beyer, Hans-Georg and Burke, Edmund and Merelo-Guerv{\'o}s, Juan J. and Whitley, L. Darrell and Yao, Xin},
isbn = {978-3-540-38991-0},
keywords = {Books, Evolution, Neuroevolution, Genetic Encoding},
pages = {671--680},
publisher = {Springer Berlin Heidelberg},
title = {Neuroevolution with Analog Genetic Encoding},
year = {2006}}
@webpage{initializationIntervalXilinx,
author = {Xilinx},
date-added = {2020-10-27 20:01:23 +1300},
date-modified = {2020-10-27 20:02:27 +1300},
keywords = {Webpage, Xilinx, FPGA},
title = {Loop Pipelining and Loop Unrolling},
url = {https://www.xilinx.com/support/documentation/sw_manuals/xilinx2015_2/sdsoc_doc/topics/calling-coding-guidelines/concept_pipelining_loop_unrolling.html},
year = {2020},
Bdsk-Url-1 = {https://www.xilinx.com/support/documentation/sw_manuals/xilinx2015_2/sdsoc_doc/topics/calling-coding-guidelines/concept_pipelining_loop_unrolling.html}}
@electronic{awsF1,
author = {Amazon Web Services},
date-added = {2020-10-27 13:27:49 +1300},
date-modified = {2020-10-27 13:28:35 +1300},
keywords = {Webpage, AWS F1, FPGA, Xilinx},
title = {AWS F1 instance},
url = {https://aws.amazon.com/ec2/instance-types/f1/},
Bdsk-Url-1 = {https://aws.amazon.com/ec2/instance-types/f1/}}
@webpage{nimbixAlveo,
author = {Nimbix},
date-added = {2020-10-27 13:27:05 +1300},
date-modified = {2020-10-27 13:27:45 +1300},
keywords = {Webpage, Cloud, Nimbix, FPGA},
title = {Alveo FPGA},
url = {https://www.nimbix.net/alveo},
year = {2020},
Bdsk-Url-1 = {https://www.nimbix.net/alveo}}
@webpage{bisunaU50Git,
author = {R. {Valencia}},
date-added = {2020-10-27 12:50:38 +1300},
date-modified = {2020-10-27 12:53:43 +1300},
keywords = {WebPage, BiSUNA},
title = {BiSUNA - Alveo U50},
url = {https://github.com/rval735/bisunaU50},
year = {2020},
Bdsk-Url-1 = {https://github.com/rval735/bisunaU50}}
@electronic{gpgpu-ai-dominance,
author = {James Kobielus},
date-added = {2020-10-26 23:07:36 +1300},
date-modified = {2020-10-26 23:09:10 +1300},
keywords = {WebPage, GPGPU, AI},
title = {GPUs Continue to Dominate the AI Accelerator Market for Now},
url = {https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475},
year = {2019},
Bdsk-Url-1 = {https://www.informationweek.com/big-data/ai-machine-learning/gpus-continue-to-dominate-the-ai-accelerator-market-for-now/a/d-id/1336475}}
@webpage{alveoU50,
author = {Xilinx},
date-added = {2020-10-26 23:04:04 +1300},
date-modified = {2020-10-26 23:05:51 +1300},
keywords = {WebPage, Xilinx, FPGA, Alveo U50},
title = {Alveo U50},
url = {https://www.xilinx.com/products/boards-and-kits/alveo/u50.html},
year = {2020},
Bdsk-Url-1 = {https://www.xilinx.com/products/boards-and-kits/alveo/u50.html}}
@webpage{altera-ai,
author = {Intel},
date-added = {2020-10-26 22:32:55 +1300},
date-modified = {2020-10-26 22:34:37 +1300},
keywords = {WebPage, Intel, FPGA, Altera, AI},
lastchecked = {2020},
title = {FPGAs for Artificial Intelligence (AI)},
url = {https://www.intel.com/content/www/us/en/artificial-intelligence/programmable/overview.html},
Bdsk-Url-1 = {https://www.intel.com/content/www/us/en/artificial-intelligence/programmable/overview.html}}
@webpage{vitis-ai,
author = {Xilinx},
date-added = {2020-10-26 22:25:35 +1300},
date-modified = {2020-10-26 22:31:16 +1300},
keywords = {Webpage, Xilinx, FPGA, Vitis, AI},
title = {Vitis AI},
url = {https://www.xilinx.com/products/design-tools/vitis/vitis-ai.html},
year = {2020},
Bdsk-Url-1 = {https://www.xilinx.com/products/design-tools/vitis/vitis-ai.html}}
@misc{clary2019lets,
archiveprefix = {arXiv},
author = {Kaleigh Clary and Emma Tosch and John Foley and David Jensen},
date-added = {2020-10-01 11:43:48 +1300},
date-modified = {2020-10-01 11:44:12 +1300},
eprint = {1904.06312},
keywords = {DNN, OpenAI Gym, Baselines, Atari},
primaryclass = {cs.LG},
title = {Let's Play Again: Variability of Deep Reinforcement Learning Agents in Atari Environments},
year = {2019}}
@inproceedings{9102924,
abstract = {Recent years have witnessed the great success of deep reinforcement learning (DRL) on a variety of vision games. Although DNN has demonstrated strong power in representation learning, such capacity is under-explored in most DRL works whose focus is usually on optimization solvers. In fact, we discover that the state feature learning is the main obstacle for further improvement of DRL algorithms. To address this issue, we propose a new state representation learning scheme with our Adjacent State Consistency Loss (ASC Loss). The loss is defined based on the hypothesis that there are fewer changes between adjacent states than that of far apart ones, since scenes in videos generally evolve smoothly. In this paper, we exploit ASC loss as an assistant of RL loss in the training phase to boost the state feature learning. We conduct evaluation on Atari games and MuJoCo continuous control tasks, which demonstrates that our method is superior to OpenAI baselines.},
author = {J. {Zhao} and W. {Zhou} and T. {Zhao} and Y. {Zhou} and H. {Li}},
booktitle = {2020 IEEE International Conference on Multimedia and Expo (ICME)},
date-added = {2020-10-01 11:32:51 +1300},
date-modified = {2021-04-28 17:01:07 +1200},
doi = {10.1109/ICME46284.2020.9102924},
issn = {1945-788X},
keywords = {DNN, Baselines, OpenAI Gym, computer games;computer vision; artificial intelligence;neural nets;OpenAI Baselines;MuJoCo continuous control tasks;Atari games;adjacent state consistency loss;optimization solvers;DNN;deep reinforcement learning;state representation learning;vision games;RL loss;ASC loss;DRL algorithms;state feature learning;Representation learning; Reinforcement Learning},
month = {July},
pages = {1-6},
title = {State Representation Learning For Effective Deep Reinforcement Learning},
year = {2020},
Bdsk-Url-1 = {https://doi.org/10.1109/ICME46284.2020.9102924}}
@url{oaiBaseline-Github,
author = {OpenAI},
date-added = {2020-10-01 11:24:56 +1300},
date-modified = {2020-10-01 11:26:52 +1300},
keywords = {OpenAI Gym, Baselines, Github},
title = {Baselines},
url = {https://github.com/openai/baselines},
Bdsk-Url-1 = {https://github.com/openai/baselines}}
@url{oaiBaselines-Page,
author = {OpenAI},
date-added = {2020-10-01 11:23:29 +1300},
date-modified = {2020-10-01 11:24:30 +1300},
keywords = {OpenAI Gym, Baselines, WebPage},
title = {OpenAI Baselines: DQN},
url = {https://openai.com/blog/openai-baselines-dqn/},
Bdsk-Url-1 = {https://openai.com/blog/openai-baselines-dqn/}}
@inproceedings{ijcai2019-0452,
author = {Petroski Such, Felipe and Madhavan, Vashisht and Liu, Rosanne and Wang, Rui and Castro, Pablo Samuel and Li, Yulun and Zhi, Jiale and Schubert, Ludwig and Bellemare, Marc G. and Clune, Jeff and Lehman, Joel},
booktitle = {Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, {IJCAI-19}},
date-added = {2020-10-01 11:21:34 +1300},
date-modified = {2021-04-22 16:17:06 +1200},
doi = {10.24963/ijcai.2019/452},
keywords = {DNN, Reinforcement Learning, Atari, OpenAI Gym, Baselines},
month = {7},
pages = {3260--3267},
publisher = {International Joint Conferences on Artificial Intelligence Organization},
title = {An Atari Model Zoo for Analyzing, Visualizing, and Comparing Deep Reinforcement Learning Agents},
url = {https://doi.org/10.24963/ijcai.2019/452},
year = {2019},
Bdsk-Url-1 = {https://doi.org/10.24963/ijcai.2019/452}}
@article{FastNIST-STS-Muni,
author = {S{\'y}s, M. and {\v R}{\'\i}ha, Z. and Maty{\'a}{\v s}, V. and M{\'a}rton, K. and Suciu, A.},
journal = {Romanian Journal of Information Science and Technology},
date-added = {2020-09-17 13:39:21 +1200},
date-modified = {2020-09-17 13:43:18 +1200},
publisher = {Publishing House of the Romanian Academy},
keywords = {Crypto, NIST, Muni, Fast-STS, STS},
pages = {18-32},
title = {On the Interpretation of Results from the NIST Statistical Test Suite},
volume = {18},
year = {2015}}
@article{PETRICA2018251,
abstract = {Pseudo-random number generators (PRNGs) are important to applications ranging from cryptography to Monte-Carlo methods. Consequently, many PRNG architectures have been proposed, including some optimized for FPGA, e.g. the LUT-SR family of PRNGs, which utilize embedded FPGA shift registers, and self-programmable cellular automaton (SPCA) PRNGs. However, LUT-SR and other PRNGs do not utilize key features of modern Xilinx FPGAs: embedded carry chains and splittable Look-Up Tables (LUTs), i.e., 6-input LUTs which can operate as two 5-input LUTs which share inputs. In this paper we explore the SPCA structure and derive a set of parameter constraints which allow an SPCA PRNG to produce 2 random bits per LUT in every clock cycle on modern Xilinx FPGAs. We determine this to be the maximum logic density achievable for SPCA, and propose an architectural improvement of SPCA to enable further density increase by making use of FPGA embedded carry chains as a method to compute an additional random bit per LUT in each clock cycle. The resulting Split-LUT-Carry SPCA (SLC-SPCA) PRNG achieves a 6x improvement in logic density compared to LUT-SR, and a 1.5x density increase compared to SPCA. We evaluate the randomness of SLC-SPCA utilizing the NIST Statistical Test Suite, and we provide a power and energy comparison of LUT-SR and SLC-SPCA on a Xilinx Zynq 7020 FPGA device. Our results indicate that SLC-SPCA generates 3x more bits per clock at approximately the same power dissipation as LUT-SR, and consequently uses 3x less energy to generate 1 gigabit of random data. SLC-SPCA is also 1.5x more energy-efficient than an SPCA PRNG.},
author = {Lucian Petrica},