From e175d5558b794008e2ade8922f596791b4b4b76e Mon Sep 17 00:00:00 2001
From: Sasha Petrenko
Date: Fri, 22 Apr 2022 18:19:30 -0500
Subject: [PATCH 1/2] Correct bib errors, remove annotes and files

---
 paper/paper.bib | 76 +++++++++++++++----------------------------------
 1 file changed, 23 insertions(+), 53 deletions(-)

diff --git a/paper/paper.bib b/paper/paper.bib
index 3e83aa67..4584c98e 100644
--- a/paper/paper.bib
+++ b/paper/paper.bib
@@ -1,41 +1,42 @@
 @misc{CNS_Software,
- author = {Boston University of Cognitive and Neural Systems Technology Lab},
- title = {Cognitive and Neural Systems Technology Lab Software Repository},
+ title = {Boston University Cognitive and Neural Systems Technology Lab Software Repository},
  year = {2009},
- url = {http://techlab.bu.edu/resources/software/C51/index.html}
+ url = {http://techlab.bu.edu/resources/software/C51/index.html},
+ note = {Accessed 2022-04-22}
 }

 @misc{ACIL_GitHub,
- author = {Missouri University of Science and Technology Applied Computational Intelligence Laboratory},
- title = {Applied Computational Intelligence Laboratory GitHub Software Repository},
+ title = {Missouri University of Science and Technology Applied Computational Intelligence Laboratory GitHub Software Repository},
  year = {2022},
- url = {https://github.com/ACIL-Group}
+ url = {https://github.com/ACIL-Group},
+ note = {Accessed 2022-04-22}
 }

 @misc{NuART-Py,
  author = {Islam Elnabarawy},
  title = {NuART-Py: A Python Library of Adaptive Theory Neural Networks},
  year = {2019},
- url = {https://github.com/ACIL-Group/NuART-Py}
+ url = {https://github.com/ACIL-Group/NuART-Py},
+ note = {Accessed 2022-04-22}
 }

 @misc{JavaART,
  author = {Xianshun Chen},
  title = {java-adaptive-resonance-theory},
  year = {2018},
- url = {https://github.com/chen0040/java-adaptive-resonance-theory}
+ url = {https://github.com/chen0040/java-adaptive-resonance-theory},
+ note = {Accessed 2022-04-22}
 }

 @article{R_FuzzyART,
  author = {Steinmeister, Louis and Wunsch, Donald C},
- file = {:G\:/My Drive/Research/Literature/ART/FuzzyART_ An R Package for ART-based Clustering.pdf:pdf},
- number = {May},
- title = {{Scholars ' Mine FuzzyART : An R Package for ART-based Clustering FuzzyART : An R Package for ART-based Clustering .}},
- year = {2021}
+ title = {FuzzyART: An R Package for ART-based Clustering},
+ year = {2021},
+ doi = {10.13140/RG.2.2.11823.25761}
 }

 @article{Grossberg2013,
- title = {{Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world}},
+ title = {Adaptive Resonance Theory: How a brain learns to consciously attend, learn, and recognize a changing world},
  year = {2013},
  journal = {Neural Networks},
  author = {Grossberg, Stephen},
@@ -50,7 +51,7 @@ @article{Grossberg2013
 }

 @article{Grossberg1980,
- title = {{How Does a Brain Build a Cognitive Code ?}},
+ title = {How Does a Brain Build a Cognitive Code?},
  year = {1980},
  journal = {Psychological Review},
  author = {Grossberg, Stephen},
@@ -64,11 +65,10 @@ @article{Grossberg1980
 }

 @article{DaSilva2019,
- title = {{A Survey of Adaptive Resonance Theory Neural Network Models for Engineering Applications}},
+ title = {A Survey of Adaptive Resonance Theory Neural Network Models for Engineering Applications},
  year = {2019},
  journal = {Neural Networks},
  author = {Brito da Silva, Leonardo Enzo and Elnabarawy, Islam and Wunsch, Donald C.},
- number = {xxxx},
  pages = {167--203},
  volume = {120},
  publisher = {Elsevier Ltd},
@@ -80,95 +80,71 @@ @article{DaSilva2019
 }

 @inproceedings{Carpenter1991,
- abstract = {Summary form only given. The authors introduced a neural network architecture, called ARTMAP, that autonomously learns to classify arbitrarily many, arbitrarily ordered vectors into recognition categories based on predictive success. This supervised learning system is built up from a pair of adaptive resonance theory modules (ARTa and ARTb) that are capable of self-organizing stable recognition categories in response to arbitrary sequences of input patterns. Tested on a benchmark machine learning database in both online and offline simulations, the ARTMAP system learns orders of magnitude more quickly, efficiently, and accurately than alternative algorithms, and achieves 100% accuracy after training on less than half of the input patterns in the database.},
- annote = {In this paper, Dr. Gail Carpenter introduces the ARTMAP algorithm, which introduces a supervisory learning mechanism to ordinarily unsupervised ART modules. This is done by introducing two ART modules (ARTa and ARTb) and a resonance-based connection between them, mapping categories one modules to labels in another.},
  author = {Carpenter, Gail A. and Grossberg, Stephen and Reynolds, John H.},
  booktitle = {IEEE Conference on Neural Networks for Ocean Engineering},
  doi = {10.1016/0893-6080(91)90012-T},
- file = {:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Carpenter, Grossberg, Reynolds - 1991 - ARTMAP Supervised real-time learning and classification of nonstationary data by a self-organizi.pdf:pdf},
  isbn = {0780302052},
  issn = {08936080},
  mendeley-groups = {ART},
  pages = {341--342},
- title = {{ARTMAP: Supervised real-time learning and classification of nonstationary data by a self-organizing neural network}},
+ title = {ARTMAP: Supervised real-time learning and classification of nonstationary data by a self-organizing neural network},
  year = {1991}
 }

 @article{Carpenter1992,
- abstract = {— A new neural network architecture is introduced for incremental supervised learning of recognition categories and multidimensional maps in response to arbitrary sequences of analog or binary input vectors, which may represent fuzzy or crisp sets of features. The architecture, called fuzzy ARTMAP, achieves a synthesis of fuzzy logic and adaptive resonance theory (ART) neural networks by exploiting a close formal similarity between the computations of fuzzy subsethood and ART category choice, resonance, and learning. Fuzzy ARTMAP also realizes a new minimax learning rule that conjointly minimizes predictive error and maximizes code compression, or generalization. This is achieved by a match tracking process that increases the ART vigilance parameter by the minimum amount needed to correct a predictive error. As a result, the system automatically learns a minimal number of recognition categories, or “hidden units,” to meet accuracy criteria. Category proliferation is prevented by normalizing input vectors at a preprocessing stage. A normalization procedure called complement coding leads to a symmetric theory in which the and operator (V) and the OR operator (A) of fuzzy logic play complementary roles. Complement coding uses on cells and off cells to represent the input pattern, and preserves individual feature amplitudes while normalizing the total on cell/off cell vector. Learning is stable because all adaptive weights can only decrease in time. Decreasing weights correspond to increasing sizes of category “boxes.” Smaller vigilance values lead to larger category boxes. Improved prediction is achieved by training the system several times using different orderings of the input set. This voting strategy can also be used to assign confidence estimates to competing predictions given small, noisy, or incomplete training sets. Four classes of simulations illustrate fuzzy ARTMAP performance in relation to benchmark backpropagation and genetic algorithm systems. These simulations include (i) finding points inside versus outside a circle; (ii) learning to tell two spirals apart, (iii) incremental approximation of a piecewise-continuous function; and (iv) a letter recognition database. The fuzzy ARTMAP system is also compared with Salzberg's NGE system and with Simpson's FMMC system. {\textcopyright} 1992 IEEE},
- annote = {In this paper, Drs. Gail Carpenter and Stephen Grossberg demonstrate how .the use of fuzzy set theory operations in the ARTMAP algorithm augment its learning capabilities without sacrificing algorithmic complexity.},
  author = {Carpenter, Gail A. and Grossberg, Stephen and Markuzon, Natalya and Reynolds, John H. and Rosen, David B.},
  doi = {10.1109/72.159059},
- file = {:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Carpenter et al. - 1992 - Fuzzy ARTMAP A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps.pdf:pdf},
  issn = {19410093},
  journal = {IEEE Transactions on Neural Networks},
  mendeley-groups = {ART},
  number = {5},
  pages = {698--713},
- title = {{Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps}},
+ title = {Fuzzy ARTMAP: A Neural Network Architecture for Incremental Supervised Learning of Analog Multidimensional Maps},
  volume = {3},
  year = {1992}
 }

 @inproceedings{ARTHestenes1987,
- abstract = {In spite of the},
- annote = {From Duplicate 1 (How the Brain Works: The Next Great Scientific Revolution - Hestenes, David)
-
- David Hestenes, a physicist by training, provides an overview of the adaptive resonance theory of Dr. Stephen Grossberg and its significance to the neuroscience as a whole. He provides evidence for his claim that ART exemplifies a revolution in brain science by giving a historical perspective on the field and illustrating the key points of ART, showing their most significant ramifications. Hestenes provides this paper to make Grossberg's work more accessible, necessary because of the lateral thinking required to appreciate the magnitude of Grossberg's work.
-
- From Duplicate 2 (How the Brain Works: The Next Great Scientific Revolution - Hestenes, David)
-
- From Duplicate 3 (How the Brain Works: The Next Great Scientific Revolution - Hestenes, David)
-
- David Hestenes, a physicist by training, provides an overview of the adaptive resonance theory of Dr. Stephen Grossberg and its significance to the neuroscience as a whole. He provides evidence for his claim that ART exemplifies a revolution in brain science by giving a historical perspective on the field and illustrating the key points of ART, showing their most significant ramifications. Hestenes provides this paper to make Grossberg's work more accessible, necessary because of the lateral thinking required to appreciate the magnitude of Grossberg's work.},
  author = {Hestenes, David},
  booktitle = {Maximum-Entropy and Bayesian Spectral Analysis and Estimation Problems},
  doi = {10.1007/978-94-009-3961-5_11},
- file = {:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Unknown - Unknown - HestenesDavidHowTheBrainWorks001.pdf.pdf:pdf;:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Hestenes - 1987 - How the Brain Works The Next Great Scientific Revolution.pdf:pdf},
  mendeley-groups = {ART},
  pages = {173--205},
  publisher = {Springer Netherlands},
- title = {{How the Brain Works: The Next Great Scientific Revolution}},
+ title = {How the Brain Works: The Next Great Scientific Revolution},
  year = {1987}
 }

 @article{Grossberg2017,
- abstract = {The hard problem of consciousness is the problem of explaining how we experience qualia or phenomenal experiences, such as seeing, hearing, and feeling, and knowing what they are. To solve this problem, a theory of consciousness needs to link brain to mind by modeling how emergent properties of several brain mechanisms interacting together embody detailed properties of individual conscious psychological experiences. This article summarizes evidence that Adaptive Resonance Theory, or ART, accomplishes this goal. ART is a cognitive and neural theory of how advanced brains autonomously learn to attend, recognize, and predict objects and events in a changing world. ART has predicted that “all conscious states are resonant states” as part of its specification of mechanistic links between processes of consciousness, learning, expectation, attention, resonance, and synchrony. It hereby provides functional and mechanistic explanations of data ranging from individual spikes and their synchronization to the dynamics of conscious perceptual, cognitive, and cognitive–emotional experiences. ART has reached sufficient maturity to begin classifying the brain resonances that support conscious experiences of seeing, hearing, feeling, and knowing. Psychological and neurobiological data in both normal individuals and clinical patients are clarified by this classification. This analysis also explains why not all resonances become conscious, and why not all brain dynamics are resonant. The global organization of the brain into computationally complementary cortical processing streams (complementary computing), and the organization of the cerebral cortex into characteristic layers of cells (laminar computing), figure prominently in these explanations of conscious and unconscious processes. Alternative models of consciousness are also discussed.},
  author = {Grossberg, Stephen},
  doi = {10.1016/j.neunet.2016.11.003},
- file = {:G\:/My Drive/Research/Literature/ART/Papers/BUpapersSteveGrossbergGailCarpenterEtc/Consciousness2017SteveGrossbergNN.pdf:pdf},
  issn = {18792782},
  journal = {Neural Networks},
  keywords = {Adaptive resonance,Attention,Audition,Consciousness,Emotion,Vision},
  pages = {38--95},
  pmid = {28088645},
  publisher = {Elsevier Ltd},
- title = {{Towards solving the hard problem of consciousness: The varieties of brain resonances and the conscious experiences that they support}},
+ title = {Towards solving the hard problem of consciousness: The varieties of brain resonances and the conscious experiences that they support},
  url = {http://dx.doi.org/10.1016/j.neunet.2016.11.003},
  volume = {87},
  year = {2017}
 }

 @article{Cohen1983a,
- abstract = {The process whereby input patterns are transformed and stored by competitive cellular networks is considered. This process arises in such diverse subjects as the short-term storage of visual or language patterns by neural networks, pattern formation due to the firing of morphogenetic gradients in developmental biology, control of choice behavior during macromolecular evolution, and the design of stable context-sensitive parallel processors. In addition to systems capable of approaching one of perhaps infinitely many equilibrium points in response to arbitrary input patterns and initial data, one finds in these subjects a wide variety of other behaviors, notably traveling waves, standing waves, resonance, and chaos. The question of what general dynamical constraints cause global approach to equilibria rather than large amplitude waves is therefore of considerable interest. In another terminology, this is the question of whether global pattern formation occurs. A related question is whether the global pattern formation property persists when system parameters slowly change in an unpredictable fashion due to self-organization (development, learning). This is the question of absolute stability of global pattern formation. It is shown that many model systems which exhibit the absolute stability property can be written in the form i = 1, 2, {\textperiodcentered}{\textperiodcentered}{\textperiodcentered}, n, where the matrix C = ||cik|| is symmetric and the system as a whole is competitive. Under these circumstances, this system defines a global Liapunov function. The absolute stability of systems with infinite but totally disconnected sets of equilibrium points can then be studied using the LaSalle invariance principle, the theory of several complex variables, and Sard's theorem. The symmetry of matrix C is important since competitive systems of the form (1) exist wherein C is arbitrarily close to a symmetric matrix but almost all trajectories persistently oscillate, as in the voting paradox. Slowing down the competitive feedback without violating symmetry, as in the systems also enables sustained oscillations to occur. Our results thus show that the use of fast symmetric competitive feedback is a robust design constraint for guaranteeing absolute stability of global pattern formation. {\textcopyright} 1983 IEEE},
  author = {Cohen, Michael A. and Grossberg, Stephen},
  doi = {10.1109/TSMC.1983.6313075},
- file = {:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Cohen, Grossberg - 1983 - Absolute Stability of Global Pattern Formation and Parallel Memory Storage by Competitive Neural Networks.pdf:pdf},
  issn = {21682909},
  journal = {IEEE Transactions on Systems, Man and Cybernetics},
  number = {5},
  pages = {815--826},
- title = {{Absolute Stability of Global Pattern Formation and Parallel Memory Storage by Competitive Neural Networks}},
+ title = {Absolute Stability of Global Pattern Formation and Parallel Memory Storage by Competitive Neural Networks},
  volume = {SMC-13},
  year = {1983}
 }

 @article{Grossberg2009,
- abstract = {How do humans rapidly recognize a scene? How can neural models capture this biological competence to achieve state-of-the-art scene classification? The ARTSCENE neural system classifies natural scene photographs by using multiple spatial scales to efficiently accumulate evidence for gist and texture. ARTSCENE embodies a coarse-to-fine Texture Size Ranking Principle whereby spatial attention processes multiple scales of scenic information, from global gist to local textures, to learn and recognize scenic properties. The model can incrementally learn and rapidly predict scene identity by gist information alone, and then accumulate learned evidence from scenic textures to refine this hypothesis. The model shows how texture-fitting allocations of spatial attention, called attentional shrouds, can facilitate scene recognition, particularly when they include a border of adjacent textures. Using grid gist plus three shroud textures on a benchmark photograph dataset, ARTSCENE discriminates 4 landscape scene categories (coast, forest, mountain, and countryside) with up to 91.85% correct on a test set, outperforms alternative models in the literature which use biologically implausible computations, and outperforms component systems that use either gist or texture information alone. {\textcopyright} ARVO.},
- annote = {This paper outlines several different toolchains that together together comprise the ARTSCENE algorithm. The paper at its core is an investigation into the construction of a system that recognizes whole-scene global descriptors from local textures. It does this through a series of image filters that mimik the processing occuring in the mammalian LGN and learning/recognition processing via the Default ARTMAP 2 algorithm.},
  author = {Grossberg, Stephen and Huang, Tsung Ren},
  doi = {10.1167/9.4.6},
- file = {:G\:/My Drive/Research/Literature/ART/jov-9-4-6.pdf:pdf},
  issn = {15347362},
  journal = {Journal of Vision},
  keywords = {ARTMAP,Attentional shroud,Coarse-to-fine processing,Gist,Multiple-scale processing,Scene classification,Spatial attention,Texture},
@@ -176,7 +152,7 @@ @article{Grossberg2009
  number = {4},
  pages = {1--19},
  pmid = {19757915},
- title = {{ARTSCENE: A neural system for natural scene classification}},
+ title = {ARTSCENE: A neural system for natural scene classification},
  volume = {9},
  year = {2009}
 }
@@ -191,14 +167,8 @@ @Book{grossberg2021conscious
 }

 @article{Tan2019,
- abstract = {Learning and memory are two intertwined cognitive functions of the human brain. This paper shows how a family of biologically-inspired self-organizing neural networks, known as fusion Adaptive Resonance Theory (fusion ART), may provide a viable approach to realizing the learning and memory functions. Fusion ART extends the single-channel Adaptive Resonance Theory (ART) model to learn multimodal pattern associative mappings. As a natural extension of ART, various forms of fusion ART have been developed for a myriad of learning paradigms, ranging from unsupervised learning to supervised learning, semi-supervised learning, multimodal learning, reinforcement learning, and sequence learning. In addition, fusion ART models may be used for representing various types of memories, notably episodic memory, semantic memory and procedural memory. In accordance with the notion of embodied intelligence, such neural models thus provide a computational account of how an autonomous agent may learn and adapt in a real-world environment. The efficacy of fusion ART in learning and memory shall be discussed through various examples and illustrative case studies.},
- annote = {From Duplicate 1 (Self-organizing neural networks for universal learning and multimodal memory encoding - Tan, Ah-Hwee Hwee; Subagdja, Budhitama; Wang, Di; Meng, Lei)
-
- This paper is effectively an appraisal of the Fusion ART algorithm, effectively outlining the details of the algorithm, its capabilities, and its limitations. The paper outlines some practical applications of this algorithm, especially as backbone for other algorithms (i.e., FALCON, iFALCON, EM-ART, OMC-ART, etc.).
- },
  author = {Tan, Ah-Hwee Hwee and Subagdja, Budhitama and Wang, Di and Meng, Lei},
  doi = {10.1016/j.neunet.2019.08.020},
- file = {:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Tan et al. - 2019 - Self-organizing neural networks for universal learning and multimodal memory encoding.pdf:pdf;:C\:/Users/Sasha/AppData/Local/Mendeley Ltd./Mendeley Desktop/Downloaded/Tan et al. - 2019 - Self-organizing neural networks for universal learning and multimodal memory encoding(2).pdf:pdf},
  issn = {08936080},
  journal = {Neural Networks},
  keywords = {,Adaptive resonance theory,Memory encoding,Universal learning,adaptive resonance theory},
@@ -206,7 +176,7 @@ @article{Tan2019
  number = {xxxx},
  pages = {58--73},
  publisher = {Elsevier Ltd},
- title = {{Self-organizing neural networks for universal learning and multimodal memory encoding}},
+ title = {Self-organizing neural networks for universal learning and multimodal memory encoding},
  url = {https://doi.org/10.1016/j.neunet.2019.08.020},
  volume = {120},
  year = {2019}

From 499ecbcdf786ecbcbeea99020b7e5136e28a7b02 Mon Sep 17 00:00:00 2001
From: Sasha Petrenko
Date: Fri, 22 Apr 2022 18:27:04 -0500
Subject: [PATCH 2/2] Bump, correct software years

---
 Project.toml    | 2 +-
 paper/paper.bib | 9 ++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/Project.toml b/Project.toml
index 58a4d000..9086d233 100644
--- a/Project.toml
+++ b/Project.toml
@@ -2,7 +2,7 @@ name = "AdaptiveResonance"
 uuid = "3d72adc0-63d3-4141-bf9b-84450dd0395b"
 authors = ["Sasha Petrenko"]
 description = "A Julia package for Adaptive Resonance Theory (ART) algorithms."
-version = "0.4.2" +version = "0.4.3" [deps] Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b" diff --git a/paper/paper.bib b/paper/paper.bib index 4584c98e..9c419001 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -1,13 +1,13 @@ @misc{CNS_Software, title = {Boston University Cognitive and Neural Systems Technology Lab Software Repository}, - year = {2009}, + year = {2009 [Online]}, url = {http://techlab.bu.edu/resources/software/C51/index.html}, note = {Accessed 2022-04-22} } @misc{ACIL_GitHub, title = {Missouri University of Science and Technology Applied Computational Intelligence Laboratory GitHub Software Repository}, - year = {2022}, + year = {2022 [Online]}, url = {https://github.com/ACIL-Group}, note = {Accessed 2022-04-22} } @@ -15,7 +15,7 @@ @misc{ACIL_GitHub @misc{NuART-Py, author = {Islam Elnabarawy}, title = {NuART-Py: A Python Library of Adaptive Theory Neural Networks}, - year = {2019}, + year = {2019 [Online]}, url = {https://github.com/ACIL-Group/NuART-Py}, note = {Accessed 2022-04-22} } @@ -23,7 +23,7 @@ @misc{NuART-Py @misc{JavaART, author = {Xianshun Chen}, title = {java-adaptive-resonance-theory}, - year = {2018}, + year = {2018 [Online]}, url = {https://github.com/chen0040/java-adaptive-resonance-theory}, note = {Accessed 2022-04-22} } @@ -173,7 +173,6 @@ @article{Tan2019 journal = {Neural Networks}, keywords = {,Adaptive resonance theory,Memory encoding,Universal learning,adaptive resonance theory}, mendeley-groups = {ART,NN Special Issue}, - number = {xxxx}, pages = {58--73}, publisher = {Elsevier Ltd}, title = {Self-organizing neural networks for universal learning and multimodal memory encoding},