diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index 404cbaec..5bfec32e 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -10,25 +10,19 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python 3.10 - uses: actions/setup-python@v3 + + - name: Install Miniconda + uses: conda-incubator/setup-miniconda@v2 with: - python-version: '3.10' - - name: Add conda to system path + auto-update-conda: true + python-version: 3.11 + activate-environment: test + + - name: Install dependencies run: | - # $CONDA is an environment variable pointing to the root of the miniconda directory - echo $CONDA/bin >> $GITHUB_PATH - - name: Install current library and dependencies - run: | - pip install -e . - # - name: Lint with flake8 - # run: | - # conda install flake8 - # # stop the build if there are Python syntax errors or undefined names - # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics - # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - - name: Test with pytest + conda install -n test numpy=1.26 pytest pip + conda run -n test pip install -e . + + - name: Run tests run: | - conda install pytest - pytest diff --git a/README.md b/README.md index 7355b0bb..26d37e56 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ ngc-learn requires: 1) Python (>=3.10) 2) NumPy (>=1.22.0) 3) SciPy (>=1.7.0) -4) ngcsimlib (>=1.0.0), (visit official page here) +4) ngcsimlib (>=1.0.1), (visit official page here) 5) JAX (>=0.4.28) (to enable GPU use, make sure to install one of the CUDA variants) --- -ngc-learn 2.0.0 and later require Python 3.10 or newer as well as ngcsimlib >=1.0.0. +ngc-learn 2.0.3 and later require Python 3.10 or newer as well as ngcsimlib >=1.0.1. 
ngc-learn's plotting capabilities (routines within `ngclearn.utils.viz`) require Matplotlib (>=3.8.0) and imageio (>=2.31.5) and both plotting and density estimation tools (routines within ``ngclearn.utils.density``) will require Scikit-learn (>=0.24.2). @@ -75,7 +75,7 @@ Python 3.11.4 (main, MONTH DAY YEAR, TIME) [GCC XX.X.X] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import ngclearn >>> ngclearn.__version__ -'2.0.0' +'2.0.3' ``` Note: For access to the previous Tensorflow-2 version of ngc-learn (of @@ -122,7 +122,7 @@ $ python install -e . **Version:**
-2.0.2 +2.0.3 Author: Alexander G. Ororbia II
diff --git a/docs/installation.md b/docs/installation.md index 03bbe8a2..64bcc5c1 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -9,7 +9,7 @@ requires that you ensure that you have installed the following base dependencies your system. Note that this library was developed and tested on Ubuntu 22.04 (and earlier versions on 18.04/20.04). Specifically, ngc-learn requires: * Python (>=3.10) -* ngcsimlib (>=1.0.0), (official page) +* ngcsimlib (>=1.0.1), (official page) * NumPy (>=1.22.0) * SciPy (>=1.7.0) * JAX (>= 0.4.28; and jaxlib>=0.4.28) @@ -78,7 +78,7 @@ Python 3.11.4 (main, MONTH DAY YEAR, TIME) [GCC XX.X.X] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import ngclearn >>> ngclearn.__version__ -'2.0.2' +'2.0.3' ``` Note: If you do not have a JSON configuration file in place (see tutorials diff --git a/docs/requirements.txt b/docs/requirements.txt index 8fb2158c..0ebb3dc3 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -8,4 +8,4 @@ matplotlib>=3.8.0 jax>=0.4.28 jaxlib>=0.4.28 imageio>=2.31.5 -ngcsimlib>=1.0.0 +ngcsimlib>=1.0.1 diff --git a/docs/tutorials/model_basics/evolving_synapses.md b/docs/tutorials/model_basics/evolving_synapses.md index 7b194bb1..8cf92f8b 100755 --- a/docs/tutorials/model_basics/evolving_synapses.md +++ b/docs/tutorials/model_basics/evolving_synapses.md @@ -29,14 +29,13 @@ dkey, *subkeys = random.split(dkey, 6) ## create simple system with only one F-N cell with Context("Circuit") as circuit: - a = RateCell(name="a", n_units=1, tau_m=0., - act_fx="identity", key=subkeys[0]) - b = RateCell(name="b", n_units=1, tau_m=0., - act_fx="identity", key=subkeys[1]) + a = RateCell(name="a", n_units=1, tau_m=0., act_fx="identity", key=subkeys[0]) + b = RateCell(name="b", n_units=1, tau_m=0., act_fx="identity", key=subkeys[1]) - Wab = HebbianSynapse(name="Wab", shape=(1, 1), eta=1., - sign_value=-1., weight_init=dist.constant(value=1.), - w_bound=0., key=subkeys[3]) + Wab = 
HebbianSynapse( + name="Wab", shape=(1, 1), eta=1., sign_value=-1., weight_init=dist.constant(value=1.), + w_bound=0., key=subkeys[3] + ) # wire output compartment (rate-coded output zF) of RateCell `a` to input compartment of HebbianSynapse `Wab` Wab.inputs << a.zF @@ -50,7 +49,7 @@ with Context("Circuit") as circuit: ## create and compile core simulation commands evolve_process = (JaxProcess() - >> a.evolve) + >> Wab.evolve) circuit.wrap_and_add_command(jit(evolve_process.pure), name="evolve") advance_process = (JaxProcess() diff --git a/docs/tutorials/model_basics/model_building.md b/docs/tutorials/model_basics/model_building.md index 88707aa9..e5cff8e6 100755 --- a/docs/tutorials/model_basics/model_building.md +++ b/docs/tutorials/model_basics/model_building.md @@ -21,12 +21,11 @@ dkey, *subkeys = random.split(dkey, 4) ## create simple dynamical system: a --> w_ab --> b with Context("model") as model: - a = RateCell(name="a", n_units=1, tau_m=0., - act_fx="identity", key=subkeys[0]) - b = RateCell(name="b", n_units=1, tau_m=20., - act_fx="identity", key=subkeys[1]) - Wab = HebbianSynapse(name="Wab", shape=(1, 1), - weight_init=dist.constant(value=1.), key=subkeys[2]) + a = RateCell(name="a", n_units=1, tau_m=0., act_fx="identity", key=subkeys[0]) + b = RateCell(name="b", n_units=1, tau_m=20., act_fx="identity", key=subkeys[1]) + Wab = HebbianSynapse( + name="Wab", shape=(1, 1), weight_init=dist.constant(value=1.), key=subkeys[2] + ) ``` Next, we will want to wire together the three components we have embedded into diff --git a/docs/tutorials/neurocog/hodgkin_huxley_cell.md b/docs/tutorials/neurocog/hodgkin_huxley_cell.md index 44e3b0a7..e055b5c5 100755 --- a/docs/tutorials/neurocog/hodgkin_huxley_cell.md +++ b/docs/tutorials/neurocog/hodgkin_huxley_cell.md @@ -77,12 +77,10 @@ essentially probability values: `m` ($\mathbf{m}_t$) for the probability of sodium channel subunit activation, and `h` ($\mathbf{h}_t$) for the probability of sodium channel subunit 
inactivation. -neurons and muscle cells. It is a continuous-time dynamical system. - Formally, the core dynamics of the H-H cell can be written out as follows: $$ -\tau_v \frac{\partial \mathbf{v}_t}{\partial t} &= \mathbf{j}_t - g_Na * \mathbf{m}^3_t * \mathbf{h}_t * (\mathbf{v}_t - v_Na) - g_K * \mathbf{n}^4_t * (\mathbf{v}_t - v_K) - g_L * (\mathbf{v}_t - v_L) \\ +\tau_v \frac{\partial \mathbf{v}_t}{\partial t} &= \mathbf{j}_t - g_{Na} * \mathbf{m}^3_t * \mathbf{h}_t * (\mathbf{v}_t - v_{Na}) - g_K * \mathbf{n}^4_t * (\mathbf{v}_t - v_K) - g_L * (\mathbf{v}_t - v_L) \\ \frac{\partial \mathbf{n}_t}{\partial t} &= \alpha_n(\mathbf{v}_t) * (1 - \mathbf{n}_t) - \beta_n(\mathbf{v}_t) * \mathbf{n}_t \\ \frac{\partial \mathbf{m}_t}{\partial t} &= \alpha_m(\mathbf{v}_t) * (1 - \mathbf{m}_t) - \beta_m(\mathbf{v}_t) * \mathbf{m}_t \\ \frac{\partial \mathbf{h}_t}{\partial t} &= \alpha_h(\mathbf{v}_t) * (1 - \mathbf{h}_t) - \beta_h(\mathbf{v}_t) * \mathbf{h}_t diff --git a/docs/tutorials/neurocog/integration.md b/docs/tutorials/neurocog/integration.md index a42dea7c..db3fa1ca 100644 --- a/docs/tutorials/neurocog/integration.md +++ b/docs/tutorials/neurocog/integration.md @@ -194,7 +194,7 @@ which should yield you a plot like the one below: -As you might observe, RK-4 give the best approximation of the solution. In addition, +As you might observe, RK-4 gives the best approximation of the solution. In addition, when the integration step size is held constant, Euler integration does quite poorly over just a few steps while RK-2 and Heun's method do much better at approximating the analytical equation. 
In the end, the type of numerical integration method employed can diff --git a/docs/tutorials/neurocog/simple_leaky_integrator.md b/docs/tutorials/neurocog/simple_leaky_integrator.md index 73068a73..ec8d485e 100644 --- a/docs/tutorials/neurocog/simple_leaky_integrator.md +++ b/docs/tutorials/neurocog/simple_leaky_integrator.md @@ -10,7 +10,7 @@ integrator components, the simplified leaky integrate-and-fire (SLIF). With our JSON configuration in place, go ahead and create a Python script, i.e., `run_slif.py`, to write your code for this part of the tutorial. -Now let's go ahead and set up the controller for this lesson's simulation, +Now let's go ahead and set up the controller/context for this lesson's simulation, where we will a dynamical system with only a single component, specifically the simplified LIF (sLIF), like so: @@ -55,14 +55,14 @@ with Context("Model") as model: ``` This node has quite a few compartments and constants but only a handful are important -for understanding how this model governs spiking/firing rates during -a controller's simulation window. Specifically, in this lesson, we will focus on +for understanding how this model governs spiking/firing rates within its simulation window. +Specifically, in this lesson, we will focus on its electrical current `j` (formally labeled here as $\mathbf{j}_t$), its voltage `v` (formally labeled: $\mathbf{v}_t$), its spike emission (or action potential) `s` (formally $\mathbf{s}_t$), and its refractory variable/marker (formally $\mathbf{r}_t$). The subscript $t$ indicates that this compartment variable takes on a certain value at a certain time step -$t$ and we will refer to the ngc-learn controller's integration time constant, +$t$ and we will refer to the ngc-learn context's integration time constant, the amount of time we move forward by, as $\Delta t$. 
The constants or hyper-parameters we will be most interested in are the cell's membrane resistance `R_m` (formally $R$ with its capacitance $C$ implied), its membrane time @@ -198,7 +198,7 @@ its synaptic current over time - we will not, however, cover this functionality in this walkthrough.)--> In effect, given the above, every time the `sLIF`'s `.advanceState()` function is -called within a simulation controller (`Controller()`), the above Euler integration of +called within a simulation controller context (`Context()`), the above Euler integration of the membrane potential differential equation is happening each time step. Knowing this, the last item required to understand ngc-learn's `sLIF` node's computation is related to its spike $\mathbf{s}_t$. The spike reading is computed simply by diff --git a/history.txt b/history.txt index 108b82d6..4ee30278 100644 --- a/history.txt +++ b/history.txt @@ -80,3 +80,8 @@ History * integration of reinforce-synapse, block/partitioned synapse component ("patched-synapse") * basic unit-tests (pytest framework) integrated to support dev * includes support for Intel's lava-nc emulator (several spiking/stdp components that play with ngc-lava) + +2.0.3 +— — — — — — — — - + * Minor patch to point / depend on minor-patched ngcsimlib 1.0.1 (nudge to minor patched release) + * Added wrapper `inverse_sigmoid` for original `inverse_logistic` routine in model_utils (for convenience) diff --git a/ngclearn/utils/model_utils.py b/ngclearn/utils/model_utils.py index a7b9f141..facad87e 100755 --- a/ngclearn/utils/model_utils.py +++ b/ngclearn/utils/model_utils.py @@ -508,10 +508,13 @@ def d_sigmoid(x): sigm_x = nn.sigmoid(x) ## pre-compute once return sigm_x * (1. 
- sigm_x) +def inverse_sigmoid(x, clip_bound=0.03): ## wrapper call for naming convention ease + return inverse_logistic(x, clip_bound=clip_bound) + @jit -def inverse_logistic(x, clip_bound=0.03): # 0.03 +def inverse_logistic(x, clip_bound=0.03): """ - The inverse logistic link - logit function. + The inverse logistic link - the logit function. Args: x: data to transform via inverse logistic function diff --git a/pyproject.toml b/pyproject.toml index 012f7e41..71681a99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,14 @@ [build-system] -requires = ["setuptools>=61.0"] -build-backend = "setuptools.build_meta" +requires = [ + "setuptools>=61.0", # default + "wheel", # also often needed + "numpy>=1.19.5" # add numpy here for build-time use +] +build-backend = "setuptools.build_meta" # using setuptool building engine [project] name = "ngclearn" -version = "2.0.2" +version = "2.0.3" description = "Simulation software for building and analyzing arbitrary predictive coding, spiking network, and biomimetic neural systems." authors = [ {name = "Alexander Ororbia", email = "ago@cs.rit.edu"}, diff --git a/requirements.txt b/requirements.txt index e689ce87..36285e9d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,6 @@ matplotlib>=3.8.0 patchify jax>=0.4.28 jaxlib>=0.4.28 -ngcsimlib>=1.0.0 +ngcsimlib>=1.0.1 imageio>=2.31.5 pandas>=2.2.3