
Commit

cycle chunks are actually irrelevant -- it is just down to when you do RunDone.
rcoreilly committed Dec 26, 2024
1 parent df0d8e2 commit 8e43d8e
Showing 20 changed files with 46 additions and 65 deletions.
26 changes: 11 additions & 15 deletions axon/act-net.go

(Generated file; diff not rendered by default.)

26 changes: 11 additions & 15 deletions axon/act-net.goal
@@ -8,28 +8,24 @@ import (
 	"cogentcore.org/core/enums"
 )
 
-// Cycle runs n cycles of activation updating.
+// Cycle runs one cycle of activation updating, equivalent to 1 msec.
 // If getNeurons is true, then neuron state is synced back
-// from the GPU (for cycle-level display etc). Otherwise only
-// layer-level state is synced.
-func (nt *Network) Cycle(ncyc int, getNeurons bool) {
+// from the GPU (for cycle-level display etc). Otherwise, nothing is.
+func (nt *Network) Cycle(getNeurons bool) {
 	nix := nt.NetIxs()
 	ctx := nt.Context()
 	nd := int(nix.NNeurons * ctx.NData)
 	ld := int(nix.NLayers * ctx.NData)
 	pd := int(nix.NPools * ctx.NData)
 
-	// ToGPUCtxGlobal() // this is not a significant speed factor
-	for range ncyc {
-		RunGatherSpikes(nd)
-		RunLayerGi(ld)
-		RunBetweenGi(ld)
-		RunPoolGi(pd)
-		RunCycleNeuron(nd)
-		RunSendSpike(nd)
-		RunCyclePost(ld)
-		RunCycleInc(1)
-	}
+	RunGatherSpikes(nd)
+	RunLayerGi(ld)
+	RunBetweenGi(ld)
+	RunPoolGi(pd)
+	RunCycleNeuron(nd)
+	RunSendSpike(nd)
+	RunCyclePost(ld)
+	RunCycleInc(1)
 
 	if getNeurons {
 		RunDoneLayersNeurons()
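With the chunked loop removed, iterating over cycles is now the caller's job (normally the looper's, as in axon/looper.go below). A minimal sketch of the resulting calling pattern, assuming a configured *Network named net, a 200-cycle theta trial, and that the package-level UseGPU flag and Context().CycleInc() used in looper.go are reachable from the calling package; runThetaTrial itself is illustrative and not part of this commit:

	// runThetaTrial advances one trial a single msec cycle at a time,
	// syncing neuron state back from the GPU only on the final cycle.
	func runThetaTrial(net *axon.Network, nCycles int) {
		for cyc := 0; cyc < nCycles; cyc++ {
			getNeurons := cyc == nCycles-1 // sync neurons only at the end
			net.Cycle(getNeurons)
			if axon.UseGPU && !getNeurons {
				net.Context().CycleInc() // keep the CPU-side cycle counter in step
			}
		}
	}
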
19 changes: 4 additions & 15 deletions axon/looper.go
@@ -13,15 +13,13 @@ import (
 // LooperStandard adds all the standard Axon Trial and Cycle level processing calls
 // to the given Looper Stacks. cycle and trial are the enums for the looper levels,
 // trainMode is the training mode enum value.
-// - fastNCycles is the number of cycles to run in one chunk, when single-cycle iteration
-//   is not otherwise required (based on step level, netview update level).
 // - minus and plus phases of the theta cycle (trial), at plusStart (150) and plusEnd (199) cycles.
-// - embedded beta phases within theta, that record St1 and St2 states.
+// - embedded beta phases within theta, that record Beta1 and Beta2 states.
 // - net.Cycle() at every cycle step.
 // - net.DWt() and net.WtFromDWt() learning calls in training mode, with netview update
 //   between these two calls if it is visible and viewing synapse variables.
 // - netview update calls at appropriate levels (no-op if no GUI)
-func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, fastNCycles, plusStart, plusEnd int, cycle, trial, trainMode enums.Enum) {
+func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.Enum) *NetViewUpdate, plusStart, plusEnd int, cycle, trial, trainMode enums.Enum) {
 	ls.AddEventAllModes(cycle, "Beta1", 50, func() { net.Beta1() })
 	ls.AddEventAllModes(cycle, "Beta2", 100, func() { net.Beta2() })
 
@@ -31,31 +29,22 @@ func LooperStandard(ls *looper.Stacks, net *Network, viewFunc func(mode enums.En
 	for mode, st := range ls.Stacks {
 		cycLoop := st.Loops[cycle]
 		cycLoop.OnStart.Add("Cycle", func() {
-			nCycles := fastNCycles
 			getNeurons := false
 			if ls.ModeStack().StepLevel.Int64() == cycle.Int64() {
-				nCycles = 1
 				getNeurons = true
 			} else if view := viewFunc(mode); view != nil && view.View != nil {
 				if view.IsCycleUpdating() {
-					nCycles = 1
 					getNeurons = true
 				} else {
-					nCycles = min(nCycles, view.Time.Cycles())
 					if view.Time < Theta {
 						getNeurons = true
 					}
 				}
 			}
-			net.Cycle(nCycles, getNeurons)
-			if nCycles > 1 {
-				cycLoop.Counter.Cur += nCycles - 1
-			}
+			net.Cycle(getNeurons)
 			if UseGPU && !getNeurons {
 				ctx := net.Context()
-				for range nCycles {
-					ctx.CycleInc()
-				}
+				ctx.CycleInc() // keep synced
 			}
 		})
 
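Every sim below makes the same one-line call-site update: the fastNCycles argument (previously 10) is dropped, while the plusStart, plusEnd, and looper level arguments are unchanged. The general shape, with cycles and plusPhase taken from each sim's config:

	// before: axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
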
8 changes: 4 additions & 4 deletions sims/bgdorsal/README.md
@@ -1,8 +1,8 @@
-# pcore_ds
+# BG Dorsal
 
-This is a simple test of the [pcore](../../PCORE_BG.md) model of basal ganglia (BG) function, in the **Dorsal Striatum** (DS). See [pcore_vs](../pcore_vs) for the Ventral Striatum (VS) model, which is optimized for making global Go vs. No decisions based on cost / benefit inputs (see also [BOA](../boa)).
+This is a test of the [pcore](../../PCORE_BG.md) model of basal ganglia (BG) function, in the **Dorsal Striatum** (DS). See [bgventral](../bgventral) for the Ventral Striatum (VS) model, which is optimized for making global Go vs. No decisions based on cost / benefit inputs (see also [Rubicon](../../Rubicon.md).
 
-The DS is the input layer for the primary motor control part of the basal ganglia, and this model learns to execute a sequence of motor actions through reinforcement learning (RL), getting positive reinforcement for correct actions and lack of reinforcement for incorrect ones. Critically, there is no omnicient "teacher" input: the model has to discover the correct action sequence purely through trial and error, "online" learning (i.e., it learns on a trial-by-trial basis as it acts). This is the only biologically / ecologically realistic form of RL.
+The DS is the input layer for the primary motor control part of the basal ganglia, and this model learns to execute a sequence of motor actions through reinforcement learning (RL), getting positive reinforcement for correct actions and lack of reinforcement for incorrect ones. Critically, there is no omnicient "teacher" input: the model has to discover the correct action sequence purely through trial and error, "online" learning (i.e., it learns on a trial-by-trial basis as it acts). This is the only biologically / ecologically realistic form of RL.
 
 The model also has mechanisms to learn about the space of possible motor actions and their parameterization in the DS, which is driven by ascending pathways from the brainstem and spinal motor system, via the deep cerebellar nuclei (DCN) to the CL (central lateral) nucleus of the thalamus. This pathway is not currently beneficial in the model, and will be revisited once a simple model of the cerebellum is implemented, in the context of more fine-grained parameterized motor control. The descending pathway from motor cortex also conveys useful motor signal information, and these cortical reps are directly shaped by the same ascending motor signals.
 
@@ -30,7 +30,7 @@ The learned weights to the BG clearly show that it is disinhibiting the appropri
 
 # TODO:
 
-* Set number of cycles per trial in terms of BG motor gating timing: constant offset from onset of VM gating timing, with a cutoff for "nothing happening" trials.
+* Set number of cycles per trial in terms of BG motor gating timing: constant offset from onset of VM gating timing, with a cutoff for "nothing happening" trials. This is likely especially important with new linear approx to SynCa learning, which has degraded learning at 300 cycles, which had been significantly better.
 
 * "CL" not beneficial (implemented as direct MotorBS -> Matrix pathways): rel weight of 0.002 is OK but starts to actually impair above that. Likely that a functional cerebellum is needed to make this useful. Also, investigate other modulatory inputs to CL that might alter its signal. Key ref for diffs between CL and PF: LaceyBolamMagill07: C. J. Lacey, J. P. Bolam, P. J. Magill, Novel and distinct operational principles of intralaminar thalamic neurons and their striatal pathways. J. Neurosci. 27, 4374–4384 (2007).
2 changes: 1 addition & 1 deletion sims/bgdorsal/bg-dorsal.go
@@ -403,7 +403,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevel(Trial, seqLen).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/bgventral/bg-ventral.go
@@ -340,7 +340,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevel(Theta, 3).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Theta, Train) // note: Theta
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Theta, Train) // note: Theta
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/choose/choose.go
@@ -435,7 +435,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
 	ls.AddOnStartToLoop(Trial, "ApplyInputs", func(mode enums.Enum) {
 		ss.ApplyInputs(mode.(Modes))
2 changes: 1 addition & 1 deletion sims/deepfsa/deep-fsa.go
@@ -299,7 +299,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/deepmove/deep-move.go
@@ -357,7 +357,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/deepmusic/deep-music.go
@@ -341,7 +341,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/hip/hip.go
@@ -349,7 +349,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/inhib/inhib.go
@@ -293,7 +293,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, ss.Config.Run.Trials, 1).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	// ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/mpi/mpi.go
@@ -304,7 +304,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/objrec/objrec.go
@@ -317,7 +317,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/pfcmaint/pfcmaint.go
@@ -290,7 +290,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/pvlv/pvlv.go
@@ -330,7 +330,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevel(Trial, 5).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
 	ls.AddOnStartToLoop(Trial, "ApplyInputs", func(mode enums.Enum) {
 		ss.ApplyInputs(mode.(Modes))
2 changes: 1 addition & 1 deletion sims/ra25/ra25.go
@@ -441,7 +441,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/ra25x/ra25x.go
@@ -304,7 +304,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/rl/rl.go
@@ -264,7 +264,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevelIncr(Trial, trials, ss.Config.Run.NData).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Trial, Train)
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
2 changes: 1 addition & 1 deletion sims/vspatch/vspatch.go
@@ -307,7 +307,7 @@ func (ss *Sim) ConfigLoops() {
 		AddLevel(Theta, ev.Thetas).
 		AddLevel(Cycle, cycles)
 
-	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, 10, cycles-plusPhase, cycles-1, Cycle, Theta, Train) // note: Theta
+	axon.LooperStandard(ls, ss.Net, ss.NetViewUpdater, cycles-plusPhase, cycles-1, Cycle, Theta, Train) // note: Theta
 
 	ls.Stacks[Train].OnInit.Add("Init", func() { ss.Init() })
