
Commit b5138f8
update to latest emergent with path rendering
rcoreilly committed Aug 26, 2024
1 parent e6490fc commit b5138f8
Showing 8 changed files with 67 additions and 59 deletions.
8 changes: 4 additions & 4 deletions examples/ra25/ra25.go
@@ -333,13 +333,13 @@ func (ss *Sim) ConfigNet(net *leabra.Network) {
 	net.SetRandSeed(ss.RandSeeds[0]) // init new separate random seed, using run = 0
 
 	inp := net.AddLayer2D("Input", 5, 5, leabra.InputLayer)
-	inp.Info = "Input represents sensory input, coming into the cortex via tha thalamus"
+	inp.Doc = "Input represents sensory input, coming into the cortex via tha thalamus"
 	hid1 := net.AddLayer2D("Hidden1", ss.Config.Params.Hidden1Size.Y, ss.Config.Params.Hidden1Size.X, leabra.SuperLayer)
-	hid1.Info = "First hidden layer performs initial internal processing of sensory inputs, transforming in preparation for producing appropriate responses"
+	hid1.Doc = "First hidden layer performs initial internal processing of sensory inputs, transforming in preparation for producing appropriate responses"
 	hid2 := net.AddLayer2D("Hidden2", ss.Config.Params.Hidden2Size.Y, ss.Config.Params.Hidden2Size.X, leabra.SuperLayer)
-	hid2.Info = "Another 'deep' layer of internal processing to prepare directly for Output response"
+	hid2.Doc = "Another 'deep' layer of internal processing to prepare directly for Output response"
 	out := net.AddLayer2D("Output", 5, 5, leabra.TargetLayer)
-	out.Info = "Output represents motor output response, via deep layer 5 neurons projecting supcortically, in motor cortex"
+	out.Doc = "Output represents motor output response, via deep layer 5 neurons projecting supcortically, in motor cortex"
 
 	// use this to position layers relative to each other
 	// hid2.PlaceRightOf(hid1, 2)
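The only substantive change in ra25.go is the rename of the layer annotation field from Info to Doc, tracking the corresponding field rename in the updated emergent dependency (see the go.mod bump below); the documentation strings themselves are unchanged.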
2 changes: 1 addition & 1 deletion go.mod
@@ -4,7 +4,7 @@ go 1.22
 
 require (
 	cogentcore.org/core v0.3.3-0.20240825182959-fe4840922a21
-	github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240825183141-a2d399e6a078
+	github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240826102207-14a576d51386
 )
 
 require (
4 changes: 2 additions & 2 deletions go.sum
@@ -34,8 +34,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
 github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240825183141-a2d399e6a078 h1:st8x8pdpv+ToSYNhWgI+L970lJXWCs4tlSDvV+oNzZg=
-github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240825183141-a2d399e6a078/go.mod h1:jXS8jqUgcroXlX4510+FXsgb6osLiDWRJLDCD2gpzow=
+github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240826102207-14a576d51386 h1:tFgKfRHzssTtHVvz4IUaBXf1Cqmmb5s1/bPb0HY0sLw=
+github.com/emer/emergent/v2 v2.0.0-dev0.1.0.0.20240826102207-14a576d51386/go.mod h1:jXS8jqUgcroXlX4510+FXsgb6osLiDWRJLDCD2gpzow=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
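Both go.mod and go.sum advance github.com/emer/emergent/v2 by one day, from the 20240825 pseudo-version to the 20240826 one at commit 14a576d51386. A bump like this is typically produced with "go get github.com/emer/emergent/v2@14a576d51386" followed by "go mod tidy". Note that the /go.mod h1: hash is identical before and after, which means the dependency's own go.mod file did not change between the two commits.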
1 change: 1 addition & 0 deletions leabra/layerbase.go
@@ -65,6 +65,7 @@ type Layer struct {
 
 func (ly *Layer) StyleObject() any { return ly }
 func (ly *Layer) TypeName() string { return ly.Type.String() }
+func (ly *Layer) TypeNumber() int { return int(ly.Type) }
 func (ly *Layer) NumRecvPaths() int { return len(ly.RecvPaths) }
 func (ly *Layer) RecvPath(idx int) emer.Path { return ly.RecvPaths[idx] }
 func (ly *Layer) NumSendPaths() int { return len(ly.SendPaths) }
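TypeNumber returns the integer value of the layer's Type enum, complementing the string-valued TypeName just above it; presumably the emer interfaces in the updated emergent now require both. The same one-line accessor is added to Path in pathbase.go below.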
77 changes: 42 additions & 35 deletions leabra/neuron.go
@@ -6,10 +6,12 @@ package leabra
 
 import (
 	"fmt"
+	"strings"
 	"unsafe"
 
 	"cogentcore.org/core/enums"
 	"cogentcore.org/core/math32"
+	"cogentcore.org/core/types"
 	"github.com/emer/emergent/v2/emer"
 )
 
@@ -165,54 +167,59 @@ var VarCategories = []emer.VarCategory{
 
 var NeuronVarProps = map[string]string{
 	// Act vars
-	"Act": `cat:"Act" desc:"rate-coded activation value reflecting final output of neuron communicated to other neurons, typically in range 0-1. This value includes adaptation and synaptic depression / facilitation effects which produce temporal contrast (see ActLrn for version without this). For rate-code activation, this is noisy-x-over-x-plus-one (NXX1) function; for discrete spiking it is computed from the inverse of the inter-spike interval (ISI), and Spike reflects the discrete spikes."`,
-	"Ge": `cat:"Act" desc:"total excitatory synaptic conductance -- the net excitatory input to the neuron -- does *not* include Gbar.E"`,
-	"Gi": `cat:"Act" desc:"total inhibitory synaptic conductance -- the net inhibitory input to the neuron -- does *not* include Gbar.I"`,
-	"Gk": `cat:"Act" desc:"total potassium conductance, typically reflecting sodium-gated potassium currents involved in adaptation effects -- does *not* include Gbar.K"`,
-	"Inet": `cat:"Act" desc:"net current produced by all channels -- drives update of Vm"`,
-	"Vm": `cat:"Act" min:"0" max:"1" desc:"membrane potential -- integrates Inet current over time"`,
-	"Noise": `cat:"Act" desc:"noise value added to unit (ActNoiseParams determines distribution, and when / where it is added)"`,
-	"Spike": `cat:"Act" desc:"whether neuron has spiked or not (0 or 1), for discrete spiking neurons."`,
-	"Targ": `cat:"Act" desc:"target value: drives learning to produce this activation value"`,
-	"Ext": `cat:"Act" desc:"external input: drives activation of unit from outside influences (e.g., sensory input)"`,
+	"Act": `cat:"Act"`,
+	"Ge": `cat:"Act"`,
+	"Gi": `cat:"Act"`,
+	"Gk": `cat:"Act"`,
+	"Inet": `cat:"Act"`,
+	"Vm": `cat:"Act" min:"0" max:"1"`,
+	"Noise": `cat:"Act"`,
+	"Spike": `cat:"Act"`,
+	"Targ": `cat:"Act"`,
+	"Ext": `cat:"Act"`,
 
 	// Learn vars
-	"AvgSS": `cat:"Learn" desc:"super-short time-scale average of ActLrn activation -- provides the lowest-level time integration -- for spiking this integrates over spikes before subsequent averaging, and it is also useful for rate-code to provide a longer time integral overall"`,
-	"AvgS": `cat:"Learn" desc:"short time-scale average of ActLrn activation -- tracks the most recent activation states (integrates over AvgSS values), and represents the plus phase for learning in XCAL algorithms"`,
-	"AvgM": `cat:"Learn" desc:"medium time-scale average of ActLrn activation -- integrates over AvgS values, and represents the minus phase for learning in XCAL algorithms"`,
-	"AvgL": `cat:"Learn" desc:"long time-scale average of medium-time scale (trial level) activation, used for the BCM-style floating threshold in XCAL"`,
-	"AvgLLrn": `cat:"Learn" desc:"how much to learn based on the long-term floating threshold (AvgL) for BCM-style Hebbian learning -- is modulated by level of AvgL itself (stronger Hebbian as average activation goes higher) and optionally the average amount of error experienced in the layer (to retain a common proportionality with the level of error-driven learning across layers)"`,
-	"AvgSLrn": `cat:"Learn" desc:"short time-scale activation average that is actually used for learning -- typically includes a small contribution from AvgM in addition to mostly AvgS, as determined by LrnActAvgParams.LrnM -- important to ensure that when unit turns off in plus phase (short time scale), enough medium-phase trace remains so that learning signal doesn't just go all the way to 0, at which point no learning would take place"`,
-	"ActLrn": `cat:"Learn" desc:"learning activation value, reflecting *dendritic* activity that is not affected by synaptic depression or adapdation channels which are located near the axon hillock. This is the what drives the Avg* values that drive learning. Computationally, neurons strongly discount the signals sent to other neurons to provide temporal contrast, but need to learn based on a more stable reflection of their overall inputs in the dendrites."`,
+	"AvgSS": `cat:"Learn"`,
+	"AvgS": `cat:"Learn"`,
+	"AvgM": `cat:"Learn"`,
+	"AvgL": `cat:"Learn"`,
+	"AvgLLrn": `cat:"Learn"`,
+	"AvgSLrn": `cat:"Learn"`,
+	"ActLrn": `cat:"Learn"`,
 
 	// Phase vars
-	"ActM": `cat:"Phase" desc:"the activation state at end of third quarter, which is the traditional posterior-cortical minus phase activation"`,
-	"ActP": `cat:"Phase" desc:"the activation state at end of fourth quarter, which is the traditional posterior-cortical plus_phase activation"`,
-	"ActDif": `cat:"Phase" auto-scale:"+" desc:"ActP - ActM -- difference between plus and minus phase acts -- reflects the individual error gradient for this neuron in standard error-driven learning terms"`,
-	"ActDel": `cat:"Phase" auto-scale:"+" desc:"delta activation: change in Act from one cycle to next -- can be useful to track where changes are taking place"`,
-	"ActQ0": `cat:"Phase" desc:"the activation state at start of current alpha cycle (same as the state at end of previous cycle)"`,
-	"ActQ1": `cat:"Phase" desc:"the activation state at end of first quarter of current alpha cycle"`,
-	"ActQ2": `cat:"Phase" desc:"the activation state at end of second quarter of current alpha cycle"`,
-	"ActAvg": `cat:"Phase" desc:"average activation (of final plus phase activation state) over long time intervals (time constant = DtPars.AvgTau -- typically 200) -- useful for finding hog units and seeing overall distribution of activation"`,
+	"ActM": `cat:"Phase"`,
+	"ActP": `cat:"Phase"`,
+	"ActDif": `cat:"Phase" auto-scale:"+"`,
+	"ActDel": `cat:"Phase" auto-scale:"+"`,
+	"ActQ0": `cat:"Phase"`,
+	"ActQ1": `cat:"Phase"`,
+	"ActQ2": `cat:"Phase"`,
+	"ActAvg": `cat:"Phase"`,
 
 	// Gmisc vars
-	"GiSyn": `cat:"Gmisc" desc:"aggregated synaptic inhibition (from Inhib pathways) -- time integral of GiRaw -- this is added with computed FFFB inhibition to get the full inhibition in Gi"`,
-	"GiSelf": `cat:"Gmisc" desc:"total amount of self-inhibition -- time-integrated to avoid oscillations"`,
-	"ActSent": `cat:"Gmisc" desc:"last activation value sent (only send when diff is over threshold)"`,
-	"GeRaw": `cat:"Gmisc" desc:"raw excitatory conductance (net input) received from sending units (send delta's are added to this value)"`,
-	"GiRaw": `cat:"Gmisc" desc:"raw inhibitory conductance (net input) received from sending units (send delta's are added to this value)"`,
-	"GknaFast": `cat:"Gmisc" desc:"conductance of sodium-gated potassium channel (KNa) fast dynamics (M-type) -- produces accommodation / adaptation of firing"`,
-	"GknaMed": `cat:"Gmisc" desc:"conductance of sodium-gated potassium channel (KNa) medium dynamics (Slick) -- produces accommodation / adaptation of firing"`,
-	"GknaSlow": `cat:"Gmisc" desc:"conductance of sodium-gated potassium channel (KNa) slow dynamics (Slack) -- produces accommodation / adaptation of firing"`,
-	"ISI": `cat:"Gmisc" desc:"current inter-spike-interval -- counts up since last spike. Starts at -1 when initialized."`,
-	"ISIAvg": `cat:"Gmisc" desc:"average inter-spike-interval -- average time interval between spikes. Starts at -1 when initialized, and goes to -2 after first spike, and is only valid after the second spike post-initialization."`,
+	"GiSyn": `cat:"Gmisc"`,
+	"GiSelf": `cat:"Gmisc"`,
+	"ActSent": `cat:"Gmisc"`,
+	"GeRaw": `cat:"Gmisc"`,
+	"GiRaw": `cat:"Gmisc"`,
+	"GknaFast": `cat:"Gmisc"`,
+	"GknaMed": `cat:"Gmisc"`,
+	"GknaSlow": `cat:"Gmisc"`,
+	"ISI": `cat:"Gmisc"`,
+	"ISIAvg": `cat:"Gmisc"`,
 }
 
 func init() {
 	NeuronVarsMap = make(map[string]int, len(NeuronVars))
 	for i, v := range NeuronVars {
 		NeuronVarsMap[v] = i
 	}
+	ntyp := types.For[Neuron]()
+	for _, fld := range ntyp.Fields {
+		tag := NeuronVarProps[fld.Name]
+		NeuronVarProps[fld.Name] = tag + ` doc:"` + strings.ReplaceAll(fld.Doc, "\n", " ") + `"`
+	}
 }
 
 func (nrn *Neuron) VarNames() []string {
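This init() change is the heart of the commit's cleanup: the long desc strings are deleted from NeuronVarProps because each variable's documentation now lives as a Go doc comment on the corresponding Neuron struct field, which the cogentcore types registry exposes at runtime (types.For[Neuron]().Fields). The new loop folds each field's doc back into its prop string as a doc:"..." tag. Below is a minimal, self-contained sketch of just that tag-merging step; the mergeDoc helper and the sample doc text are hypothetical stand-ins for the inlined expression and the real field docs:

package main

import (
	"fmt"
	"strings"
)

// mergeDoc mirrors the body of the new init() loop: it appends a
// doc:"..." key to an existing prop-tag string, flattening newlines
// so the whole tag stays on one line. The helper name is hypothetical;
// the real code inlines this expression.
func mergeDoc(tag, doc string) string {
	return tag + ` doc:"` + strings.ReplaceAll(doc, "\n", " ") + `"`
}

func main() {
	// Toy stand-ins for NeuronVarProps["Vm"] and the Vm field's doc comment.
	props := map[string]string{"Vm": `cat:"Act" min:"0" max:"1"`}
	vmDoc := "membrane potential, integrating\nInet current over time"
	props["Vm"] = mergeDoc(props["Vm"], vmDoc)
	fmt.Println(props["Vm"])
	// prints: cat:"Act" min:"0" max:"1" doc:"membrane potential, integrating Inet current over time"
}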
1 change: 1 addition & 0 deletions leabra/pathbase.go
@@ -111,6 +111,7 @@ func (pt *Path) StyleObject() any { return pt }
 func (pt *Path) RecvLayer() emer.Layer { return pt.Recv }
 func (pt *Path) SendLayer() emer.Layer { return pt.Send }
 func (pt *Path) TypeName() string { return pt.Type.String() }
+func (pt *Path) TypeNumber() int { return int(pt.Type) }
 
 func (pt *Path) Defaults() {
 	pt.WtInit.Defaults()
29 changes: 14 additions & 15 deletions leabra/synapse.go
@@ -6,8 +6,10 @@ package leabra
 
 import (
 	"fmt"
-	"reflect"
+	"strings"
 	"unsafe"
+
+	"cogentcore.org/core/types"
 )
 
 // leabra.Synapse holds state for the synaptic connection between neurons
@@ -54,28 +56,25 @@ func (sy *Synapse) VarNames() []string {
 
 var SynapseVars = []string{"Wt", "LWt", "DWt", "Norm", "Moment", "Scale"}
 
 var SynapseVarProps = map[string]string{
-	"Wt": `cat:"Wts" desc:"synaptic weight value, sigmoid contrast-enhanced version of the linear weight LWt"`,
-	"LWt": `cat:"Wts" desc:"linear (underlying) weight value, which learns according to the lrate specified in the connection spec. this is converted into the effective weight value, Wt, via sigmoidal contrast enhancement (see WtSigParams)"`,
-	"DWt": `cat:"Wts" auto-scale:"+" desc:"change in synaptic weight, driven by learning algorithm"`,
-	"Norm": `cat:"Wts" desc:"DWt normalization factor, reset to max of abs value of DWt, decays slowly down over time. Serves as an estimate of variance in weight changes over time"`,
-	"Moment": `cat:"Wts" auto-scale:"+" desc:"momentum, as time-integrated DWt changes, to accumulate a consistent direction of weight change and cancel out dithering contradictory changes"`,
-	"Scale": `cat:"Wts" desc:"scaling parameter for this connection: effective weight value is scaled by this factor in computing G conductance. This is useful for topographic connectivity patterns e.g., to enforce more distant connections to always be lower in magnitude than closer connections. Value defaults to 1 (cannot be exactly 0, otherwise is automatically reset to 1; use a very small number to approximate 0). Typically set by using the paths.Pattern Weights() values where appropriate"`,
+	"Wt": `cat:"Wts"`,
+	"LWt": `cat:"Wts"`,
+	"DWt": `cat:"Wts" auto-scale:"+"`,
+	"Norm": `cat:"Wts"`,
+	"Moment": `cat:"Wts" auto-scale:"+"`,
+	"Scale": `cat:"Wts"`,
 }
 
 var SynapseVarsMap map[string]int
 
 func init() {
 	SynapseVarsMap = make(map[string]int, len(SynapseVars))
-	typ := reflect.TypeOf((*Synapse)(nil)).Elem()
 	for i, v := range SynapseVars {
 		SynapseVarsMap[v] = i
-		pstr := SynapseVarProps[v]
-		if fld, has := typ.FieldByName(v); has {
-			if desc, ok := fld.Tag.Lookup("desc"); ok {
-				pstr += ` desc:"` + desc + `"`
-				SynapseVarProps[v] = pstr
-			}
-		}
 	}
+	styp := types.For[Synapse]()
+	for _, fld := range styp.Fields {
+		tag := SynapseVarProps[fld.Name]
+		SynapseVarProps[fld.Name] = tag + ` doc:"` + strings.ReplaceAll(fld.Doc, "\n", " ") + `"`
+	}
 }
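synapse.go gets the same treatment as neuron.go: the desc strings move out of SynapseVarProps into the Synapse field doc comments, and init() swaps the old reflect-based lookup of desc struct tags for the types.For[Synapse]().Fields doc harvesting sketched above. One behavioral difference to note: the old loop added a desc tag only for fields that actually carried a desc struct tag, whereas the new loop appends a doc:"..." entry for every field, even when the doc string is empty.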