# 自连接和有状态突触

## 自连接模块

设 $x[t]$ 为 $t$ 时刻整个模块的输入，$i[t]$ 和 $y[t]$ 分别是 sub_module 的输入和输出（注意 $y[t]$ 同时也是整个模块的输出），则

$$i[t] = f(x[t], y[t-1])$$

其中 $f(\cdot, \cdot)$ 是用户自定义的连接方式。`ElementWiseRecurrentContainer` 使用逐元素相加作为连接，即

$$i[t] = x[t] + y[t-1].$$

import torch
from spikingjelly.activation_based import layer, functional, neuron

T = 8  # number of simulation time steps
N = 1  # batch size


def element_wise_add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Element-wise recurrent connection: i[t] = x[t] + y[t-1]."""
    return x + y


# Wrap an IF neuron in a self-connection: at each step the neuron's previous
# output spike is added element-wise to the current external input.
net = layer.ElementWiseRecurrentContainer(neuron.IFNode(), element_wise_add)
print(net)

x = torch.zeros([T, N])
# A single super-threshold pulse at t=0; the self-connection feeds the output
# spike back, so the neuron keeps firing even though later inputs are zero.
x[0] = 1.5
for t in range(T):
    print(t, f'x[t]={x[t]}, s[t]={net(x[t])}')

# Stateful modules (neuron potential, recurrent state) must be reset before
# the network is reused on new data.
functional.reset_net(net)


ElementWiseRecurrentContainer(
  element-wise function=<function element_wise_add at 0x00000158FC15ACA0>, step_mode=s
  (sub_module): IFNode(
    v_threshold=1.0, v_reset=0.0, detach_reset=False, step_mode=s, backend=torch
    (surrogate_function): Sigmoid(alpha=4.0, spiking=True)
  )
)
0 x[t]=tensor([1.5000]), s[t]=tensor([1.])
1 x[t]=tensor([0.]), s[t]=tensor([1.])
2 x[t]=tensor([0.]), s[t]=tensor([1.])
3 x[t]=tensor([0.]), s[t]=tensor([1.])
4 x[t]=tensor([0.]), s[t]=tensor([1.])
5 x[t]=tensor([0.]), s[t]=tensor([1.])
6 x[t]=tensor([0.]), s[t]=tensor([1.])
7 x[t]=tensor([0.]), s[t]=tensor([1.])


## 有状态的突触

[2] [3] 等文章使用有状态的突触。将 `spikingjelly.activation_based.layer.SynapseFilter` 放在普通无状态突触的后面，对突触输出的电流进行滤波，就可以得到有状态的突触，例如：

import torch
import torch.nn as nn
from spikingjelly.activation_based import layer, functional, neuron

# A stateful synapse: a stateless convolution followed by a SynapseFilter,
# which low-pass filters the output current with time constant tau=100.
stateful_conv = nn.Sequential(
    layer.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),
    layer.SynapseFilter(tau=100.)
)


## Sequential FashionMNIST上的对比实验

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets
from spikingjelly.activation_based import neuron, surrogate, layer, functional
from torch.cuda import amp
import os, argparse
from torch.utils.tensorboard import SummaryWriter
import time
import datetime
import sys


class PlainNet(nn.Module):
    """Feedforward baseline: Linear -> IF -> Linear -> IF.

    The output is the firing rate of the last layer, i.e. the spikes
    averaged over the time dimension.
    """

    def __init__(self):
        super().__init__()
        self.fc = nn.Sequential(
            layer.Linear(28, 32),
            neuron.IFNode(surrogate_function=surrogate.ATan()),
            layer.Linear(32, 10),
            neuron.IFNode(surrogate_function=surrogate.ATan())
        )

    def forward(self, x: torch.Tensor):
        # assumes x is (T, N, 28) — one image row per time step; TODO confirm
        # against the sequential FMNIST training loop.
        return self.fc(x).mean(0)


class StatefulSynapseNet(nn.Module):
    """Same topology as PlainNet, plus a learnable SynapseFilter between
    the two layers, turning the first synapse into a stateful one.
    """

    def __init__(self):
        super().__init__()
        self.fc = nn.Sequential(
            layer.Linear(28, 32),
            neuron.IFNode(surrogate_function=surrogate.ATan()),
            layer.SynapseFilter(tau=2., learnable=True),
            layer.Linear(32, 10),
            neuron.IFNode(surrogate_function=surrogate.ATan())
        )

    def forward(self, x: torch.Tensor):
        # assumes x is (T, N, 28) — one image row per time step; TODO confirm
        # against the sequential FMNIST training loop.
        return self.fc(x).mean(0)


class FeedBackNet(nn.Module):
    """Same topology as PlainNet, but the first IF neuron is wrapped in a
    LinearRecurrentContainer, giving it a learnable linear self-connection.
    """

    def __init__(self):
        super().__init__()
        self.fc = nn.Sequential(
            layer.Linear(28, 32),
            # Feed the neuron's previous output back through a 32x32 linear map.
            layer.LinearRecurrentContainer(
                neuron.IFNode(surrogate_function=surrogate.ATan(), detach_reset=True),
                in_features=32, out_features=32, bias=True
            ),
            layer.Linear(32, 10),
            neuron.IFNode(surrogate_function=surrogate.ATan())
        )

    def forward(self, x: torch.Tensor):
        # assumes x is (T, N, 28) — one image row per time step; TODO confirm
        # against the sequential FMNIST training loop.
        return self.fc(x).mean(0)


usage: rsnn_sequential_fmnist.py [-h] [-model MODEL] [-device DEVICE] [-b B] [-epochs N] [-j N] [-data-dir DATA_DIR] [-out-dir OUT_DIR] [-resume RESUME] [-amp] [-cupy] [-opt OPT] [-momentum MOMENTUM] [-lr LR]

Classify Sequential Fashion-MNIST

optional arguments:
-h, --help          show this help message and exit
-model MODEL        use which model, "plain", "ss" (StatefulSynapseNet) or "fb" (FeedBackNet)
-device DEVICE      device
-b B                batch size
-epochs N           number of total epochs to run
-j N                number of data loading workers
-data-dir DATA_DIR  root dir of Fashion-MNIST dataset
-out-dir OUT_DIR    root dir for saving logs and checkpoint
-resume RESUME      resume from the checkpoint path
-amp                automatic mixed precision training
-cupy               use cupy backend
-opt OPT            use which optimizer. SDG or Adam
-momentum MOMENTUM  momentum for SGD
-lr LR              learning rate


python -m spikingjelly.activation_based.examples.rsnn_sequential_fmnist -device cuda:0 -b 256 -epochs 64 -data-dir /datasets/FashionMNIST/ -amp -cupy -opt sgd -lr 0.1 -j 8 -model plain

python -m spikingjelly.activation_based.examples.rsnn_sequential_fmnist -device cuda:0 -b 256 -epochs 64 -data-dir /datasets/FashionMNIST/ -amp -cupy -opt sgd -lr 0.1 -j 8 -model fb

python -m spikingjelly.activation_based.examples.rsnn_sequential_fmnist -device cuda:0 -b 256 -epochs 64 -data-dir /datasets/FashionMNIST/ -amp -cupy -opt sgd -lr 0.1 -j 8 -model ss


1

Yin B, Corradi F, Bohté S M. Effective and efficient computation with multiple-timescale spiking recurrent neural networks[C]//International Conference on Neuromorphic Systems 2020. 2020: 1-8.

2

Diehl P U, Cook M. Unsupervised learning of digit recognition using spike-timing-dependent plasticity[J]. Frontiers in computational neuroscience, 2015, 9: 99.

3

Fang H, Shrestha A, Zhao Z, et al. Exploiting Neuron and Synapse Filter Dynamics in Spatial Temporal Learning of Deep Spiking Neural Network[J].