spikingjelly.activation_based.learning package

Module contents

spikingjelly.activation_based.learning.stdp_linear_single_step(fc: ~torch.nn.modules.linear.Linear, in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], trace_post: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
spikingjelly.activation_based.learning.mstdp_linear_single_step(fc: ~torch.nn.modules.linear.Linear, in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], trace_post: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
spikingjelly.activation_based.learning.mstdpet_linear_single_step(fc: ~torch.nn.modules.linear.Linear, in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], trace_post: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], tau_pre: float, tau_post: float, tau_trace: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
spikingjelly.activation_based.learning.stdp_conv2d_single_step(conv: ~torch.nn.modules.conv.Conv2d, in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~torch.Tensor], trace_post: ~typing.Optional[~torch.Tensor], tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
spikingjelly.activation_based.learning.stdp_conv1d_single_step(conv: ~torch.nn.modules.conv.Conv1d, in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~torch.Tensor], trace_post: ~typing.Optional[~torch.Tensor], tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
spikingjelly.activation_based.learning.stdp_multi_step(layer: ~typing.Union[~torch.nn.modules.linear.Linear, ~torch.nn.modules.conv.Conv1d, ~torch.nn.modules.conv.Conv2d], in_spike: ~torch.Tensor, out_spike: ~torch.Tensor, trace_pre: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], trace_post: ~typing.Optional[~typing.Union[float, ~torch.Tensor]], tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function <lambda>>, f_post: ~typing.Callable = <function <lambda>>)[source]
class spikingjelly.activation_based.learning.STDPLearner(step_mode: str, synapse: ~typing.Union[~torch.nn.modules.conv.Conv2d, ~torch.nn.modules.linear.Linear], sn: ~spikingjelly.activation_based.neuron.BaseNode, tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function STDPLearner.<lambda>>, f_post: ~typing.Callable = <function STDPLearner.<lambda>>)[source]

Bases: MemoryModule

reset()[source]
disable()[source]
enable()[source]
step(on_grad: bool = True, scale: float = 1.0)[source]
training: bool
class spikingjelly.activation_based.learning.MSTDPLearner(step_mode: str, batch_size: float, synapse: ~typing.Union[~torch.nn.modules.conv.Conv2d, ~torch.nn.modules.linear.Linear], sn: ~spikingjelly.activation_based.neuron.BaseNode, tau_pre: float, tau_post: float, f_pre: ~typing.Callable = <function MSTDPLearner.<lambda>>, f_post: ~typing.Callable = <function MSTDPLearner.<lambda>>)[source]

Bases: MemoryModule

reset()[source]
disable()[source]
enable()[source]
step(reward, on_grad: bool = True, scale: float = 1.0)[source]
training: bool
class spikingjelly.activation_based.learning.MSTDPETLearner(step_mode: str, synapse: ~typing.Union[~torch.nn.modules.conv.Conv2d, ~torch.nn.modules.linear.Linear], sn: ~spikingjelly.activation_based.neuron.BaseNode, tau_pre: float, tau_post: float, tau_trace: float, f_pre: ~typing.Callable = <function MSTDPETLearner.<lambda>>, f_post: ~typing.Callable = <function MSTDPETLearner.<lambda>>)[source]

Bases: MemoryModule

reset()[source]
disable()[source]
enable()[source]
step(reward, on_grad: bool = True, scale: float = 1.0)[source]
training: bool