Writing a small neural network with matrices

I'm trying to simulate a two-neuron network in Python. Writing out a separate set of equations for each neuron is simple enough, but I want to generalize the code so that the number of neurons can be increased easily without rewriting the equations over and over. The equations for the two-neuron network are as follows (writing $x \in \{m, h, n\}$ for the gating variables and $s_i$ for the synaptic variable of neuron $i$):
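
$$\frac{dV_i}{dt} = I_{\mathrm{inj}} - g_{Na}\,m_i^3 h_i\,(V_i - E_{Na}) - g_K\,n_i^4\,(V_i - E_K) - g_L\,(V_i - E_L) + \frac{V_r - V_i}{w}\sum_j e_{ij}\,s_j$$

$$\frac{dx_i}{dt} = \alpha_x(V_i)\,(1 - x_i) - \beta_x(V_i)\,x_i$$

$$\frac{ds_i}{dt} = \frac{5\,(1 - s_i)}{1 + e^{-(V_i + 3)/8}} - s_i$$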

Basically, I have two Hodgkin-Huxley neurons that are coupled through the last term in the voltage equation. What I want to do is write the code in such a way that I can easily scale the network up. To that end, I created a vector V of the neuron voltages, [V1, V2], and an array X that models the gating variables m, h, and n, so I have X = [[m1, h1, n1], [m2, h2, n2]]. At the moment, however, the code does not produce spikes; instead, the voltage appears to shoot off to infinity, which suggests a problem with the gating variables X. The gating variables m, h, and n should always stay between 0 and 1, so it looks like they simply reach 1 and stay there, which makes the voltage blow up. I'm not sure what causes them to get stuck at 1. The code runs without raising any errors.

import numpy as np
import matplotlib.pyplot as plt

NN=2 #Number of Neurons in Model

dt=0.01
T = np.arange(0.0, 1000.0, dt)
nt = len(T)  # total number of time steps

# Constants
gNa = 120.0 # maximum conductances, in mS/cm^2
gK  =  36.0
gL  =   0.3
ENa =  50.0 # Nernst reversal potentials, in mV
EK  = -77
EL  = -54.387

#Coupling Terms
Vr = 20
w = 1
e11 = e22 = 0
e12 = e21 = 0.1
E = np.array([[e11, e12], [e21, e22]])

#Gating Variable Transition Rates
def alpham(V): return (0.1*V+4.0)/(1.0 - np.exp(-0.1*V-4.0))
def betam(V):  return 4.0*np.exp(-(V+65.0) / 18.0)
def alphah(V): return 0.07*np.exp(-(V+65.0) / 20.0)
def betah(V):  return 1.0/(1.0 + np.exp(-0.1*V-3.5))
def alphan(V): return (0.01*V+0.55)/(1.0 - np.exp(-0.1*V-5.5))
def betan(V):  return 0.125*np.exp(-(V+65.0) / 80.0)
def psp(V,s): return ((5*(1-s))/(1+np.exp(-(V+3)/8)))-s

#Current Terms
def I_Na(V,x): return gNa * (x[:,0]**3) * x[:,1] * (V - ENa) #x0=m, x1=h, x2=n
def I_K(V,x): return gK * (x[:,2]**4) * (V - EK)
def I_L(V): return gL * (V - EL)
def I_inj(t): return 10.0
    
#Initial Conditions
V = np.zeros((nt,NN)) #Voltage vector
X = np.zeros((nt,NN,3)) #Gating Variables m,h,n (NN neurons x 3 gating variables)
S = np.zeros((nt,NN)) #Coupling term

dmdt = np.zeros((nt,NN))
dhdt = np.zeros((nt,NN))
dndt = np.zeros((nt,NN))

V[0,:] = -65.0
X[0,:,0] = alpham(V[0,:])/(alpham(V[0,:])+betam(V[0,:])) #m
X[0,:,1] = alphah(V[0,:])/(alphah(V[0,:])+betah(V[0,:])) #h
X[0,:,2] = alphan(V[0,:])/(alphan(V[0,:])+betan(V[0,:])) #n

alef = 5.0/(1+np.exp(-(V[0,:]+3)/8.0))
S[0,:] = alef/(alef+1)

dmdt[0,:] = alpham(V[0,:])*(1-X[0,:,0])-betam(V[0,:])*X[0,:,0]
dhdt[0,:] = alphah(V[0,:])*(1-X[0,:,1])-betah(V[0,:])*X[0,:,1]
dndt[0,:] = alphan(V[0,:])*(1-X[0,:,2])-betan(V[0,:])*X[0,:,2]

#Euler-Maruyama Integration
for i in range(1,nt):
    V[i,:]= V[i-1,:]+dt*(I_inj(i-1)-I_Na(V[i-1,:],X[i-1,:])-I_K(V[i-1,:],X[i-1,:])-I_L(V[i-1,:]))+dt*((Vr-V[i-1,:])/w * np.dot(E,S[i-1,:]))    
    
    #Gating Variable
    dmdt[i,:] = dmdt[i-1,:] + alpham(V[i-1,:])*(1-X[i-1,:,0])-betam(V[i-1,:])*X[i-1,:,0]
    dhdt[i,:] = dhdt[i-1,:] + alphah(V[i-1,:])*(1-X[i-1,:,1])-betah(V[i-1,:])*X[i-1,:,1]
    dndt[i,:] = dndt[i-1,:] + alphan(V[i-1,:])*(1-X[i-1,:,2])-betan(V[i-1,:])*X[i-1,:,2]
    z = np.array([dmdt[i-1,:],dhdt[i-1,:],dndt[i-1,:]]).T
    
    #Gating Variable Constraints (0<m,h,n<1)
    X[i,:,0] = max(0,min(X[i,:,0].all(),1))
    X[i,:,1] = max(0,min(X[i,:,1].all(),1))
    X[i,:,2] = max(0,min(X[i,:,2].all(),1))

    #Update Gating Variables
    X[i,:,:]= X[i-1,:,:]+dt*(z)
    
    #Coupling Term
    S[i,:] = S[i-1,:]+dt*psp(V[i-i,:],S[i-1,:])

V1 = V[:,0]
V2 = V[:,1]

plt.plot(T,V1, 'red')
plt.plot(T,V2, 'blue')

plt.show()

I'm deliberately not using odeint to integrate my ODEs, because I want to add stochasticity to the equations later and therefore want to use the Euler-type scheme above. Anyway, if anyone could help me figure out how to fix this code so that the expected spiking behavior occurs, that would be great.
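
For reference, adding noise later would only change the update step. A minimal Euler-Maruyama sketch, with a placeholder drift f and a hypothetical noise strength sigma (neither is part of the model above):

import numpy as np

dt, nt, NN = 0.01, 1000, 2
sigma = 0.5  # hypothetical noise strength, not part of the original model

def f(V):
    return -(V + 65.0)  # placeholder drift standing in for the full HH right-hand side

V = np.zeros((nt, NN))
V[0, :] = -65.0
for i in range(1, nt):
    dW = np.sqrt(dt) * np.random.randn(NN)   # Wiener increments, std sqrt(dt)
    V[i, :] = V[i-1, :] + dt*f(V[i-1, :]) + sigma*dW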

Tags: python, ode
3 Answers

1 vote

Check your input current and your conductances. It will also make your life easier if you write the code in the more modern formalism, i.e. dm/dt = (m_inf - m)/tau. But the concrete problem is that your gating-variable integration does not work: you are not updating the gating variables correctly. Check for the missing time-step math.
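
A minimal sketch of that formalism for m, reusing the question's own rate functions (the same conversion, m_inf = alpha/(alpha+beta) and tau = 1/(alpha+beta), applies to h and n):

import numpy as np

def alpham(V): return (0.1*V+4.0)/(1.0 - np.exp(-0.1*V-4.0))
def betam(V):  return 4.0*np.exp(-(V+65.0)/18.0)

def m_inf(V):
    return alpham(V)/(alpham(V) + betam(V))   # steady-state activation

def tau_m(V):
    return 1.0/(alpham(V) + betam(V))         # activation time constant, in ms

def step_m(m, V, dt):
    # forward-Euler update of dm/dt = (m_inf(V) - m)/tau_m(V)
    return m + dt*(m_inf(V) - m)/tau_m(V)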


1 vote

Here is what I did for coupling neurons. You can find more details on GitHub: https://www.github.com/nosratullah/HodgkinHuxely

import numpy as np
import matplotlib.pyplot as plt

def HodgkinHuxley(I,preVoltage):

    #holders
    v = []
    m = []
    h = []
    n = []
    s = [] #synaptic channel
    Isynlist = []
    dt = 0.05
    t = np.linspace(0,10,len(I))

    #constants
    Cm = 1.0 #microFarad/cm^2
    ENa = 50 #millivolt
    EK = -77  #millivolt
    El = -54 #millivolt
    E_ampa = 0 #millivolt
    g_Na = 120 #mS/cm^2
    g_K = 36 #mS/cm^2
    g_l = 0.03 #mS/cm^2
    g_syn = 0.3
    t_d = 2 #ms, decay time
    t_r = 0.4 #ms, rise time
    Tij = 0 #ms, time delay
    #Define functions
    def alphaN(v):
        return 0.01*(v+50)/(1-np.exp(-(v+50)/10))

    def betaN(v):
        return 0.125*np.exp(-(v+60)/80)

    def alphaM(v):
        return 0.1*(v+35)/(1-np.exp(-(v+35)/10))

    def betaM(v):
        return 4.0*np.exp(-0.0556*(v+60))

    def alphaH(v):
        return 0.07*np.exp(-0.05*(v+60))

    def betaH(v):
        return 1/(1+np.exp(-(0.1)*(v+30)))

    def H_pre(preV):
        return 1 + np.tanh(preV)

    #Initialize the voltage and the channels :
    v.append(-60)
    m0 = alphaM(v[0])/(alphaM(v[0])+betaM(v[0]))
    n0 = alphaN(v[0])/(alphaN(v[0])+betaN(v[0]))
    h0 = alphaH(v[0])/(alphaH(v[0])+betaH(v[0]))
    #s0 = alpha_s(preVoltage[0])/(alpha_s(preVoltage[0])+beta_s(preVoltage[0]))
    s0 = 0

    #t.append(0)
    m.append(m0)
    n.append(n0)
    h.append(h0)
    s.append(s0)

    if (type(preVoltage)==int):
        preVoltage = np.zeros(len(I)) #check if preVoltage exists or not

    #solving ODE using Euler's method:
    for i in range(1,len(t)):
        m.append(m[i-1] + dt*((alphaM(v[i-1])*(1-m[i-1]))-betaM(v[i-1])*m[i-1]))
        n.append(n[i-1] + dt*((alphaN(v[i-1])*(1-n[i-1]))-betaN(v[i-1])*n[i-1]))
        h.append(h[i-1] + dt*((alphaH(v[i-1])*(1-h[i-1]))-betaH(v[i-1])*h[i-1]))
        s.append(s[i-1] + dt * (H_pre(preVoltage[i-1])*((1-s[i-1]/t_r))-(s[i-1]/t_d)))
        gNa = g_Na * h[i-1]*(m[i-1])**3
        gK = g_K*n[i-1]**4
        gl = g_l
        INa = gNa*(v[i-1]-ENa)
        IK = gK*(v[i-1]-EK)
        Il = gl*(v[i-1]-El)
        #Synaptic Current comes from the pre neuron
        Isyn = -0.1 * s[i-1] * (v[i-1] - E_ampa)
        #making a list for Synaptic currents for plotting
        Isynlist.append(Isyn)
        v.append(v[i-1]+(dt)*((1/Cm)*(I[i-1]-(INa+IK+Il+Isyn))))

    return v,Isynlist


length = 10000
zeroVoltage = np.zeros(length)
#inputCurrent = np.ones(length)*4
#inputCurrent[1000:2000] = 10
#inputCurrent[2000::] = 3
#inputCurrent2 = np.ones(length)*5.46
noisyInput1 = np.ones(length)*7 + np.random.normal(0,3,length)
noisyInput2 = np.ones(length)*5 + np.random.normal(0,3,length)
preVoltage,preSyn = HodgkinHuxley(noisyInput1,zeroVoltage)
postVoltage,postSyn = HodgkinHuxley(noisyInput2,preVoltage)
plt.figure(figsize=(15,10))
plt.plot(preVoltage,'r',label='pre synaptic voltage');
#plt.plot(postVoltage,'b');
plt.plot(postSyn,'g-.',label='synaptic current');
plt.plot(postVoltage,'b',label='post synaptic voltage');
#plt.xlim(left=0)
plt.ylim(top=50)
plt.legend(loc='upper left');
plt.savefig('coupledNeuron.png',dpi=150)
plt.show()


0 votes

Since you have

$$\dot{x}_i = f(x_i) + \sum_j C_{ij}\, g(x_i, x_j),$$

you can write your right-hand side in the same shape, where each $x_i$ is a vector, e.g. $(v, m, h, n, s)$.

This oscillator example can serve as a starting point, where x.shape is (n_onsite_variables, n_oscillators):

import numpy as np

def f(x):
    # on-site dynamics of a harmonic oscillator: x'' = -x
    return np.array([x[1, :], -x[0, :]])


def g(xi, xj):
    # diffusive coupling through the first on-site variable
    return xi[0] - xj[0]

def rhs(x, t, c):
    coupling = np.array([sum(cij*g(xi,xj) for cij, xj in zip(ci, x.T))
                                          for ci, xi in zip(c, x.T)])
    coupling = np.outer(np.arange(2), coupling) # coupling enters only in x''

    return f(x) + coupling

x0 = np.random.random(size=(2, 3))
# array([[ 0.74386362,  0.85799588,  0.70501992],
#        [ 0.65903405,  0.41575505,  0.93166973]])

def ring(n):
    # nearest-neighbour coupling on a ring of n oscillators
    c = np.eye(n, k=1) + np.eye(n, k=-1)
    c[0, -1] = 1
    c[-1, 0] = 1
    return c

c = ring(3)
x1 = rhs(x0, 0, c)
# array([[ 0.65903405,  0.41575505,  0.93166973],
#        [-0.81915217, -0.59088767, -0.89683958]])

I'm sure this can be improved, especially if you want more general coupling.
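
For the specific diffusive coupling g(xi, xj) = xi[0] - xj[0] used above, the double loop can also be replaced by matrix operations; a minimal sketch under that assumption:

import numpy as np

def coupling_vec(x, c):
    # sum_j c_ij*(pos_i - pos_j)  ==  pos_i * c.sum(axis=1)_i - (c @ pos)_i
    pos = x[0, :]                  # first on-site variable of each oscillator
    return pos*c.sum(axis=1) - c @ pos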
