这是我在python中的代码
def error(y_desired, y):
    """Return True when the prediction ``y`` disagrees with the target."""
    return not (y == y_desired)
def step_func(weighted_sum, theta):
    """Heaviside step activation: fire (1) when the weighted sum reaches theta."""
    return int(weighted_sum >= theta)
def weight_adjustment(error, alpha, x_element):
    """Delta-rule weight change: error times learning rate times input value."""
    scaled_error = error * alpha
    return scaled_error * x_element
def weighted_sum(w, x_epoch):
    """Return the dot product of the weight vector ``w`` with one sample ``x_epoch``.

    Fix: the original re-bound both of its own parameters (``w``, ``x_epoch``)
    and its own name (``weighted_sum``) as loop/accumulator variables — a
    shadowing anti-pattern. ``sum`` over ``zip`` accumulates left-to-right
    from 0 exactly like the original loop, so results are float-identical.
    """
    return sum(wi * xi for wi, xi in zip(w, x_epoch))
def perceptron(x, y_desired, w, theta, alpha, max_epochs=1000):
    """Train a single perceptron until every sample is classified correctly.

    Args:
        x: list of input samples, each a list of feature values.
        y_desired: target output (0 or 1) for each sample.
        w: initial weight vector; mutated in place during training.
        theta: initial firing threshold (output is 1 when w.x - theta >= 0).
        alpha: learning rate.
        max_epochs: safety cap on training epochs (new, backward-compatible
            default) so unreachable targets can no longer hang forever.

    Returns:
        The trained weight vector ``w`` (the same list object, updated
        in place, as in the original).

    Bug fix: the original kept ``theta`` fixed. With a fixed positive
    threshold, the all-zero sample [0, 0] always yields w.x - theta < 0,
    i.e. output 0, so a target like NOR ([1, 0, 0, 0]) is unlearnable and
    the loop never terminates. Training theta as a bias term
    (theta -= alpha * err, since bias = -theta) restores the perceptron
    convergence guarantee for any linearly separable target.
    """
    for epochs in range(1, max_epochs + 1):
        print(f'Epoch number - {epochs}')
        mistakes = 0
        for sample, target in zip(x, y_desired):
            # Activation: dot product minus threshold, rounded as before
            # to keep the printed weight trajectories tidy.
            activation = round(sum(wi * xi for wi, xi in zip(w, sample)), 10)
            y = 1 if activation - theta >= 0 else 0
            err = target - y
            if err != 0:
                mistakes += 1
                for i, xi in enumerate(sample):
                    w[i] = round(w[i] + err * alpha * xi, 10)
                # Bias update (the fix): bias = -theta, bias += alpha*err.
                theta = round(theta - err * alpha, 10)
            print(w)
        print('\n')
        if mistakes == 0:
            break
    print('Final Weights -')
    return w
# Truth-table inputs for a two-input logic gate.
x = [[0, 0], [0, 1], [1, 0], [1, 1]]
# Desired outputs: the AND gate.
y_desired = [0, 0, 0, 1]
# Initial weights, one per input feature.
w = [0.3, -0.1]
# Train with firing threshold 0.2 and learning rate 0.1.
perceptron(x, y_desired, w, theta = 0.2, alpha = 0.1)
OR / AND 的情况工作正常，但当我尝试 y_desired = [1, 0, 0, 0]（即 NOR）时，它就进入无限循环。AND / NAND / OR / NOR 的分类本质上没有区别，它们都是线性可分的。
我是从根本上遗漏了什么，还是在权重更新（训练）中出了错？我错在哪里？如果可能的话，也请分享一些学习资料。
首先，您在训练过程中对权重做四舍五入，这是个不好的习惯。其次，您要求误差恰好为 0，而分类通常是基于概率的。
我推荐https://stackabuse.com/creating-a-neural-network-from-scratch-in-python/由3篇文章组成的系列文章,在解释基础知识方面做得不错。