class AdalineGD(object):
    """ADAptive LInear NEuron classifier trained with batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate (typically in [0.0, 1.0]).
    n_iter : int
        Number of passes (epochs) over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    cost_ : list
        Sum-of-squared-errors cost per epoch (should shrink as training works).
    """

    def __init__(self, eta=0.01, n_iter=50):
        # Defaults added (backward-compatible) so the class can be
        # instantiated without arguments, as in the companion script.
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        """Fit training data X (n_samples, n_features) to targets y (+1/-1).

        Returns self, so calls can be chained.
        """
        self.w_ = np.zeros(1 + X.shape[1])
        # BUG FIX: original read `self.cost_ []` — the `=` was missing,
        # which is a syntax error. Must be an assignment of an empty list.
        self.cost_ = []
        for _ in range(self.n_iter):
            output = self.net_input(X)
            errors = y - output
            # Batch update: gradient of 0.5 * sum(errors**2) w.r.t. weights.
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        """Return the weighted sum of inputs plus the bias term."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity activation: Adaline learns on the raw net input."""
        return self.net_input(X)

    def predict(self, X):
        """Return class label +1 or -1 after thresholding at 0."""
        return np.where(self.activation(X) >= 0, 1, -1)
# 自适应线性神经元的分类算法 (Adaline: classification algorithm of the adaptive linear neuron)
class AdalineGD(object):
    """ADAptive LInear NEuron classifier trained with batch gradient descent.

    Parameters
    ----------
    eta : float
        Learning rate.
    n_iter : int
        Number of passes (epochs) over the training set.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; w_[0] is the bias unit.
    cost_ : list
        Sum-of-squared-errors cost per epoch, used to judge whether each
        update actually improves the model (it should keep shrinking).
    """

    def __init__(self, eta=0.1, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        # The biggest difference from the perceptron: weights are updated
        # from the continuous net input over the whole batch, driven by a
        # cost function, not per-sample on the thresholded output.
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            output = self.net_input(X)
            errors = y - output
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            cost = (errors ** 2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        # BUG FIX: original was np.dot(X, self.w_[1:] + self.w_[0]) — the
        # bias was added to every weight INSIDE the dot product. It must be
        # added to the result of the dot product.
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        # BUG FIX: original read `return self.net_input(self, X):` — it
        # passed self twice and ended with a stray colon (syntax error).
        return self.net_input(X)

    def predict(self, X):
        """Return class label +1 or -1 after thresholding at 0."""
        return np.where(self.activation(X) >= 0, 1, -1)


# Run the algorithm.
# A lower learning rate makes each weight update more precise; more
# iterations let the optimization get closer to the minimum.
ada = AdalineGD(eta=0.0001, n_iter=50)
ada.fit(X, y)
plot_decision_regions(X, y, classifier=ada)  # visualize the fitted decision boundary
plt.title('Adaline-Gradient descent')
plt.xlabel('花茎长度')
plt.ylabel('花瓣长度')
plt.legend(loc='upper left')
plt.show()
# Learning curve: cost per epoch — checks that training actually improves.
plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')
plt.xlabel('Epochs')  # number of self-training iterations (epochs)
plt.ylabel('sum-squard-error')  # cost (sum of squared errors) per epoch