import numpy as np


class Perceptron(object):
    '''
    eta: learning rate
    n_iter: number of passes used to train the weight vector
    w_: the neuron's weight vector
    errors_: records how many samples the neuron misclassifies in each pass
    '''
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        '''
        Train the neuron on the input data.
        X holds the input sample vectors, y the corresponding class labels.
        X: shape [n_samples, n_features]
        X: [[1, 2, 3], [4, 5, 6]]
        n_samples: 2
        n_features: 3
        y: [1, -1]
        '''
        # Initialize the weight vector to zeros; the extra 1 is for w0,
        # the threshold of the step function mentioned earlier.
        self.w_ = np.zeros(1 + X.shape[1])
        self.errors_ = []
X is created with np.array and has shape [n_samples, n_features]. For X = [[1, 2, 3], [4, 5, 6]], n_samples is 2 and n_features is 3, so X.shape[1] is the n_features value, i.e. 3, and np.zeros(1 + X.shape[1]) creates a weight vector with 4 entries.
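A quick sanity check of these shapes in NumPy (a minimal sketch using the example matrix above):

import numpy as np

X = np.array([[1, 2, 3],
              [4, 5, 6]])

print(X.shape)                   # (2, 3): 2 samples, 3 features
print(X.shape[1])                # 3: n_features
print(np.zeros(1 + X.shape[1]))  # [0. 0. 0. 0.]: weights w0, w1, w2, w3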
The perceptron classification algorithm
import numpy as np


class Perceptron(object):
    def __init__(self, eta=0.01, n_iter=10):
        # eta: learning rate; n_iter: number of passes used to train the weight vector
        self.eta = eta
        self.n_iter = n_iter

    # Train the neuron on the input samples. X: shape [n_samples, n_features];
    # n_features is the number of input signals X the neuron receives.
    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # initial weights are zero; the extra 1 covers the threshold w0 of the activation function
        self.errors_ = []  # errors_ records the number of misclassifications per pass (a list)

        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # weight update
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                # threshold update
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)

    def net_input(self, X):
        # dot product of the input signals X and the weights w
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        # predict the class label
        return np.where(self.net_input(X) >= 0.0, 1, -1)
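To see the class in action, here is a minimal usage sketch; the toy dataset, the variable names X, y and ppn, and the parameter choices are illustrative assumptions, not part of the original code:

import numpy as np

# Hypothetical, linearly separable toy data: two features per sample, labels 1 and -1
X = np.array([[ 1.0,  1.0],
              [ 2.0,  2.0],
              [-1.0, -1.0],
              [-2.0, -1.0]])
y = np.array([1, 1, -1, -1])

ppn = Perceptron(eta=0.1, n_iter=10)
ppn.fit(X, y)

print(ppn.w_)       # learned weights; w_[0] is the threshold w0
print(ppn.errors_)  # misclassifications per pass; drops to 0 once the data is separated
print(ppn.predict(np.array([1.5, 1.5])))    # -> 1
print(ppn.predict(np.array([-1.5, -1.5])))  # -> -1

Because this toy data is linearly separable, the errors_ list should reach 0 after a few passes, which is the perceptron's convergence behaviour the post describes.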