10. Mystery Code (3)

Coder_zheng
# Data source: https://pan.baidu.com/s/1gtXlB9E2E5XbnwswG2banQ  extraction code: 5zju
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
# Three ways to stop the gradient-descent loop
STOP_ITER = 0   # stop after a fixed number of iterations
STOP_COST = 1   # stop when the change in cost falls below a threshold
STOP_GRAD = 2   # stop when the gradient norm falls below a threshold


def readData(path):
    # Read the data
    data = pd.read_csv(path, header=None, names=['Exam1', 'Exam2', 'Admitted'])
    # Prepend a constant column of 1s to every sample (the bias term)
    data.insert(0, 'One', 1)
    return data
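
# Quick sanity check of the loaded frame (illustrative; the column layout is
# fixed by readData, with the bias column 'One' prepended):
# df = readData('LogiReg_data.txt')
# print(df.shape)   # expect (m, 4): One, Exam1, Exam2, Admitted
# print(df.head())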

# Sigmoid function: squashes a value into a probability in (0, 1)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
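
# Sanity checks (illustrative): sigmoid(0) is exactly 0.5, and the output
# saturates toward 0 and 1 for large negative/positive inputs:
# sigmoid(0)                    # -> 0.5
# sigmoid(np.array([-10, 10]))  # -> approximately [4.5e-05, 0.99995]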

# The prediction function h_theta(x)
def model(X, theta):
    return sigmoid(np.dot(X, theta.T))
# Loss function
def cost(X, Y, theta):
    left = np.multiply(-Y, np.log(model(X, theta)))
    right = np.multiply(1 - Y, np.log(1 - model(X, theta)))
    return np.sum(left - right) / len(X)
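
# The two terms above implement the binary cross-entropy loss:
# J(theta) = -(1/m) * sum( y*log(h(x)) + (1-y)*log(1-h(x)) )
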
# Compute the gradient, i.e. one gradient-descent step's partial derivatives
def gradient(X, Y, theta):
    grad = np.zeros(theta.shape)
    error = (model(X, theta) - Y).ravel()
    for j in range(len(theta.ravel())):  # for each parameter
        term = np.multiply(error, X[:, j])
        grad[0, j] = np.sum(term) / len(X)

    return grad
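
# An equivalent vectorized form (a sketch; it produces the same (1, n)
# gradient as the loop above without the per-parameter Python loop):
# def gradient(X, Y, theta):
#     error = model(X, theta) - Y       # (m, 1) residuals
#     return (X.T @ error).T / len(X)   # (1, n) averaged gradient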

# Shuffle the data to remove any ordering introduced when it was collected,
# which makes the samples more representative
def shuffleData(data):
    np.random.shuffle(data)
    cols = data.shape[1]
    X = data[:, 0:cols - 1]
    Y = data[:, cols - 1:]
    return X, Y
# Three different stopping strategies
def stopCriterion(stopType, value, threshold):
    if stopType == STOP_ITER: return value > threshold
    elif stopType == STOP_COST: return abs(value[-1] - value[-2]) < threshold
    elif stopType == STOP_GRAD: return np.linalg.norm(value) < threshold
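
# How each strategy is fed (illustrative):
# stopCriterion(STOP_ITER, 5001, 5000)             # -> True: iteration cap hit
# stopCriterion(STOP_COST, [0.693, 0.6929], 0.01)  # -> True: cost barely moved
# stopCriterion(STOP_GRAD, np.zeros((1, 3)), 0.05) # -> True: gradient ~ 0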

# Gradient-descent driver
def descent(data, theta, batchSize, stopType, thresh, alpha):
    # Solve by gradient descent
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # batch index
    X, Y = shuffleData(data)
    grad = np.zeros(theta.shape)
    costs = [cost(X, Y, theta)]  # cost history

    while True:
        grad = gradient(X[k:k + batchSize], Y[k:k + batchSize], theta)
        k += batchSize  # advance by one batch of samples
        if k >= n:  # n is the total sample count, set in __main__
            k = 0
            X, Y = shuffleData(data)  # reshuffle for the next pass
        theta = theta - alpha * grad  # parameter update
        costs.append(cost(X, Y, theta))
        i += 1

        if stopType == STOP_ITER: value = i
        elif stopType == STOP_COST: value = costs
        elif stopType == STOP_GRAD: value = grad
        if stopCriterion(stopType, value, thresh): break
    return theta, i - 1, costs, grad, time.time() - init_time

# Run one experiment and plot the cost curve
def runExpe(data, theta, batchSize, stopType, thresh, alpha):
    theta, niter, costs, grad, dur = descent(data, theta, batchSize, stopType, thresh, alpha)
    # Crude check: raw exam scores exceed 2, standardized ones mostly don't
    name = "Original" if (data[:, 1] > 2).sum() > 1 else "Scaled"
    name += " data - learning rate: {} - ".format(alpha)
    if batchSize == n: strDescType = "Gradient"
    elif batchSize == 1: strDescType = "Stochastic"
    else: strDescType = "Mini-batch ({})".format(batchSize)
    name += strDescType + " descent - Stop: "
    if stopType == STOP_ITER: strStop = "{} iterations".format(thresh)
    elif stopType == STOP_COST: strStop = "costs change < {}".format(thresh)
    else: strStop = "gradient norm < {}".format(thresh)
    name += strStop
    print("***{}\nTheta: {} - Iter: {} - Last cost: {:03.2f} - Duration: {:03.2f}s".format(
        name, theta, niter, costs[-1], dur))
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(np.arange(len(costs)), costs, 'r')
    ax.set_xlabel('Iterations')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')
    plt.show()  # needed when run as a script rather than in a notebook
    return theta
    
# Apply a 0.5 threshold to turn predicted probabilities into class labels
def predict(X, theta):
    return [1 if x >= 0.5 else 0 for x in model(X, theta)]
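
# Example usage (a sketch; assumes theta has already been fitted above):
# X = orig_data[:, :3]
# predictions = predict(X, theta)   # list of 0/1 labels, one per sample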

if __name__ == '__main__':
    n = 100  # sample count of the dataset; batchSize=n means full-batch gradient descent
    data = readData('LogiReg_data.txt')
    orig_data = data.to_numpy()  # as_matrix() was removed in newer pandas
    theta = np.zeros([1, 3])
    runExpe(orig_data, theta, n, STOP_ITER, thresh=5000, alpha=0.000001)
    #runExpe(orig_data, theta, n, STOP_COST, thresh=0.000001, alpha=0.001)
    #runExpe(orig_data, theta, n, STOP_GRAD, thresh=0.05, alpha=0.001)
    
    #runExpe(orig_data, theta, 1, STOP_ITER, thresh=5000, alpha=0.001)
    #runExpe(orig_data, theta, 1, STOP_ITER, thresh=15000, alpha=0.000002)
    #runExpe(orig_data, theta, 16, STOP_ITER, thresh=15000, alpha=0.001)
    
    #scaled_X = scaled_data[:, :3]
    #y = scaled_data[:, 3]
    #predictions = predict(scaled_X, theta)
    #correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
    #accuracy = sum(map(int, correct)) / len(correct) * 100
    #print('accuracy = {0}%'.format(accuracy))
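
    # The commented block above refers to scaled_data, which is never built in
    # this script. A minimal sketch, assuming sklearn is available, that
    # standardizes the two exam-score columns:
    #from sklearn import preprocessing as pp
    #scaled_data = orig_data.copy()
    #scaled_data[:, 1:3] = pp.scale(orig_data[:, 1:3])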


Logistic Regression + Gradient Descent
