Deep Learning Notes: A Python Implementation of a Deep CNN

Deepening the Network

Deep learning differs from an ordinary neural network mainly in its use of many hidden layers. In this example we build a CNN-based deep network whose recognition accuracy on the MNIST dataset exceeds 99% after training.

Hidden-layer structure of the network:
conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - affine - ReLU - dropout - affine - dropout - softmax

First, the complete code:

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that files in the parent directory can be imported
import pickle
import numpy as np
from collections import OrderedDict
from common.layers import *


class DeepConvNet:
    """A high-accuracy ConvNet with a recognition rate above 99%.

    Network structure:
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        affine - relu - dropout - affine - dropout - softmax
    """
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):
        # Initialize weights ===========
        # average number of connections each layer's neurons have to the previous layer (TODO: compute automatically)
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU (He initialization)

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

    def loss(self, x, t):
        y = self.predict(x, train_flg=True)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients stored in each parameterized layer
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

    def save_params(self, file_name="params.pkl"):
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val

        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            self.layers[layer_idx].W = self.params['W' + str(i+1)]
            self.layers[layer_idx].b = self.params['b' + str(i+1)]

Analysis:

1

    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):

This fixes the shape of every layer: all convolution kernels are 3 x 3 with stride 1, and the padding is 2 in the 4th convolutional layer and 1 everywhere else. The number of kernels is 16 in conv layers 1 and 2, 32 in layers 3 and 4, and 64 in layers 5 and 6. The final hidden (fully connected) layer has 50 neurons.
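
To see why the padding is 2 in the fourth convolutional layer and why the first affine layer expects 64*4*4 = 1024 inputs, we can trace the feature-map size through the network with the standard output-size formula out = (n + 2*pad - filter) / stride + 1. Below is a minimal sketch; conv_out is a hypothetical helper, not part of the book's code:

def conv_out(n, f, p, s=1):
    """Side length after a conv/pool layer: (n + 2p - f) // s + 1."""
    return (n + 2 * p - f) // s + 1

size = 28                          # MNIST input is 1 x 28 x 28
size = conv_out(size, 3, 1)        # conv1: 28 -> 28
size = conv_out(size, 3, 1)        # conv2: 28 -> 28
size = conv_out(size, 2, 0, 2)     # pool:  28 -> 14
size = conv_out(size, 3, 1)        # conv3: 14 -> 14
size = conv_out(size, 3, 2)        # conv4: 14 -> 16 (pad=2 grows the map)
size = conv_out(size, 2, 0, 2)     # pool:  16 -> 8
size = conv_out(size, 3, 1)        # conv5: 8 -> 8
size = conv_out(size, 3, 1)        # conv6: 8 -> 8
size = conv_out(size, 2, 0, 2)     # pool:  8 -> 4
print(size)                        # 4, so the flattened input to W7 is 64*4*4

With pad=1 in the fourth layer the final map would shrink to 3 x 3 instead of 4 x 4, which is why that one layer pads by 2.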

2

pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU (He initialization)

This sets each layer's fan-in (the number of input connections per neuron) and, from it, the standard deviation of the He initial values recommended when ReLU is the activation.
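
As a concrete example, the first convolutional layer has a fan-in of n = 1*3*3 = 9 connections per output neuron, so its weights get the standard deviation sqrt(2/9) ≈ 0.47. A minimal sketch (variable names are illustrative):

import numpy as np

fan_in = 1 * 3 * 3                        # channels x kernel height x kernel width
std = np.sqrt(2.0 / fan_in)               # He initial value, ~0.471
W1 = std * np.random.randn(16, 1, 3, 3)   # same shape as self.params['W1']
print(std, W1.std())                      # the sample std should be close to 0.471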

3

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

Here we iterate over the list of convolution parameters to obtain each layer's shape and initialize its values: the convolution kernels and the affine weights both use He initial values, and every bias starts at 0.
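
A quick sanity check is to print the resulting parameter shapes. A sketch, assuming the class definition above has been run (so common/layers.py from the book's source is importable):

network = DeepConvNet()
for i in range(1, 9):
    print('W%d' % i, network.params['W%d' % i].shape,
          'b%d' % i, network.params['b%d' % i].shape)
# W1 (16, 1, 3, 3) b1 (16,)  ...  W7 (1024, 50) b7 (50,)  W8 (50, 10) b8 (10,)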

4

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

The layers are assembled in network order and stored in the list self.layers.

Network structure:
conv (16 x 3 x 3) - ReLU - conv (16 x 3 x 3) - ReLU - pool (2 x 2, stride 2) - conv (32 x 3 x 3) - ReLU - conv (32 x 3 x 3) - ReLU - pool (2 x 2, stride 2) - conv (64 x 3 x 3) - ReLU - conv (64 x 3 x 3) - ReLU - pool (2 x 2, stride 2) - affine - ReLU - dropout (ratio 0.5) - affine - dropout (ratio 0.5) - softmax

5

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

Runs the network forward to produce a prediction. With train_flg=True the network is in training mode and the Dropout layers randomly drop neurons; with train_flg=False it is in inference mode and all neurons are used.
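
For reference, a Dropout layer with this train_flg interface is typically written as follows. This is a minimal sketch in the spirit of the book's common/layers.py, not necessarily its exact code:

import numpy as np

class Dropout:
    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            # Training: drop each unit with probability dropout_ratio.
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        # Inference: keep every unit, scaled by the keep probability so the
        # expected activation matches training.
        return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        # Gradients flow only through the units that were kept in forward.
        return dout * self.mask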

6

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

Returns the prediction accuracy over the whole dataset, evaluated batch_size examples at a time so that memory use stays bounded; one-hot labels are first converted to class indices with argmax.
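
A usage sketch, assuming a trained network and the test data loaded as in the training script below:

test_acc = network.accuracy(x_test, t_test, batch_size=100)
print("test accuracy: %.4f" % test_acc)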

7

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients stored in each parameterized layer
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

Computes the network's gradients by backpropagation: a forward pass through loss, then dout = 1 is propagated backward through the layers in reverse order, and finally the dW and db stored in each parameterized layer are gathered into the grads dictionary and returned.
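
The hard-coded tuple (0, 2, 5, 7, 10, 12, 15, 18) picks out exactly the six Convolution layers and the two Affine layers within self.layers. If the architecture changes, the indices can also be derived rather than hard-coded; a sketch, assuming a constructed network and the layer classes from common/layers.py:

param_layer_idxs = tuple(i for i, layer in enumerate(network.layers)
                         if isinstance(layer, (Convolution, Affine)))
print(param_layer_idxs)  # (0, 2, 5, 7, 10, 12, 15, 18)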

Training Script

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet
from common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=20, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Save the trained parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")

In the training script we instantiate DeepConvNet and train it on the MNIST dataset with mini-batch learning: the Adam optimizer with learning rate 0.001, a mini-batch size of 100, and 20 epochs. evaluate_sample_num_per_epoch=1000 restricts the per-epoch accuracy evaluation to 1000 samples to save time.
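
Once training has finished, the saved weights can be reloaded later without retraining. A sketch, assuming the same directory layout as above:

from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet

(_, _), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")  # file written by save_params above
print("test accuracy:", network.accuracy(x_test, t_test))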
