Deep Learning Notes: A Python Implementation of a Deep CNN

Deepening the Network

What sets deep learning apart from ordinary neural networks is the use of many hidden layers. In this example we build a CNN-based deep network; after training, its recognition accuracy on the MNIST dataset exceeds 99%.

The hidden-layer structure of the network:
conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - conv - ReLU - conv - ReLU - pool - affine - ReLU - dropout - affine - dropout - softmax

The complete code first:

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that files in the parent directory can be imported
import pickle
import numpy as np
from collections import OrderedDict
from common.layers import *


class DeepConvNet:
    """A high-accuracy ConvNet whose recognition rate exceeds 99%.

    Network structure:
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        conv - relu - conv - relu - pool -
        affine - relu - dropout - affine - dropout - softmax
    """
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):
        # Initialize the weights ===========
        # On average, how many neurons of the previous layer each layer's neurons
        # connect to (TODO: compute automatically)
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

    def loss(self, x, t):
        y = self.predict(x, train_flg=True)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

    def save_params(self, file_name="params.pkl"):
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val

        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            self.layers[layer_idx].W = self.params['W' + str(i+1)]
            self.layers[layer_idx].b = self.params['b' + str(i+1)]

Walkthrough:

1

    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):

This fixes the shape of every layer: all convolution kernels are 3 x 3 with stride 1; the padding is 2 for the 4th convolutional layer and 1 for all the others. The number of kernels is 16 in convolutional layers 1-2, 32 in layers 3-4, and 64 in layers 5-6. The final hidden (fully connected) layer has 50 neurons.

2

pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial values when using ReLU

This lists, for each layer, how many neurons of the previous layer each of its neurons connects to on average (the fan-in n), and derives from it the standard deviation sqrt(2/n) of the He initial values recommended when ReLU is the activation function.
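As a quick illustration, here is the He scale worked out by hand for the first convolutional layer (a minimal sketch using only NumPy; the variable names are hypothetical):

import numpy as np

# each neuron of conv layer 1 sees 1 channel x 3 x 3 = 9 inputs (its fan-in)
fan_in = 1 * 3 * 3
scale = np.sqrt(2.0 / fan_in)              # He standard deviation, about 0.471
W1 = scale * np.random.randn(16, 1, 3, 3)  # 16 kernels, 1 input channel, 3x3 each
print(W1.std())                            # close to 0.471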

3

        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

Here we iterate over the list of shape parameters to obtain each layer's shape and initialize its values. The weights of the convolutional and affine layers all use He initial values (suited to ReLU), and all biases start at 0. Note how pre_channel_num is updated after each iteration so that every kernel's channel count matches the previous layer's filter_num.
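To confirm that the chaining works, one can instantiate the class and print the resulting parameter shapes (a sketch that assumes common.layers is importable, as in the full listing above):

network = DeepConvNet()
for i in range(1, 9):
    print('W' + str(i), network.params['W' + str(i)].shape)
# W1 (16, 1, 3, 3)   W2 (16, 16, 3, 3)   W3 (32, 16, 3, 3)
# W4 (32, 32, 3, 3)  W5 (64, 32, 3, 3)   W6 (64, 64, 3, 3)
# W7 (1024, 50)      W8 (50, 10)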

4

        # Build the layers ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                       conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                       conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                       conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                       conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                       conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                       conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))

        self.last_layer = SoftmaxWithLoss()

The layers are stacked in network order and stored in the list layers.

Network structure:
conv (16 kernels, 3 x 3) - ReLU - conv (16, 3 x 3) - ReLU - pool (2 x 2, stride 2) - conv (32, 3 x 3) - ReLU - conv (32, 3 x 3) - ReLU - pool (2 x 2, stride 2) - conv (64, 3 x 3) - ReLU - conv (64, 3 x 3) - ReLU - pool (2 x 2, stride 2) - affine - ReLU - dropout (ratio 0.5) - affine - dropout (ratio 0.5) - softmax
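The 64*4*4 fan-in of the first affine layer can be verified by tracing the spatial size of a 28 x 28 input through the stack with the usual output-size formula (input + 2*pad - filter)/stride + 1. This also explains why the 4th convolutional layer uses pad=2: it grows the 14 x 14 map to 16 x 16 so that the remaining 2 x 2 poolings divide evenly. A minimal sketch:

def conv_out(size, filter_size, pad, stride):
    # standard convolution output-size formula
    return (size + 2*pad - filter_size) // stride + 1

h = 28                    # MNIST input is 1 x 28 x 28
h = conv_out(h, 3, 1, 1)  # conv1: 28
h = conv_out(h, 3, 1, 1)  # conv2: 28
h //= 2                   # pool:  14
h = conv_out(h, 3, 1, 1)  # conv3: 14
h = conv_out(h, 3, 2, 1)  # conv4 with pad=2: 16
h //= 2                   # pool:  8
h = conv_out(h, 3, 1, 1)  # conv5: 8
h = conv_out(h, 3, 1, 1)  # conv6: 8
h //= 2                   # pool:  4
print(64 * h * h)         # 1024 == 64*4*4, the input size of W7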

5

    def predict(self, x, train_flg=False):
        for layer in self.layers:
            if isinstance(layer, Dropout):
                x = layer.forward(x, train_flg)
            else:
                x = layer.forward(x)
        return x

predict runs a forward pass through the network. When train_flg is True the network is in training mode and the Dropout layers randomly drop neurons; when it is False the network is in inference mode and all neurons are used.
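For reference, the Dropout layer in the book's common/layers.py is essentially the following (reproduced as a sketch): during training each neuron is kept with probability 1 - dropout_ratio, and at test time the outputs are scaled by that probability instead.

class Dropout:
    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            # randomly disable neurons during training
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        else:
            # use all neurons at test time, scaled by the keep probability
            return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        # gradients flow only through the neurons that were kept
        return dout * self.mask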

6

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx, train_flg=False)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

accuracy evaluates the network in inference mode, one mini-batch of batch_size samples at a time, and returns the fraction of correct predictions over the whole dataset (samples beyond the last full batch are skipped).
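The first line handles one-hot labels: if t arrives one-hot encoded (t.ndim == 2), np.argmax converts it back to class indices so it can be compared against the predictions. For example:

import numpy as np

t_onehot = np.array([[0, 1, 0],
                     [1, 0, 0]])    # two one-hot labels
print(np.argmax(t_onehot, axis=1))  # [1 0], i.e. class indices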

7

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.last_layer.backward(dout)

        tmp_layers = self.layers.copy()
        tmp_layers.reverse()
        for layer in tmp_layers:
            dout = layer.backward(dout)

        # collect the gradients
        grads = {}
        for i, layer_idx in enumerate((0, 2, 5, 7, 10, 12, 15, 18)):
            grads['W' + str(i+1)] = self.layers[layer_idx].dW
            grads['b' + str(i+1)] = self.layers[layer_idx].db
        return grads

gradient first runs a forward pass to compute the loss, then backpropagates through the layers in reverse order, and finally collects each parameterized layer's dW and db into the grads dictionary. The tuple (0, 2, 5, 7, 10, 12, 15, 18) lists the positions in self.layers of the layers that own weights: the six Convolution layers and the two Affine layers.
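To see how these gradients are consumed, here is a single hand-rolled SGD step (a sketch; x_batch and t_batch are an assumed mini-batch, and the Trainer used below wraps this loop with Adam instead):

lr = 0.01
grads = network.gradient(x_batch, t_batch)
for key in network.params:
    # in-place update also updates the layers, since they hold
    # references to the same parameter arrays
    network.params[key] -= lr * grads[key]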

Training program

# coding: utf-8
import sys, os
sys.path.append(r"D:\AI learning source code")  # setting so that the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet
from common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=20, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# save the parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")

The training program instantiates DeepConvNet and trains it on the MNIST dataset in mini-batches of 100 samples for 20 epochs, using the Adam optimizer with a learning rate of 0.001 (evaluate_sample_num_per_epoch=1000 restricts the per-epoch accuracy check to 1000 samples to keep it fast). The learned parameters are then saved to deep_convnet_params.pkl.
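Once the parameters have been saved, they can be loaded back for inference without retraining (a sketch assuming the same directory layout as the training script):

from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet

(_, _), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
print("test accuracy:", network.accuracy(x_test, t_test))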
